Hostname | Role | IP |
---|---|---|
HDSS7-11.host.com | k8s proxy node 1 | 10.4.7.11 |
HDSS7-12.host.com | k8s proxy node 2 | 10.4.7.12 |
HDSS7-21.host.com | k8s worker node 1 | 10.4.7.21 |
HDSS7-22.host.com | k8s worker node 2 | 10.4.7.22 |
HDSS7-200.host.com | k8s ops node (Docker registry) | 10.4.7.200 |
Each host: 2 CPUs, 2 GB RAM, 50 GB disk
# Configure YUM repos
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
# Disable firewalld
systemctl disable firewalld.service
systemctl stop firewalld.service
# Install basic tools
yum install -y wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils
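These prep steps apply to all five hosts. A convenience loop, assuming root SSH access between hosts is already in place (hypothetical; running the commands on each host by hand works just as well):
for h in 10.4.7.11 10.4.7.12 10.4.7.21 10.4.7.22 10.4.7.200; do
ssh root@$h "setenforce 0; systemctl stop firewalld; systemctl disable firewalld"
done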
Install bind9 on HDSS7-11
[root@hdss7-11 ~]# yum install -y bind
Edit /etc/named.conf
options {
listen-on port 53 { 10.4.7.11; };
allow-query { any; }; # add this line
forwarders { 10.4.7.254; }; # upstream DNS, add this line
dnssec-enable no;
dnssec-validation no;
};
Check:
[root@hdss7-11 ~]# named-checkconf
Zone definitions
Add the host domain host.com and the business domain od.com:
[root@hdss7-11 ~]# vim /etc/named.rfc1912.zones
# Append the following
zone "host.com" IN {
type master;
file "host.com.zone";
allow-update { 10.4.7.11; };
};
zone "od.com" IN {
type master;
file "od.com.zone";
allow-update { 10.4.7.11; };
};
Zone data files
[root@hdss7-11 ~]# cat /var/named/host.com.zone
$ORIGIN host.com.
$TTL 600 ; 10minutes
@ IN SOA dns.host.com. dnsadmin.host.com. (
2019122301 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.host.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
HDSS7-11 A 10.4.7.11
HDSS7-12 A 10.4.7.12
HDSS7-21 A 10.4.7.21
HDSS7-22 A 10.4.7.22
HDSS7-200 A 10.4.7.200
[root@hdss7-11 ~]# cat /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019122301 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
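Before starting named, the zone files themselves can be validated with named-checkzone (part of bind):
[root@hdss7-11 ~]# named-checkzone host.com /var/named/host.com.zone
[root@hdss7-11 ~]# named-checkzone od.com /var/named/od.com.zone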
Start bind9
[root@hdss7-11 ~]# named-checkconf
[root@hdss7-11 ~]# systemctl start named
[root@hdss7-11 ~]# systemctl enable named
Check
[root@hdss7-11 ~]# dig -t A hdss7-21.host.com @10.4.7.11 +short
10.4.7.21
[root@hdss7-11 ~]# dig -t A hdss7-200.host.com @10.4.7.11 +short
10.4.7.200
Configure the DNS clients
[root@hdss7-200 ~]# vim /etc/sysconfig/network-scripts/ifcfg-eth0
DNS1="10.4.7.11" # point DNS1 at the new nameserver
[root@hdss7-200 ~]# systemctl restart network
[root@hdss7-200 ~]# cat /etc/resolv.conf
# Generated by NetworkManager
search host.com
nameserver 10.4.7.11
On the Windows workstation: Network and Sharing Center -> adapter settings -> set the DNS server to 10.4.7.11
If necessary, also set the interface metric of the virtual adapter to 10
On the ops host HDSS7-200.host.com:
Certificate signing tool CFSSL, release R1.2
[root@hdss7-200 ~]# curl -s -L -o /usr/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@hdss7-200 ~]# curl -s -L -o /usr/bin/cfssl-json https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@hdss7-200 ~]# curl -s -L -o /usr/bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@hdss7-200 ~]# chmod +x /usr/bin/cfssl*
[root@hdss7-200 ~]# mkdir -p /opt/certs
[root@hdss7-200 ~]# vi /opt/certs/ca-config.json
{
"signing": {
"default": {
"expiry": "175200h"
},
"profiles": {
"server": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
[root@hdss7-200 ~]# cat /opt/certs/ca-csr.json
{
"CN": "kubernetes-ca",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
],
"ca": {
"expiry": "175200h" #证书过期时间
}
}
CN: Common Name; browsers use this field to check whether a site certificate matches the domain, so it usually holds the domain name. Very important.
C: Country
ST: State / province
L: Locality / city
O: Organization Name (company)
OU: Organization Unit Name (department)
expiry: certificate lifetime; 175200h is 20 years (noted here rather than inline because JSON does not allow comments)
[root@hdss7-200 certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
# Generates ca.pem, ca.csr and ca-key.pem (ca-key.pem is the CA private key; guard it carefully)
[root@hdss7-200 certs]# ll
total 20
-rw-r--r-- 1 root root 1001 Dec 23 12:24 ca.csr
-rw-r--r-- 1 root root 332 Dec 23 12:16 ca-csr.json
-rw------- 1 root root 1679 Dec 23 12:24 ca-key.pem
-rw-r--r-- 1 root root 1354 Dec 23 12:24 ca.pem
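As an optional sanity check, inspect the new CA with cfssl-certinfo; the subject and the 20-year not_after should match ca-csr.json:
[root@hdss7-200 certs]# cfssl-certinfo -cert ca.pem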
On HDSS7-200.host.com, HDSS7-21.host.com and HDSS7-22.host.com:
hdss7-21 is used as the example here
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
mkdir -p /etc/docker /data/docker
vim /etc/docker/daemon.json
{
"graph": "/data/docker",
"storage-driver": "overlay",
"insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
"bip": "172.7.21.1/24",
"exec-opts": ["native.cgroupdriver=systemd"],
"live-restore": true
}
############ Configuration notes ###############
bip must change with the host IP:
hdss7-21.host.com  bip 172.7.21.1/24
hdss7-22.host.com  bip 172.7.22.1/24
hdss7-200.host.com bip 172.7.200.1/24
#################################
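A sketch of that mapping, assuming eth0 carries the host's 10.4.7.x address and its last octet is reused for the bip subnet (hypothetical helper, not part of the original steps):
OCTET=$(ip -4 addr show eth0 | awk '/inet /{split($2,a,"[./]"); print a[4]}')
echo "bip for this host: 172.7.${OCTET}.1/24"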
[root@hdss7-21 ~]# systemctl start docker
[root@hdss7-21 ~]# systemctl enable docker
On the ops host hdss7-200.host.com
A version above 1.7.5 is strongly recommended
Harbor on GitHub: https://github.com/goharbor/harbor
[root@hdss7-200 ~]# tar xf harbor-offline-installer-v1.8.3.tgz -C /opt/
[root@hdss7-200 ~]# mv /opt/harbor/ /opt/harbor-v1.8.3
[root@hdss7-200 ~]# ln -s /opt/harbor-v1.8.3/ /opt/harbor
[root@hdss7-200 opt]# vi /opt/harbor/harbor.yml
hostname: harbor.od.com
http:
port: 180 # port
harbor_admin_password: 123456 # admin password
data_volume: /data/harbor # where Harbor data lands on the host
log:
level: info
rotate_count: 50
rotate_size: 200M
location: /data/harbor/logs # log location
[root@hdss7-200 opt]# mkdir -p /data/harbor/logs
[root@hdss7-200 ~]# yum install -y docker-compose
[root@hdss7-200 ~]# cd /opt/harbor
[root@hdss7-200 harbor]# ./install.sh
The container's port 80 is mapped to port 180 on the host
[root@hdss7-200 harbor]# docker-compose ps
Name Command State Ports
--------------------------------------------------------------------------------------
harbor-core /harbor/start.sh Up
harbor-db /entrypoint.sh postgres Up 5432/tcp
harbor-jobservice /harbor/start.sh Up
harbor-log /bin/sh -c /usr/local/bin/ ... Up 127.0.0.1:1514->10514/tcp
harbor-portal nginx -g daemon off; Up 80/tcp
nginx nginx -g daemon off; Up 0.0.0.0:180->80/tcp
redis docker-entrypoint.sh redis ... Up 6379/tcp
registry /entrypoint.sh /etc/regist ... Up 5000/tcp
registryctl /harbor/start.sh Up
On HDSS7-11:
[root@hdss7-11 ~]# vi /var/named/od.com.zone
2019111002 ; serial
harbor A 10.4.7.200
// note: roll the serial forward by one whenever the zone changes
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A harbor.od.com +short
10.4.7.200
# Install nginx
[root@hdss7-200 harbor]# yum install -y nginx
# Configure nginx
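The notes omit the actual server block; a minimal sketch that fronts Harbor's port 180 on port 80, consistent with the hostname and port configured above (client_max_body_size raised so large image layers can be pushed):
[root@hdss7-200 harbor]# vi /etc/nginx/conf.d/harbor.od.com.conf
server {
listen 80;
server_name harbor.od.com;
client_max_body_size 1000m;
location / {
proxy_pass http://127.0.0.1:180;
}
}
[root@hdss7-200 harbor]# nginx -t
[root@hdss7-200 harbor]# systemctl start nginx
[root@hdss7-200 harbor]# systemctl enable nginx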
Log in to the Harbor web UI:
Username: admin
Password: 123456
1. Create a project (named public here)
2. Pull a test image and tag it
[root@hdss7-200 harbor]# docker pull nginx:1.7.9
[root@hdss7-200 harbor]# docker images |grep nginx
goharbor/nginx-photon v1.8.3 3a016e0dc7de 3 months ago 37MB
nginx 1.7.9 84581e99d807 4 years ago 91.6MB
[root@hdss7-200 harbor]# docker tag 84581e99d807 harbor.od.com/public/nginx:v1.7.9
3. Push to the Harbor registry
# Log in to the remote Harbor registry
[root@hdss7-200 harbor]# docker login harbor.od.com
Username: admin # user
Password: # password
# Push the test image
[root@hdss7-200 harbor]# docker push harbor.od.com/public/nginx:v1.7.9
4. Verify the push in the browser
Hostname | Role | IP |
---|---|---|
hdss7-12.host.com | etcd leader | 10.4.7.12 |
hdss7-21.host.com | etcd follower | 10.4.7.21 |
hdss7-22.host.com | etcd follower | 10.4.7.22 |
Note: this walkthrough uses hdss7-12.host.com; the other two nodes are deployed the same way
Create on the ops host hdss7-200:
[root@hdss7-200 certs]# cat etcd-peer-csr.json
{
"CN": "k8s-etcd",
"hosts": [
"10.4.7.11",
"10.4.7.12",
"10.4.7.21",
"10.4.7.22"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
# hosts: list every host where etcd might run; add any IP that could ever join the cluster
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json |cfssl-json -bare etcd-peer
[root@hdss7-200 certs]# ll|grep etcd
-rw------- 1 root root 1062 Dec 23 14:19 etcd-peer.csr
-rw------- 1 root root 363 Dec 23 14:18 etcd-peer-csr.json
-rw------- 1 root root 1675 Dec 23 14:19 etcd-peer-key.pem
-rw------- 1 root root 1432 Dec 23 14:19 etcd-peer.pem
On HDSS7-12:
[root@hdss7-12 ~]# useradd -s /sbin/nologin -M etcd
[root@hdss7-12 ~]# id etcd
uid=1000(etcd) gid=1000(etcd) groups=1000(etcd)
etcd downloads: https://github.com/etcd-io/etcd/tags // version 3.1.20 is used here
[root@hdss7-12 ~]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
[root@hdss7-12 ~]# mv /opt/etcd-v3.1.20-linux-amd64/ /opt/etcd-v3.1.20
[root@hdss7-12 ~]# ln -s /opt/etcd-v3.1.20/ /opt/etcd
# Create directories and set ownership
[root@hdss7-12 ~]# mkdir -p /opt/etcd/certs // etcd cert directory
[root@hdss7-12 ~]# mkdir -p /data/etcd // etcd data directory
[root@hdss7-12 ~]# mkdir -p /data/logs/etcd-server // etcd log directory
[root@hdss7-12 ~]# chown -R etcd:etcd /data/etcd /data/logs/etcd-server/
# Copy the certificate and private key
[root@hdss7-12 ~]# cd /opt/etcd/certs/
[root@hdss7-12 ~]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-12 ~]# scp hdss7-200:/opt/certs/etcd-peer.pem .
[root@hdss7-12 ~]# scp hdss7-200:/opt/certs/etcd-peer-key.pem .
[root@hdss7-12 certs]# ll
total 12
-rw-r--r-- 1 etcd etcd 1354 Dec 23 14:41 ca.pem
-rw------- 1 etcd etcd 1675 Dec 23 14:41 etcd-peer-key.pem
-rw------- 1 etcd etcd 1432 Dec 23 14:41 etcd-peer.pem
[root@hdss7-12 certs]# cat /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-7-12 \
    --data-dir /data/etcd/etcd-server \
    --listen-peer-urls https://10.4.7.12:2380 \
    --listen-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
    --quota-backend-bytes 8000000000 \
    --initial-advertise-peer-urls https://10.4.7.12:2380 \
    --advertise-client-urls https://10.4.7.12:2379,http://127.0.0.1:2379 \
    --initial-cluster etcd-server-7-12=https://10.4.7.12:2380,etcd-server-7-21=https://10.4.7.21:2380,etcd-server-7-22=https://10.4.7.22:2380 \
    --ca-file ./certs/ca.pem \
    --cert-file ./certs/etcd-peer.pem \
    --key-file ./certs/etcd-peer-key.pem \
    --client-cert-auth \
    --trusted-ca-file ./certs/ca.pem \
    --peer-ca-file ./certs/ca.pem \
    --peer-cert-file ./certs/etcd-peer.pem \
    --peer-key-file ./certs/etcd-peer-key.pem \
    --peer-client-cert-auth \
    --peer-trusted-ca-file ./certs/ca.pem \
    --log-output stdout
################ Flag notes ################
# Client traffic is served on 2379 and peer traffic on 2380; both client and peer connections require TLS against the shared CA
[root@hdss7-12 certs]# chmod +x /opt/etcd/etcd-server-startup.sh
[root@hdss7-12 ~]# chown -R etcd:etcd /opt/etcd-v3.1.20/
supervisor is a tool that manages background processes
[root@hdss7-12 certs]# yum install -y supervisor
[root@hdss7-12 certs]# systemctl start supervisord
[root@hdss7-12 certs]# systemctl enable supervisord
[root@hdss7-12 ~]# vi /etc/supervisord.d/etcd-server.ini
[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/etcd ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
etcd counts as up only once ports 2379 and 2380 are listening
[root@hdss7-12 ~]# supervisorctl update
etcd-server-7-12: added process group
[root@hdss7-12 ~]# supervisorctl status
etcd-server-7-12 RUNNING pid 9560, uptime 0:00:40
[root@hdss7-12 ~]# netstat -luntp|grep etcd
tcp 0 0 10.4.7.12:2379 0.0.0.0:* LISTEN 22657/./etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 22657/./etcd
tcp 0 0 10.4.7.12:2380 0.0.0.0:* LISTEN 22657/./etcd
The other two nodes are identical apart from two files:
1. the IP addresses in /opt/etcd/etcd-server-startup.sh
2. /etc/supervisord.d/etcd-server.ini
// Renaming the [program:...] tag in the supervisord ini just makes hosts easier to tell apart; it is a production hygiene habit, and skipping it does not break startup
Run on any etcd node:
[root@hdss7-22 opt]# cd /opt/etcd
[root@hdss7-22 etcd]# ./etcdctl cluster-health
member 988139385f78284 is healthy: got healthy result from http://127.0.0.1:2379
member 5a0ef2a004fc4349 is healthy: got healthy result from http://127.0.0.1:2379
member f4a0cb0a765574a8 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
[root@hdss7-22 etcd]# ./etcdctl member list
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=false
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.4.7.12:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.12:2379 isLeader=true
Hostname | Role | IP |
---|---|---|
HDSS7-21.host.com | kube-apiserver | 10.4.7.21 |
HDSS7-22.host.com | kube-apiserver | 10.4.7.22 |
HDSS7-11.host.com | L4 load balancer | 10.4.7.11 |
HDSS7-12.host.com | L4 load balancer | 10.4.7.12 |
Here 10.4.7.11 and 10.4.7.12 run nginx as a layer-4 load balancer, with keepalived floating the VIP 10.4.7.10 in front of the two kube-apiservers for high availability.
hdss7-21 is the example below; the other worker node is deployed the same way.
[root@hdss7-21 ~]# tar xf kubernetes-server-linux-amd64.tar.gz -C /opt/
[root@hdss7-21 ~]# mv /opt/kubernetes/ /opt/kubernetes-v1.15.2
[root@hdss7-21 ~]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes
# Remove the source tarball plus the *.tar and *_tag files under bin; they only matter for kubeadm-style deployment
[root@hdss7-21 kubernetes]# ll
total 27184
drwxr-xr-x. 2 root root 6 Aug 5 18:01 addons
-rw-r--r--. 1 root root 26625140 Aug 5 18:01 kubernetes-src.tar.gz
-rw-r--r--. 1 root root 1205293 Aug 5 18:01 LICENSES
drwxr-xr-x. 3 root root 17 Aug 5 17:57 server
[root@hdss7-21 kubernetes]# rm -rf kubernetes-src.tar.gz
[root@hdss7-21 kubernetes]# cd server/bin/
[root@hdss7-21 bin]# rm -rf *.tar
[root@hdss7-21 bin]# rm -rf *_tag
# Only the following files need to stay
[root@hdss7-21 bin]# ll
total 884636
-rwxr-xr-x. 1 root root 43534816 Aug 5 18:01 apiextensions-apiserver
-rwxr-xr-x. 1 root root 100548640 Aug 5 18:01 cloud-controller-manager
-rwxr-xr-x. 1 root root 200648416 Aug 5 18:01 hyperkube
-rwxr-xr-x. 1 root root 40182208 Aug 5 18:01 kubeadm
-rwxr-xr-x. 1 root root 164501920 Aug 5 18:01 kube-apiserver
-rwxr-xr-x. 1 root root 116397088 Aug 5 18:01 kube-controller-manager
-rwxr-xr-x. 1 root root 42985504 Aug 5 18:01 kubectl
-rwxr-xr-x. 1 root root 119616640 Aug 5 18:01 kubelet
-rwxr-xr-x. 1 root root 36987488 Aug 5 18:01 kube-proxy
-rwxr-xr-x. 1 root root 38786144 Aug 5 18:01 kube-scheduler
-rwxr-xr-x. 1 root root 1648224 Aug 5 18:01 mounter
On the ops host HDSS7-200:
[root@hdss7-200 certs]# vim /opt/certs/client-csr.json
{
"CN": "k8s-node",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client
[root@hdss7-200 certs]# ll |grep client
-rw------- 1 root root 993 Dec 23 16:40 client.csr
-rw------- 1 root root 280 Dec 23 16:39 client-csr.json
-rw------- 1 root root 1675 Dec 23 16:40 client-key.pem
-rw------- 1 root root 1371 Dec 23 16:40 client.pem
On the ops host HDSS7-200:
[root@hdss7-200 certs]# cat /opt/certs/apiserver-csr.json
{
"CN": "apiserver",
"hosts": [
"127.0.0.1",
"192.168.0.1",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
"10.4.7.10",
"10.4.7.21",
"10.4.7.22",
"10.4.7.23"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
# hosts: list every IP the apiserver may ever be reached at
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver
[root@hdss7-200 certs]# ll|grep apiserver
-rw------- 1 root root 1245 Dec 23 16:49 apiserver.csr
-rw------- 1 root root 562 Dec 23 16:45 apiserver-csr.json
-rw------- 1 root root 1675 Dec 23 16:49 apiserver-key.pem
-rw------- 1 root root 1598 Dec 23 16:49 apiserver.pem
[root@hdss7-21 ~]# mkdir -p /opt/kubernetes/server/bin/cert
[root@hdss7-21 ~]# cd /opt/kubernetes/server/bin/cert
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca-key.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/apiserver.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/apiserver-key.pem .
[root@hdss7-21 bin]# mkdir /opt/kubernetes/server/bin/conf
[root@hdss7-21 conf]# vi /opt/kubernetes/server/bin/conf/audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
- "RequestReceived"
rules:
# Log pod changes at RequestResponse level
- level: RequestResponse
resources:
- group: ""
# Resource "pods" doesn't match requests to any subresource of pods,
# which is consistent with the RBAC policy.
resources: ["pods"]
# Log "pods/log", "pods/status" at Metadata level
- level: Metadata
resources:
- group: ""
resources: ["pods/log", "pods/status"]
# Don't log requests to a configmap called "controller-leader"
- level: None
resources:
- group: ""
resources: ["configmaps"]
resourceNames: ["controller-leader"]
# Don't log watch requests by the "system:kube-proxy" on endpoints or services
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core API group
resources: ["endpoints", "services"]
# Don't log authenticated requests to certain non-resource URL paths.
- level: None
userGroups: ["system:authenticated"]
nonResourceURLs:
- "/api*" # Wildcard matching.
- "/version"
# Log the request body of configmap changes in kube-system.
- level: Request
resources:
- group: "" # core API group
resources: ["configmaps"]
# This rule only applies to resources in the "kube-system" namespace.
# The empty string "" can be used to select non-namespaced resources.
namespaces: ["kube-system"]
# Log configmap and secret changes in all other namespaces at the Metadata level.
- level: Metadata
resources:
- group: "" # core API group
resources: ["secrets", "configmaps"]
# Log all other resources in core and extensions at the Request level.
- level: Request
resources:
- group: "" # core API group
- group: "extensions" # Version of group should NOT be included.
# A catch-all rule to log all other requests at the Metadata level.
- level: Metadata
# Long-running requests like watches that fall under this rule will not
# generate an audit event in RequestReceived.
omitStages:
- "RequestReceived"
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver --apiserver-count 2 \
    --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
    --audit-policy-file ./conf/audit.yaml \
    --authorization-mode RBAC \
    --client-ca-file ./cert/ca.pem \
    --requestheader-client-ca-file ./cert/ca.pem \
    --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --etcd-cafile ./cert/ca.pem \
    --etcd-certfile ./cert/client.pem \
    --etcd-keyfile ./cert/client-key.pem \
    --etcd-servers https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
    --service-account-key-file ./cert/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --service-node-port-range 3000-29999 \
    --target-ram-mb=1024 \
    --kubelet-client-certificate ./cert/client.pem \
    --kubelet-client-key ./cert/client-key.pem \
    --log-dir /data/logs/kubernetes/kube-apiserver \
    --tls-cert-file ./cert/apiserver.pem \
    --tls-private-key-file ./cert/apiserver-key.pem \
    --v 2
################# Flag notes ############
--apiserver-count 2 // number of apiservers; use 3 if resources allow
--audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log // audit log directory
--audit-policy-file ./conf/audit.yaml // audit rules
--authorization-mode RBAC // role-based access control
--v 2 // log level 2
[root@hdss7-21 bin]# chmod +x kube-apiserver.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@hdss7-21 bin]# supervisorctl update
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 1:59:05
kube-apiserver-7-21 RUNNING pid 10178, uptime 0:05:01
[root@hdss7-21 bin]# netstat -lntup|grep kube-api
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 10179/./kube-apiser
tcp6 0 0 :::6443 :::* LISTEN 10179/./kube-apiser
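An optional liveness probe: in 1.15 the insecure localhost port 8080 still serves /healthz and should return ok:
[root@hdss7-21 bin]# curl http://127.0.0.1:8080/healthz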
hdss7-22 is almost identical; in /etc/supervisord.d/kube-apiserver.ini change the tag to [program:kube-apiserver-7-22]
On HDSS7-11.host.com and HDSS7-12.host.com: the VIP 10.4.7.10:7443 proxies port 6443 of both apiservers; keepalived provides the VIP
[root@hdss7-11 ~]# yum install -y nginx keepalived
[root@hdss7-11 ~]# vim /etc/nginx/nginx.conf
# Append at the end of the file
stream {
upstream kube-apiserver {
server 10.4.7.21:6443 max_fails=3 fail_timeout=30s;
server 10.4.7.22:6443 max_fails=3 fail_timeout=30s;
}
server {
listen 7443;
proxy_connect_timeout 2s;
proxy_timeout 900s;
proxy_pass kube-apiserver;
}
}
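Since the stream block sits outside http, make sure this nginx build loads the stream module, then validate the config parses:
[root@hdss7-11 ~]# nginx -t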
Port-check script
[root@hdss7-11 ~]# vi /etc/keepalived/check_port.sh
#!/bin/bash
# keepalived port-monitoring script
# Usage inside keepalived.conf:
# vrrp_script check_port { # define a vrrp_script
#     script "/etc/keepalived/check_port.sh 6379" # port to watch
#     interval 2 # check interval in seconds
# }
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
if [ $PORT_PROCESS -eq 0 ];then
echo "Port $CHK_PORT Is Not Used,End."
exit 1
fi
else
echo "Check Port Cant Be Empty!"
fi
Make the script executable
[root@hdss7-11 ~]# chmod +x /etc/keepalived/check_port.sh
keepalived configuration files
# keepalived master:
[root@hdss7-11 conf.d]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.4.7.11
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 251
priority 100
advert_int 1
mcast_src_ip 10.4.7.11
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.4.7.10
}
}
# keepalived backup:
[root@hdss7-12 conf.d]# vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.4.7.12
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 251
mcast_src_ip 10.4.7.12
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.4.7.10
}
}
// Start nginx on hdss7-11 and hdss7-12
~]# systemctl start nginx
~]# systemctl enable nginx
// Start keepalived on hdss7-11 and hdss7-12
~]# systemctl start keepalived.service
~]# systemctl enable keepalived
// Check
[root@hdss7-11 keepalived]# ip a|grep 10.4.7
inet 10.4.7.11/24 brd 10.4.7.255 scope global noprefixroute eth0
inet 10.4.7.10/32 scope global eth0
Note: the keepalived master on hdss7-11 is configured with nopreempt, i.e. non-preemptive mode.
Reason: in preemptive mode, a transient network blip that makes check_port fail would move the VIP away and then automatically move it back, triggering alerts. In production a flapping VIP is a major incident that requires a postmortem, so automatic failback is unacceptable.
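With nopreempt, the VIP stays on hdss7-12 even after hdss7-11 recovers. A manual failback during a maintenance window looks roughly like this (hypothetical drill):
[root@hdss7-12 ~]# systemctl stop keepalived   # current holder releases the VIP, which moves back to hdss7-11
[root@hdss7-12 ~]# systemctl start keepalived  # rejoins as backup
[root@hdss7-11 ~]# ip a | grep 10.4.7.10       # confirm the VIP is home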
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | controller-manager | 10.4.7.21 |
hdss7-22.host.com | controller-manager | 10.4.7.22 |
Note: hdss7-21 is the example here; the other node is deployed the same way
[root@hdss7-21 bin]# vim /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager --cluster-cidr 172.7.0.0/16 \
    --leader-elect true \
    --log-dir /data/logs/kubernetes/kube-controller-manager \
    --master http://127.0.0.1:8080 \
    --service-account-private-key-file ./cert/ca-key.pem \
    --service-cluster-ip-range 192.168.0.0/16 \
    --root-ca-file ./cert/ca.pem \
    --v 2
[root@hdss7-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@hdss7-21 bin]# supervisorctl update
kube-controller-manager-7-21: added process group
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 2:27:41
kube-apiserver-7-21 RUNNING pid 10178, uptime 0:33:37
kube-controller-manager-7-21 RUNNING pid 10231, uptime 0:04:16
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | kube-scheduler | 10.4.7.21 |
hdss7-22.host.com | kube-scheduler | 10.4.7.22 |
Note: hdss7-21 is the example here; the other worker node is deployed the same way
On hdss7-21:
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler --leader-elect \
    --log-dir /data/logs/kubernetes/kube-scheduler \
    --master http://127.0.0.1:8080 \
    --v 2
// If the control-plane components lived on different hosts, client certs ('client-key.pem and client.pem') would be needed; in this lab they share a host, so the local insecure port needs no certs.
[root@hdss7-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
[root@hdss7-21 bin]# mkdir -p /data/logs/kubernetes/kube-scheduler
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@hdss7-21 bin]# supervisorctl update
kube-scheduler-7-21: added process group
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 2:37:41
kube-apiserver-7-21 RUNNING pid 10178, uptime 0:43:37
kube-controller-manager-7-21 RUNNING pid 10231, uptime 0:14:16
kube-scheduler-7-21 STARTING
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 2:38:06
kube-apiserver-7-21 RUNNING pid 10178, uptime 0:44:02
kube-controller-manager-7-21 RUNNING pid 10231, uptime 0:14:41
kube-scheduler-7-21 RUNNING pid 10265, uptime 0:00:30
hdss7-22 is almost identical; in /etc/supervisord.d/kube-scheduler.ini change the tag to [program:kube-scheduler-7-22]
# Create a kubectl symlink
[root@hdss7-21 bin]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
# Check cluster health
[root@hdss7-21 bin]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | kubelet | 10.4.7.21 |
hdss7-22.host.com | kubelet | 10.4.7.22 |
Note: hdss7-21 is the example here; the other worker node is deployed the same way
On the ops host hdss7-200:
[root@hdss7-200 certs]# vi /opt/certs/kubelet-csr.json
{
"CN": "k8s-kubelet",
"hosts": [
"127.0.0.1",
"10.4.7.10",
"10.4.7.21",
"10.4.7.22",
"10.4.7.23",
"10.4.7.24",
"10.4.7.25",
"10.4.7.26",
"10.4.7.27",
"10.4.7.28"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
# Pre-register every IP that might ever run a kubelet in the hosts field
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
[root@hdss7-200 certs]# ll|grep kubelet
-rw------- 1 root root 1115 Dec 23 17:58 kubelet.csr
-rw------- 1 root root 452 Dec 23 17:56 kubelet-csr.json
-rw------- 1 root root 1675 Dec 23 17:58 kubelet-key.pem
-rw------- 1 root root 1472 Dec 23 17:58 kubelet.pem
On HDSS7-21 and HDSS7-22:
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kubelet-key.pem .
[root@hdss7-21 cert]# ll|grep kubelet
-rw-------. 1 root root 1675 Dec 23 18:02 kubelet-key.pem
-rw-------. 1 root root 1472 Dec 23 18:02 kubelet.pem
Everything below runs in the conf directory
[root@hdss7-21 cert]# cd /opt/kubernetes/server/bin/conf/
[root@hdss7-21 conf]# kubectl config set-cluster myk8s --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem --embed-certs=true --server=https://10.4.7.10:7443 --kubeconfig=kubelet.kubeconfig
Output:
Cluster "myk8s" set.
[root@hdss7-21 conf]# kubectl config set-credentials k8s-node --client-certificate=/opt/kubernetes/server/bin/cert/client.pem --client-key=/opt/kubernetes/server/bin/cert/client-key.pem --embed-certs=true --kubeconfig=kubelet.kubeconfig
Output:
User "k8s-node" set.
[root@hdss7-21 conf]# kubectl config set-context myk8s-context --cluster=myk8s --user=k8s-node --kubeconfig=kubelet.kubeconfig
Output:
Context "myk8s-context" created.
[root@hdss7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
Output:
Switched to context "myk8s-context".
[root@hdss7-21 conf]# ll
total 12
-rw-r--r--. 1 root root 2223 Dec 23 16:56 audit.yaml
-rw-------. 1 root root 6215 Dec 23 18:05 kubelet.kubeconfig
# The long base64 block inside kubelet.kubeconfig is simply ca.pem, base64-encoded
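A quick way to confirm that (certificate-authority-data is the field kubectl writes); the two checksums should match:
[root@hdss7-21 conf]# grep certificate-authority-data kubelet.kubeconfig | awk '{print $2}' | base64 -d | md5sum
[root@hdss7-21 conf]# md5sum /opt/kubernetes/server/bin/cert/ca.pem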
1. Create the resource manifest
[root@hdss7-21 conf]# vi k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k8s-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
2. Create the binding from the manifest
[root@hdss7-21 conf]# kubectl create -f k8s-node.yaml // -f points at the manifest
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created // the object is persisted in etcd
3. Query the cluster role binding and inspect its attributes
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node
NAME AGE
k8s-node 7m54s
[root@hdss7-21 conf]# kubectl get clusterrolebinding k8s-node -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding // a cluster-scoped binding object; everything in k8s is a resource
metadata:
creationTimestamp: "2019-12-23T10:14:58Z"
name: k8s-node // the resource's name
resourceVersion: "3030"
selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
uid: 5fda36a6-d8bb-48c0-9e7f-56d0e052bee0
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node // binds the k8s-node user to the system:node cluster role, granting it worker-node permissions in this cluster
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
Copy kubelet.kubeconfig over to hdss7-22:
[root@hdss7-22 conf]# cd /opt/kubernetes/server/bin/conf/
[root@hdss7-22 conf]# scp hdss7-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
[root@hdss7-22 conf]# ll
total 12
-rw-r--r-- 1 root root 2223 Nov 16 18:48 audit.yaml
-rw------- 1 root root 6199 Nov 16 23:14 kubelet.kubeconfig
On the ops host HDSS7-200
Why a pause image: kubelet needs a tiny base image to bring up each pod before the business container. In this sidecar-style model, kubelet starts the small pause container first so it can claim the pod's UTS, NET and IPC namespaces; the pod IP is already allocated even before the business container comes up.
[root@hdss7-200 certs]# docker pull kubernetes/pause
# Tag it
[root@hdss7-200 certs]# docker tag f9d5de079539 harbor.od.com/public/pause:latest
# Push it to Harbor
[root@hdss7-200 certs]# docker push harbor.od.com/public/pause:latest
On hdss7-21:
[root@hdss7-21 conf]# vi /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
./kubelet --anonymous-auth=false \
    --cgroup-driver systemd \
    --cluster-dns 192.168.0.2 \
    --cluster-domain cluster.local \
    --runtime-cgroups=/systemd/system.slice \
    --kubelet-cgroups=/systemd/system.slice \
    --fail-swap-on="false" \
    --client-ca-file ./cert/ca.pem \
    --tls-cert-file ./cert/kubelet.pem \
    --tls-private-key-file ./cert/kubelet-key.pem \
    --hostname-override hdss7-21.host.com \
    --image-gc-high-threshold 20 \
    --image-gc-low-threshold 10 \
    --kubeconfig ./conf/kubelet.kubeconfig \
    --log-dir /data/logs/kubernetes/kube-kubelet \
    --pod-infra-container-image harbor.od.com/public/pause:latest \
    --root-dir /data/kubelet
################### Flag notes ############
--anonymous-auth=false // anonymous access is not allowed
--cgroup-driver systemd // must match docker's daemon.json
--cluster-dns 192.168.0.2 // cluster DNS address
--fail-swap-on="false" // swap should normally be off; this lets kubelet start even with swap enabled
--hostname-override hdss7-21.host.com // node name
--pod-infra-container-image harbor.od.com/public/pause:latest // the pause image pushed above
==Note: the kubelet startup script differs slightly per host; on other nodes remember to change --hostname-override==
[root@hdss7-21 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
[root@hdss7-21 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
[root@hdss7-21 conf]# vi /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-7-21]
command=/opt/kubernetes/server/bin/kubelet.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
Note: adjust the program tag when deploying other hosts
[root@hdss7-21 conf]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 3:37:01
kube-apiserver-7-21 RUNNING pid 10178, uptime 1:42:57
kube-controller-manager-7-21 RUNNING pid 10231, uptime 1:13:36
kube-kubelet-7-21 RUNNING pid 10457, uptime 0:01:36
kube-scheduler-7-21 RUNNING pid 10265, uptime 0:59:25
The other node is the same apart from small tweaks to:
/opt/kubernetes/server/bin/kubelet.sh
/etc/supervisord.d/kube-kubelet.ini
[root@hdss7-22 bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready <none> 11m v1.15.2
hdss7-22.host.com Ready <none> 3m38s v1.15.2
// ROLES shows <none>; add labels, one of k8s's signature management features
[root@hdss7-22 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
node/hdss7-21.host.com labeled
[root@hdss7-22 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master 8h v1.15.2
hdss7-22.host.com Ready <none> 8h v1.15.2
[root@hdss7-22 ~]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
node/hdss7-21.host.com labeled
[root@hdss7-22 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 8h v1.15.2
hdss7-22.host.com Ready <none> 8h v1.15.2
Connecting the pod network to the cluster network
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | kube-proxy | 10.4.7.21 |
hdss7-22.host.com | kube-proxy | 10.4.7.22 |
==Note: hdss7-21 is the example here; other worker nodes are similar==
On the ops host hdss7-200:
[root@hdss7-200 certs]# vi kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
[root@hdss7-200 certs]# ll |grep proxy
-rw------- 1 root root 1005 Dec 23 19:08 kube-proxy-client.csr
-rw------- 1 root root 1679 Dec 23 19:08 kube-proxy-client-key.pem
-rw------- 1 root root 1383 Dec 23 19:08 kube-proxy-client.pem
-rw------- 1 root root 267 Dec 23 19:06 kube-proxy-csr.json
On HDSS7-21 and HDSS7-22
# Copy the cert and key; the key file must keep mode 600
[root@hdss7-21 conf]# cd /opt/kubernetes/server/bin/cert/
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kube-proxy-client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/kube-proxy-client-key.pem .
==Note: must be run from the conf directory==
[root@hdss7-21 cert]# cd /opt/kubernetes/server/bin/conf/
[root@hdss7-21 conf]# kubectl config set-cluster myk8s --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem --embed-certs=true --server=https://10.4.7.10:7443 --kubeconfig=kube-proxy.kubeconfig
Output:
Cluster "myk8s" set.
[root@hdss7-21 conf]# kubectl config set-credentials kube-proxy --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
Output:
User "kube-proxy" set.
[root@hdss7-21 conf]# kubectl config set-context myk8s-context --cluster=myk8s --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
Output:
Context "myk8s-context" created.
[root@hdss7-21 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
Output:
Switched to context "myk8s-context".
[root@hdss7-21 conf]# scp kube-proxy.kubeconfig hdss7-22:/opt/kubernetes/server/bin/conf/
On HDSS7-21 and HDSS7-22
ipvs is used as the scheduling mode here
[root@hdss7-21 bin]# lsmod |grep ip_vs
[root@hdss7-21 bin]# vi /root/ipvs.sh
#!/bin/bash
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
/sbin/modinfo -F filename $i &>/dev/null
if [ $? -eq 0 ];then
/sbin/modprobe $i
fi
done
[root@hdss7-21 bin]# chmod +x /root/ipvs.sh
[root@hdss7-21 bin]# sh /root/ipvs.sh
[root@hdss7-21 bin]# lsmod |grep ip_vs
ip_vs_wrr 12697 0
ip_vs_wlc 12519 0
ip_vs_sh 12688 0
ip_vs_sed 12519 0
......
[root@hdss7-21 bin]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy --cluster-cidr 172.7.0.0/16 \
    --hostname-override hdss7-21.host.com \
    --proxy-mode=ipvs \
    --ipvs-scheduler=nq \
    --kubeconfig ./conf/kube-proxy.kubeconfig
[root@hdss7-21 conf]# ll /opt/kubernetes/server/bin/conf/|grep kube-proxy
-rw-------. 1 root root 6239 Dec 23 19:14 kube-proxy.kubeconfig
[root@hdss7-21 conf]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@hdss7-21 conf]# mkdir -p /data/logs/kubernetes/kube-proxy
[root@hdss7-21 bin]# vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@hdss7-21 conf]# supervisorctl update
kube-proxy-7-21: added process group
[root@hdss7-21 conf]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 4:11:54
kube-apiserver-7-21 RUNNING pid 10178, uptime 2:17:50
kube-controller-manager-7-21 RUNNING pid 10231, uptime 1:48:29
kube-kubelet-7-21 RUNNING pid 10457, uptime 0:36:29
kube-proxy-7-21 RUNNING pid 17732, uptime 0:00:46
kube-scheduler-7-21 RUNNING pid 10265, uptime 1:34:18
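To confirm kube-proxy is actually programming ipvs rules, install ipvsadm and list them; the 192.168.0.1:443 service should map to both apiservers via the nq scheduler:
[root@hdss7-21 ~]# yum install -y ipvsadm
[root@hdss7-21 ~]# ipvsadm -Ln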
hdss7-22 is almost identical:
/etc/supervisord.d/kube-proxy.ini
change the tag to [program:kube-proxy-7-22]
/opt/kubernetes/server/bin/kube-proxy.sh
change to --hostname-override hdss7-22.host.com
[root@hdss7-21 ~]# vi /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: nginx-ds
spec:
template:
metadata:
labels:
app: nginx-ds
spec:
containers:
- name: my-nginx
image: harbor.od.com/public/nginx:v1.7.9
ports:
- containerPort: 80
[root@hdss7-21 bin]# kubectl create -f /root/nginx-ds.yaml // create the resource
Output:
daemonset.extensions/nginx-ds created
[root@hdss7-21 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-ds-hqmc6 1/1 Running 0 48m
nginx-ds-lchtt 1/1 Running 0 19m
[root@hdss7-21 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-hqmc6 1/1 Running 0 50m 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-lchtt 1/1 Running 0 20m 172.7.21.2 hdss7-21.host.com <none> <none>
# Why can't the pod on hdss7-22 be reached from here?
Containers on different hosts cannot talk to each other yet; the next section's flannel solves this
[root@hdss7-22 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
[root@hdss7-22 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 96m v1.15.2
hdss7-22.host.com Ready master,node 88m v1.15.2
[root@hdss7-22 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-ds-hqmc6 1/1 Running 0 53m
nginx-ds-lchtt 1/1 Running 0 24m
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | flannel | 10.4.7.21 |
hdss7-22.host.com | flannel | 10.4.7.22 |
Note: hdss7-21.host.com is the example here; the other worker node is deployed the same way
[root@hdss7-21 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@hdss7-21 src]# mkdir /opt/flannel-v0.11.0
[root@hdss7-21 src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@hdss7-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@hdss7-21 ~]# mkdir /opt/flannel/cert
[root@hdss7-21 ~]# ll /opt/flannel/
total 34436
drwxr-xr-x. 2 root root 6 Dec 23 21:14 cert
-rwxr-xr-x. 1 root root 35249016 Jan 29 2019 flanneld
-rwxr-xr-x. 1 root root 2139 Oct 23 2018 mk-docker-opts.sh
-rw-r--r--. 1 root root 4300 Oct 23 2018 README.md
[root@hdss7-21 cert]# cd /opt/flannel/cert/
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .
[root@hdss7-21 cert]# vim /opt/flannel/subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
==Note: other nodes differ; remember to change FLANNEL_SUBNET==
[root@hdss7-21 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld --public-ip=10.4.7.21 \
    --etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
    --etcd-keyfile=./cert/client-key.pem \
    --etcd-certfile=./cert/client.pem \
    --etcd-cafile=./cert/ca.pem \
    --iface=eth0 \
    --subnet-file=./subnet.env \
    --healthz-port=2401
==Note: other nodes differ; remember to change --public-ip==
[root@hdss7-21 flannel]# chmod +x /opt/flannel/flanneld.sh
[root@hdss7-21 flannel]# mkdir -p /data/logs/flanneld
[root@hdss7-21 flannel]# vim /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
==Note: other nodes differ; remember to change the program tag==
# Symlink etcdctl into PATH
[root@hdss7-21 etcd]# ln -s /opt/etcd/etcdctl /usr/bin/etcdctl
# Set the host-gw backend
[root@hdss7-21 etcd]# etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
Output:
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
# Verify
[root@hdss7-21 etcd]# etcdctl get /coreos.com/network/config
Output:
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
[root@hdss7-21 flannel]# supervisorctl update
flanneld-7-21: added process group
[root@hdss7-21 flannel]# supervisorctl status
etcd-server-7-21 RUNNING pid 9960, uptime 6:33:24
flanneld-7-21 RUNNING pid 52895, uptime 0:00:53 // the flannel network daemon
kube-apiserver-7-21 RUNNING pid 10178, uptime 4:39:20
kube-controller-manager-7-21 RUNNING pid 48963, uptime 0:14:40
kube-kubelet-7-21 RUNNING pid 10457, uptime 2:57:59
kube-proxy-7-21 RUNNING pid 17732, uptime 2:22:16
kube-scheduler-7-21 RUNNING pid 50936, uptime 0:07:44
The other node is basically the same as hdss7-21; adjust the following files:
# subnet.env
FLANNEL_SUBNET=172.7.22.1/24
# flanneld.sh
--public-ip=10.4.7.22
# /etc/supervisord.d/flannel.ini
[program:flanneld-7-22]
[root@hdss7-22 flannel]# ping 172.7.21.2
PING 172.7.21.2 (172.7.21.2) 56(84) bytes of data.
64 bytes from 172.7.21.2: icmp_seq=1 ttl=63 time=0.554 ms
64 bytes from 172.7.21.2: icmp_seq=2 ttl=63 time=0.485 ms
[root@hdss7-21 flannel]# ping 172.7.22.2
PING 172.7.22.2 (172.7.22.2) 56(84) bytes of data.
64 bytes from 172.7.22.2: icmp_seq=1 ttl=63 time=0.271 ms
64 bytes from 172.7.22.2: icmp_seq=2 ttl=63 time=0.196 ms
[root@hdss7-21 ~]# vi nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: nginx-ds
spec:
template:
metadata:
labels:
app: nginx-ds
spec:
containers:
- name: my-nginx
image: harbor.od.com/public/nginx:curl
ports:
- containerPort: 80
[root@hdss7-21 ~]# kubectl apply -f nginx-ds.yaml
[root@hdss7-21 ~]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
nginx-ds-mcvxt 1/1 Running 1 6d22h
nginx-ds-zsnz9 1/1 Running 1 6d22h
[root@hdss7-21 ~]# kubectl delete pod nginx-ds-mcvxt
pod "nginx-ds-mcvxt" deleted
[root@hdss7-21 ~]# kubectl delete pod nginx-ds-zsnz9
pod "nginx-ds-zsnz9" deleted
[root@hdss7-21 ~]# kubectl get pods -n default -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-d5kl8 1/1 Running 0 44s 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-jtn62 1/1 Running 0 56s 172.7.21.2 hdss7-21.host.com <none> <none>
[root@hdss7-21 ~]# kubectl exec -ti nginx-ds-jtn62 /bin/bash
root@nginx-ds-jtn62:/# curl 172.7.22.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
[root@hdss7-22 flannel]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-d5kl8 1/1 Running 0 5m24s 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-jtn62 1/1 Running 0 5m36s 172.7.21.2 hdss7-21.host.com <none> <none>
[root@hdss7-22 flannel]# kubectl logs -f nginx-ds-d5kl8
10.4.7.21 - - [23/Nov/2019:16:57:45 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
10.4.7.21 - - [23/Nov/2019:17:01:37 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
// The logged client IP is 10.4.7.21, the node's physical IP rather than the pod's, so SNAT happened; we want to see the container's real IP
Note: the iptables rules differ slightly on the other node; adjust accordingly when running there
[root@hdss7-21 ~]# yum install iptables-services -y
[root@hdss7-21 ~]# systemctl start iptables
[root@hdss7-21 ~]# systemctl enable iptables
[root@hdss7-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables-save |grep -i postrouting
# Also drop the default REJECT rules so forwarded pod traffic is not blocked
[root@hdss7-21 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@hdss7-21 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
########## Rule semantics #########
On 10.4.7.21: traffic sourced from the 172.7.21.0/24 docker range is SNATed only when the destination is NOT in 172.7.0.0/16 and the packet does not leave via the docker0 bridge
~]# service iptables save
iptables: Saving firewall rules to /etc/sysconfig/iptables:[ OK ]
[root@hdss7-21 ~]# kubectl logs -f nginx-ds-jtn62
172.7.22.2 - - [23/Nov/2019:17:46:48 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
[root@hdss7-22 ~]# kubectl logs -f nginx-ds-d5kl8
10.4.7.21 - - [23/Nov/2019:17:01:37 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
172.7.21.2 - - [23/Nov/2019:17:43:34 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
Note: this model requires all hosts to sit on the same L2 network, i.e. behind the same gateway device; it is the most efficient model. host-gw simply installs static routes equivalent to:
[root@hdss7-21 ~]# route add -net 172.7.22.0/24 gw 10.4.7.22 dev eth0
[root@hdss7-22 ~]# route add -net 172.7.21.0/24 gw 10.4.7.21 dev eth0
[root@hdss7-21 flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.254 0.0.0.0 UG 100 0 0 eth0
10.4.7.0 0.0.0.0 255.255.255.0 U 100 0 0 eth0
172.7.21.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
172.7.22.0 10.4.7.22 255.255.255.0 UG 0 0 0 eth0
[root@hdss7-22 flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.254 0.0.0.0 UG 100 0 0 eth0
10.4.7.0 0.0.0.0 255.255.255.0 U 100 0 0 eth0
172.7.21.0 10.4.7.21 255.255.255.0 UG 0 0 0 eth0
172.7.22.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
Note: one more iptables rule needs optimizing (the destination network differs per host):
~]# iptables -t filter -I FORWARD -d 172.7.21.0/24 -j ACCEPT
Switching to the VxLAN model:
1. Stop flanneld first, via supervisorctl stop flanneld-7-[21,22]
2. Delete the routes the host-gw model created:
on hdss7-22: route del -net 172.7.21.0/24 gw 10.4.7.21
on hdss7-21: route del -net 172.7.22.0/24 gw 10.4.7.22
3. Rewrite the config on an etcd node:
./etcdctl get /coreos.com/network/config
./etcdctl rm /coreos.com/network/config
etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
4. supervisorctl start flanneld-7-21
supervisorctl start flanneld-7-22
5. ifconfig now shows an extra flannel.1 device, and route -n shows no per-host routes
Similar to MySQL's mixed binlog mode, VxLAN can be combined with direct routing:
'{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN","Directrouting": true}}'
On the ops host HDSS7-200.host.com, configure an nginx virtual host as the unified entry point for the cluster's resource manifests
# Write the config
[root@hdss7-200 html]# vi /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
listen 80;
server_name k8s-yaml.od.com;
location / {
autoindex on;
default_type text/plain;
root /data/k8s-yaml;
}
}
# Check and reload
[root@hdss7-200 html]# nginx -t
[root@hdss7-200 html]# nginx -s reload
# Create the yaml root and the coredns yaml directory
[root@hdss7-200 ~]# mkdir -p /data/k8s-yaml/coredns
On HDSS7-11:
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019111003 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200 // add this line and bump the serial by one
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A k8s-yaml.od.com @10.4.7.11 +short
10.4.7.200
[root@hdss7-200 ~]# docker pull coredns/coredns:1.6.1
[root@hdss7-200 ~]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@hdss7-200 ~]# docker push harbor.od.com/public/coredns:v1.6.1
Go into the coredns directory
[root@hdss7-200 ~]# cd /data/k8s-yaml/coredns/
[root@hdss7-200 coredns]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
[root@hdss7-200 coredns]# vi cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
ready
kubernetes cluster.local 192.168.0.0/16
forward . 10.4.7.11
cache 30
loop
reload
loadbalance
}
dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
containers:
- name: coredns
image: harbor.od.com/public/coredns:v1.6.1
args:
- -conf
- /etc/coredns/Corefile
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
svc.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 192.168.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
- name: metrics
port: 9153
protocol: TCP
Apply on any worker node
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
[root@hdss7-21 ~]# kubectl get all -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/coredns-6b6c4f9648-58z68 1/1 Running 0 64s 172.7.22.4 hdss7-22.host.com <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 58s k8s-app=coredns
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/coredns 1/1 1 1 64s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/coredns-6b6c4f9648 1 1 1 64s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns,pod-template-hash=6b6c4f9648
[root@hdss7-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short
www.a.shifen.com.
39.156.66.18
39.156.66.14
[root@hdss7-21 ~]# dig -t A hdss7-21.host.com @192.168.0.2 +short
10.4.7.21 //the self-built bind is CoreDNS's upstream, so host.com names resolve too
[root@hdss7-21 ~]# kubectl get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 7d <none>
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-vpff7 1/1 Running 1 19h
Check:
[root@hdss7-21 ~]# kubectl get svc -o wide -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
nginx-dp ClusterIP 192.168.95.151 <none> 80/TCP 7h21m app=nginx-dp
Verify:
[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.196.199
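Resolution also works from inside pods, since kubelet injects nameserver 192.168.0.2 and the svc.cluster.local search domains into ClusterFirst pods; a quick check against the nginx-dp pod listed above:
[root@hdss7-21 ~]# kubectl -n kube-public exec nginx-dp-5dfc689474-vpff7 -- cat /etc/resolv.conf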
[root@hdss7-200 coredns]# docker pull traefik:v1.7.2-alpine
[root@hdss7-200 coredns]# docker images|grep traefik
traefik v1.7.2-alpine add5fac61ae5 14 months ago 72.4MB
[root@hdss7-200 coredns]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 coredns]# docker push harbor.od.com/public/traefik:v1.7.2
Reference manifests: https://github.com/containous/traefik/tree/v1.7/examples/k8s
rbac.yaml
[root@hdss7-200 coredns]# mkdir /data/k8s-yaml/traefik
[root@hdss7-200 coredns]# cd /data/k8s-yaml/traefik/
[root@hdss7-200 traefik]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
ds.yaml
[root@hdss7-200 traefik]# vi ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.4.7.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
svc.yaml
[root@hdss7-200 traefik]# vi svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
  - protocol: TCP
    port: 80
    name: controller
  - protocol: TCP
    port: 8080
    name: admin-web
ingress.yaml
[root@hdss7-200 traefik]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
daemonset.extensions/traefik-ingress created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
service/traefik-ingress-service created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml
ingress.extensions/traefik-web-ui created
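Before moving on, confirm the DaemonSet landed on both compute nodes and that traefik answers on hostPort 81; a 404 for an unmatched Host header is the expected healthy reply:
[root@hdss7-21 ~]# kubectl -n kube-system get ds traefik-ingress
[root@hdss7-21 ~]# curl -I http://10.4.7.21:81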
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2019111004 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.4.7.11
harbor          A       10.4.7.200
k8s-yaml        A       10.4.7.200
traefik         A       10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
nginx must be configured on both HDSS7-11.host.com and HDSS7-12.host.com:
[root@hdss7-11 ~]# vi /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
    server 10.4.7.21:81    max_fails=3 fail_timeout=10s;
    server 10.4.7.22:81    max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;
    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host            $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@hdss7-11 ~]# nginx -t
[root@hdss7-11 ~]# nginx -s reload
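With the zone record, the 7-layer nginx proxy, and the ingress in place, traefik's dashboard should now be reachable end to end; a quick browserless check (assumes the 10.4.7.10 VIP from the earlier keepalived setup is active):
[root@hdss7-11 ~]# curl -I http://traefik.od.com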
[root@hdss7-200 ~]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
[root@hdss7-200 ~]# docker images|grep dashboard
[root@hdss7-200 ~]# docker tag fcac9aa03fd6 harbor.od.com/public/dashboard:v1.8.3
[root@hdss7-200 ~]# docker push harbor.od.com/public/dashboard:v1.8.3
rbac
[root@hdss7-200 harbor]# mkdir -p /data/k8s-yaml/dashboard && cd /data/k8s-yaml/dashboard
[root@hdss7-200 dashboard]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
deployment
[root@hdss7-200 dashboard]# vi dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/dashboard:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
Service
[root@hdss7-200 dashboard]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
Ingress
[root@hdss7-200 dashboard]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-admin created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
deployment.apps/kubernetes-dashboard created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
service/kubernetes-dashboard created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml
ingress.extensions/kubernetes-dashboard created
[root@hdss7-21 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-dx4br 1/1 Running 0 29m
kubernetes-dashboard-76dcdb4677-87j7l 1/1 Running 0 79s
traefik-ingress-6fd85 1/1 Running 0 21m
traefik-ingress-sv4xk 1/1 Running 0 21m
[root@hdss7-21 ~]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 89m
kubernetes-dashboard ClusterIP 192.168.174.151 <none> 443/TCP 85s
traefik-ingress-service ClusterIP 192.168.226.249 <none> 80/TCP,8080/TCP 22m
[root@hdss7-21 ~]# kubectl get ingress -n kube-system
NAME HOSTS ADDRESS PORTS AGE
kubernetes-dashboard dashboard.od.com 80 80s
traefik-web-ui traefik.od.com 80 21m
[root@hdss7-11 conf.d]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2019111005 ; serial, rolled forward by one
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.4.7.11
harbor          A       10.4.7.200
k8s-yaml        A       10.4.7.200
traefik         A       10.4.7.10
dashboard       A       10.4.7.10
[root@hdss7-11 conf.d]# systemctl restart named
[root@hdss7-11 conf.d]# dig -t A dashboard.od.com @10.4.7.11 +short
10.4.7.10
[root@hdss7-21 containers]# dig -t A dashboard.od.com @192.168.0.2 +short
10.4.7.10
Note: in production it is better not to restart named directly; use rndc reload instead.
Note: dashboard v1.8.3 lets you click Skip and bypass login entirely; logging in with a token requires HTTPS.
#step 1: create the csr json
[root@hdss7-200 certs]# vi od.com-csr.json
{
    "CN": "*.od.com",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
#step 2: sign and issue the certificate
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server od.com-csr.json |cfssl-json -bare od.com
#step 3: inspect the generated files
[root@hdss7-200 certs]# ll |grep od
-rw-r--r-- 1 root root 993 Dec 24 18:56 od.com.csr
-rw-r--r-- 1 root root 280 Dec 24 18:56 od.com-csr.json
-rw------- 1 root root 1679 Dec 24 18:56 od.com-key.pem
-rw-r--r-- 1 root root 1371 Dec 24 18:56 od.com.pem
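The issued certificate can be inspected with the cfssl-certinfo tool installed earlier, to confirm the *.od.com CN and the 20-year (175200h) expiry from ca-config.json:
[root@hdss7-200 certs]# cfssl-certinfo -cert od.com.pem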
On HDSS7-11 and HDSS7-12:
[root@hdss7-11 nginx]# mkdir -p /etc/nginx/certs
[root@hdss7-11 nginx]# cd /etc/nginx/certs/
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/od.com-key.pem .
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/od.com.pem .
[root@hdss7-11 conf.d]# cat dashboard.od.com.conf
server {
    listen       80;
    server_name  dashboard.od.com;
    rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
    listen       443 ssl;
    server_name  dashboard.od.com;

    ssl_certificate "certs/od.com.pem";
    ssl_certificate_key "certs/od.com-key.pem";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host            $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
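Check and reload nginx after adding the vhost, then verify the redirect and TLS end to end (-k skips certificate verification since the CA is self-signed):
[root@hdss7-11 conf.d]# nginx -t && nginx -s reload
[root@hdss7-11 conf.d]# curl -kI https://dashboard.od.com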
[root@hdss7-21 ~]# kubectl get secret -n kube-system
NAME TYPE DATA AGE
coredns-token-ch8cd kubernetes.io/service-account-token 3 142m
default-token-fk97s kubernetes.io/service-account-token 3 25h
kubernetes-dashboard-admin-token-blkdc kubernetes.io/service-account-token 3 54m
kubernetes-dashboard-key-holder Opaque 2 54m
traefik-ingress-controller-token-89t9g kubernetes.io/service-account-token 3 75m
[root@hdss7-21 ~]# kubectl describe secret kubernetes-dashboard-admin-token-blkdc -n kube-system
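The token is stored base64-encoded in the secret; a one-liner to print just the token for pasting into the dashboard login, using the secret name from the listing above:
[root@hdss7-21 ~]# kubectl -n kube-system get secret kubernetes-dashboard-admin-token-blkdc -o jsonpath='{.data.token}' | base64 -d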
[root@hdss7-200 ~]# docker pull quay.io/bitnami/heapster:1.5.4
[root@hdss7-200 ~]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@hdss7-200 ~]# docker push harbor.od.com/public/heapster:v1.5.4
On hdss7-200, create the directory:
[root@hdss7-200 k8s-yaml]# mkdir -p /data/k8s-yaml/dashboard/heapster
[root@hdss7-200 k8s-yaml]# cd /data/k8s-yaml/dashboard/heapster
rbac.yaml
[root@hdss7-200 heapster]# vi /data/k8s-yaml/dashboard/heapster/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
Deployment
[root@hdss7-200 heapster]# vi dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: harbor.od.com/public/heapster:v1.5.4
        imagePullPolicy: IfNotPresent
        command:
        - /opt/bitnami/heapster/bin/heapster
        - --source=kubernetes:https://kubernetes.default
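Here kubernetes.default resolves through CoreDNS to the apiserver's clusterIP, and heapster authenticates with the serviceaccount token mounted into its pod; the name can be checked directly:
[root@hdss7-21 ~]# dig -t A kubernetes.default.svc.cluster.local. @192.168.0.2 +short
192.168.0.1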
Service
[root@hdss7-200 heapster]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
On any compute node:
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/rbac.yaml
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/dp.yaml
deployment.extensions/heapster created
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/svc.yaml
service/heapster created
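Once the heapster pod is Running, the dashboard's workload pages begin showing CPU/memory graphs after a minute or two of scraping:
[root@hdss7-21 conf]# kubectl -n kube-system get pods | grep heapster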
Source: https://www.cnblogs.com/lpcsf/p/12093849.html