IP | Role | Components
---|---|---
192.168.0.21 | master | apiserver, controller-manager, scheduler, etcd
192.168.0.22 | node1 | kubelet, kube-proxy, docker
192.168.0.23 | node2 | kubelet, kube-proxy, docker
192.168.0.24 | harbor | harbor
# Disable the firewall temporarily
systemctl stop firewalld
# Disable it permanently
systemctl disable firewalld
# Disable SELinux temporarily
setenforce 0
# Disable it permanently
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Disable swap temporarily
swapoff -a
# Disable it permanently (comment out the swap line)
vim /etc/fstab
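Editing /etc/fstab by hand works; a scriptable alternative (a sketch, assuming a standard single swap entry) comments the line out:
# Comment out the swap entry so swap stays off across reboots
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab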
ntpdate ntp1.aliyun.com
vim /etc/hosts
192.168.0.21 master
192.168.0.22 node1
192.168.0.23 node2
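The same entries are needed on every machine; one way to push them out (assuming root SSH access to each host):
# Distribute /etc/hosts to the other machines
for ip in 192.168.0.22 192.168.0.23 192.168.0.24; do
  scp /etc/hosts root@$ip:/etc/hosts
done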
# Run the matching command on each machine
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
Binary package download: https://pan.baidu.com/s/1C1-GwFR_xs84Pvy24tPPGw (extraction code: ndw3)
Self-sign the etcd certificates with cfssl
# Install the cfssl tools:
[root@master ~]# tar xf TLS.tar.gz
[root@master ~]# ls
etcd.tar.gz k8s-master.tar.gz k8s-node.tar.gz TLS TLS.tar.gz
[root@master ~]# ls TLS
cfssl cfssl-certinfo cfssljson cfssl.sh etcd k8s
# Modify cfssl.sh as follows:
[root@master TLS]# cat cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
#cp -rf cfssl cfssl-certinfo cfssljson /usr/local/bin
chmod +x /usr/local/bin/cfssl*
# Run the script to fetch the tools
[root@master TLS]# sh cfssl.sh
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 9.8M 100 9.8M 0 0 2293k 0 0:00:04 0:00:04 --:--:-- 2294k
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 2224k 100 2224k 0 0 1089k 0 0:00:02 0:00:02 --:--:-- 1088k
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 6440k 100 6440k 0 0 2230k 0 0:00:02 0:00:02 --:--:-- 2231k
# Verify the cfssl binaries
[root@master TLS]# ll /usr/local/bin/cfssl*
-rwxr-xr-x 1 root root 10376657 Oct 9 11:24 /usr/local/bin/cfssl
-rwxr-xr-x 1 root root 6595195 Oct 9 11:24 /usr/local/bin/cfssl-certinfo
-rwxr-xr-x 1 root root 2277873 Oct 9 11:24 /usr/local/bin/cfssljson
# Generate the etcd certificates:
[root@master TLS]# cd etcd/
[root@master etcd]# pwd
/root/TLS/etcd
[root@master etcd]# ll
total 16
-rw-r--r-- 1 root root 287 Oct 3 13:12 ca-config.json #used to generate ca.pem and ca-key.pem
-rw-r--r-- 1 root root 209 Oct 3 13:12 ca-csr.json #CSR config for the CA
-rwxr-xr-x 1 root root 178 Oct 3 13:58 generate_etcd_cert.sh #script that generates the CA and server certs
-rw-r--r-- 1 root root 306 Oct 3 08:26 server-csr.json #used to generate server.pem, server-key.pem and server.csr
[root@master etcd]# cat generate_etcd_cert.sh
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
# Initialize a CA:
[root@master etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2019/10/09 11:27:50 [INFO] generating a new CA key and certificate from CSR
2019/10/09 11:27:50 [INFO] generate received request
2019/10/09 11:27:50 [INFO] received CSR
2019/10/09 11:27:50 [INFO] generating key: rsa-2048
2019/10/09 11:27:51 [INFO] encoded CSR
2019/10/09 11:27:51 [INFO] signed certificate with serial number 243984200992790636783468017675717297449835481076
[root@master etcd]# ll *pem
-rw------- 1 root root 1679 Oct 9 11:27 ca-key.pem #the CA's private key
-rw-r--r-- 1 root root 1265 Oct 9 11:27 ca.pem #the CA's certificate
# Ask the CA to issue a certificate for etcd:
[root@master etcd]# cat server-csr.json
{
    "CN": "etcd",
    "hosts": [
        "192.168.0.21",
        "192.168.0.22",
        "192.168.0.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
[root@master etcd]# cat ca-config.json
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "www": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
# Issue the etcd server certificate:
[root@master etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
2019/10/09 11:33:59 [INFO] generate received request
2019/10/09 11:33:59 [INFO] received CSR
2019/10/09 11:33:59 [INFO] generating key: rsa-2048
2019/10/09 11:33:59 [INFO] encoded CSR
2019/10/09 11:33:59 [INFO] signed certificate with serial number 730704670326462109576871660342343616627819385700
2019/10/09 11:33:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
# The etcd server certificate and key
[root@master etcd]# ll server*.pem
-rw------- 1 root root 1675 Oct 9 11:33 server-key.pem
-rw-r--r-- 1 root root 1338 Oct 9 11:33 server.pem
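It is worth confirming that the issued certificate's SANs cover every etcd member address; cfssl-certinfo (installed earlier) prints them:
# The "sans" field should list 192.168.0.21, .22 and .23
cfssl-certinfo -cert server.pem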
# Unpack etcd:
[root@master ~]# tar xf etcd.tar.gz
[root@master ~]# ll
total 229420
drwxr-xr-x 5 root root 4096 Oct 2 22:13 etcd
-rw-r--r-- 1 root root 1078 Oct 2 23:10 etcd.service
[root@master ~]# ll /opt/
total 0
[root@master ~]# tree etcd
etcd
├── bin
│ ├── etcd
│ └── etcdctl
├── cfg
│ └── etcd.conf
└── ssl
├── ca.pem
├── server-key.pem
└── server.pem
3 directories, 6 files
# Edit the etcd configuration file:
[root@master cfg]# cat etcd.conf
#[Member]
ETCD_NAME="etcd-1" #this member's name in the cluster
ETCD_DATA_DIR="/var/lib/etcd/default.etcd" #directory where etcd stores its data
ETCD_LISTEN_PEER_URLS="https://192.168.0.21:2380" #peer traffic between cluster members
ETCD_LISTEN_CLIENT_URLS="https://192.168.0.21:2379" #client traffic from outside
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.0.21:2380" #peer address advertised to the other members
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.0.21:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.0.21:2380,etcd-2=https://192.168.0.22:2380,etcd-3=https://192.168.0.23:2380" #member connection info
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" #token used for authentication between members
ETCD_INITIAL_CLUSTER_STATE="new" #cluster state; "new" when bootstrapping
# The etcd systemd unit file. cert-file/key-file/trusted-ca-file secure client connections; the peer-* options secure member-to-member traffic. (systemd does not allow # comments after a \ continuation, so keep the real unit file free of inline annotations.)
[root@master ~]# cat etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd --name=${ETCD_NAME} --data-dir=${ETCD_DATA_DIR} --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} --initial-cluster=${ETCD_INITIAL_CLUSTER} --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} --initial-cluster-state=new --cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# Copy the TLS certificates into place:
[root@master etcd/ssl]# cp /root/TLS/etcd/{ca,server,server-key}.pem .
[root@master etcd/ssl]# ll
total 12
-rw-r--r-- 1 root root 1265 Oct 9 11:55 ca.pem
-rw------- 1 root root 1675 Oct 9 11:55 server-key.pem
-rw-r--r-- 1 root root 1338 Oct 9 11:55 server.pem
[root@master ssl]# cd /root/
[root@master ~]# mv etcd /opt/
[root@master ~]# tree /opt/
/opt/
└── etcd
├── bin
│ ├── etcd
│ └── etcdctl
├── cfg
│ └── etcd.conf
└── ssl
├── ca.pem
├── server-key.pem
└── server.pem
4 directories, 6 files
# Copy the etcd files to node1 and node2:
[root@master ~]# scp -r /opt/etcd root@192.168.0.22:/opt
[root@master ~]# scp -r /opt/etcd root@192.168.0.23:/opt
On node1 and node2, update the member name and IP addresses in /opt/etcd/cfg/etcd.conf, as sketched below.
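One way to make those edits (a sketch, assuming the file is exactly the one shown above; on node2 substitute etcd-3 and 192.168.0.23):
# On node1: rename the member and repoint the listen/advertise URLs
sed -i 's/^ETCD_NAME="etcd-1"/ETCD_NAME="etcd-2"/' /opt/etcd/cfg/etcd.conf
sed -i 's#https://192.168.0.21:2380"#https://192.168.0.22:2380"#' /opt/etcd/cfg/etcd.conf
sed -i 's#https://192.168.0.21:2379"#https://192.168.0.22:2379"#' /opt/etcd/cfg/etcd.conf
The closing-quote anchors deliberately leave the 192.168.0.21 entries inside ETCD_INITIAL_CLUSTER untouched.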
# Copy the service unit file to the matching machines:
[root@master ~]# scp etcd.service root@192.168.0.22:/usr/lib/systemd/system
root@192.168.0.22's password:
etcd.service 100% 1078 650.1KB/s 00:00
[root@master ~]# scp etcd.service root@192.168.0.23:/usr/lib/systemd/system
root@192.168.0.23's password:
etcd.service 100% 1078 729.9KB/s 00:00
# Install the unit file on the master as well
[root@master ~]# cp etcd.service /usr/lib/systemd/system
# Start the etcd service (run this on all three machines; the first member waits for the others to join, so a lone start may appear to hang):
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start etcd
[root@master ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
# Check the etcd cluster health:
[root@master ~]# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.0.21:2379,https://192.168.0.22:2379,https://192.168.0.23:2379" cluster-health
member 43721c4082c10e0d is healthy: got healthy result from https://192.168.0.21:2379
member c0a09aa80ae1d891 is healthy: got healthy result from https://192.168.0.22:2379
member c38568aa50bbde03 is healthy: got healthy result from https://192.168.0.23:2379
cluster is healthy
# Self-sign the apiserver certificates
[root@master ~]# cd TLS/k8s/
[root@master k8s]# ll
total 20
-rw-r--r-- 1 root root 294 Oct 3 13:12 ca-config.json #CA config for the apiserver certs
-rw-r--r-- 1 root root 263 Oct 3 13:12 ca-csr.json
-rwxr-xr-x 1 root root 321 Oct 3 08:46 generate_k8s_cert.sh
-rw-r--r-- 1 root root 230 Oct 3 13:12 kube-proxy-csr.json #CSR for the worker nodes' kube-proxy certificate
-rw-r--r-- 1 root root 718 Oct 3 08:45 server-csr.json #CSR for the apiserver certificate
[root@master TLS/k8s]# cat kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
[root@master TLS/k8s]# cat server-csr.json
{
    "CN": "kubernetes",
    "hosts": [
        "10.0.0.1",
        "127.0.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "192.168.0.21", #master; add the IP of every master in an HA setup
        "192.168.0.22", #DiDi Cloud load balancer (LB)
        "192.168.0.22" #spare IP (strip these # annotations from the real file; JSON has no comments)
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
# Generate the certificates:
[root@master k8s]# ./generate_k8s_cert.sh
2019/10/09 14:20:36 [INFO] generating a new CA key and certificate from CSR
2019/10/09 14:20:36 [INFO] generate received request
2019/10/09 14:20:36 [INFO] received CSR
2019/10/09 14:20:36 [INFO] generating key: rsa-2048
2019/10/09 14:20:37 [INFO] encoded CSR
2019/10/09 14:20:37 [INFO] signed certificate with serial number 455662557732513862994369493822352115010355965578
2019/10/09 14:20:37 [INFO] generate received request
2019/10/09 14:20:37 [INFO] received CSR
2019/10/09 14:20:37 [INFO] generating key: rsa-2048
2019/10/09 14:20:37 [INFO] encoded CSR
2019/10/09 14:20:37 [INFO] signed certificate with serial number 608159772956440920681276106363224448757755871864
2019/10/09 14:20:37 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
2019/10/09 14:20:37 [INFO] generate received request
2019/10/09 14:20:37 [INFO] received CSR
2019/10/09 14:20:37 [INFO] generating key: rsa-2048
2019/10/09 14:20:37 [INFO] encoded CSR
2019/10/09 14:20:37 [INFO] signed certificate with serial number 214111655217147844580852409042396632141577923697
2019/10/09 14:20:37 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
# List the certificate files
[root@master k8s]# ll *pem
-rw------- 1 root root 1675 Oct 9 14:20 ca-key.pem #the CA's key and certificate
-rw-r--r-- 1 root root 1359 Oct 9 14:20 ca.pem
-rw------- 1 root root 1675 Oct 9 14:20 kube-proxy-key.pem #certs used by the nodes
-rw-r--r-- 1 root root 1403 Oct 9 14:20 kube-proxy.pem
-rw------- 1 root root 1675 Oct 9 14:20 server-key.pem #certs used by the apiserver
-rw-r--r-- 1 root root 1627 Oct 9 14:20 server.pem
# On the master node:
Deploy the kube-apiserver component
[root@master ~]# tar xf k8s-master.tar.gz
[root@master ~]# ll
total 229432
-rw-r--r-- 1 root root 1078 Oct 2 23:10 etcd.service
-rw-r--r-- 1 root root 10148977 Oct 3 10:34 etcd.tar.gz
-rw-r--r-- 1 root root 90767613 Oct 3 10:35 k8s-master.tar.gz
-rw-r--r-- 1 root root 128129460 Oct 3 10:33 k8s-node.tar.gz
-rw-r--r-- 1 root root 286 Oct 2 23:13 kube-apiserver.service
-rw-r--r-- 1 root root 321 Oct 2 23:13 kube-controller-manager.service
drwxr-xr-x 6 root root 4096 Oct 2 22:13 kubernetes
-rw-r--r-- 1 root root 285 Oct 2 23:13 kube-scheduler.service
drwxr-xr-x 4 root root 4096 Oct 9 11:24 TLS
-rw-r--r-- 1 root root 5851667 Oct 3 10:46 TLS.tar.gz
[root@master ~]# cd kubernetes/
[root@master kubernetes]# ll
total 16
drwxr-xr-x 2 root root 4096 Oct 3 09:06 bin
drwxr-xr-x 2 root root 4096 Oct 3 08:55 cfg
drwxr-xr-x 2 root root 4096 Oct 2 23:13 logs
drwxr-xr-x 2 root root 4096 Oct 3 10:34 ssl
[root@master kubernetes]# tree
.
├── bin
│ ├── kube-apiserver
│ ├── kube-controller-manager
│ ├── kubectl
│ └── kube-scheduler
├── cfg
│ ├── kube-apiserver.conf
│ ├── kube-controller-manager.conf
│ ├── kube-scheduler.conf
│ └── token.csv
├── logs
└── ssl
# Copy the certificates first:
[root@master kubernetes]# cp /root/TLS/k8s/*pem ssl/
[root@master kubernetes]# tree
.
├── bin
│ ├── kube-apiserver
│ ├── kube-controller-manager
│ ├── kubectl
│ └── kube-scheduler
├── cfg
│ ├── kube-apiserver.conf
│ ├── kube-controller-manager.conf
│ ├── kube-scheduler.conf
│ └── token.csv
├── logs
└── ssl
├── ca-key.pem
├── ca.pem
├── server-key.pem
└── server.pem
4 directories, 12 files
# Edit the apiserver configuration. The # annotations below are explanatory only and must be stripped from the real file; a comment after a \ continuation breaks parsing:
[root@master cfg]# cat kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \ #log to files rather than stderr
--v=2 \ #log verbosity
--log-dir=/opt/kubernetes/logs \ #log directory
--etcd-servers=https://192.168.0.21:2379,https://192.168.0.22:2379,https://192.168.0.23:2379 \ #etcd endpoints
--bind-address=192.168.0.21 \ #address the apiserver listens on
--secure-port=6443 \ #port the apiserver listens on
--advertise-address=192.168.0.21 \ #address advertised for node communication
--allow-privileged=true \ #allow privileged containers
--service-cluster-ip-range=10.0.0.0/24 \ #virtual IP range for cluster Services
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \ #admission-control plugins
--authorization-mode=RBAC,Node \ #authorization modes; grants users their corresponding permissions
--enable-bootstrap-token-auth=true \ #enable bootstrap token authentication
--token-auth-file=/opt/kubernetes/cfg/token.csv \ #token file
--service-node-port-range=30000-32767 \ #NodePort range for Services
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \ #client cert for talking to kubelets
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \ #HTTPS serving certificate for the apiserver
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \ #certs for connecting to etcd
--etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 \ #audit log policy
--audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
# Move kubernetes to /opt/ and install the service unit files
[root@master ~]# mv kubernetes /opt/
[root@master ~]# mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
# Start the apiserver:
[root@master ~]# systemctl start kube-apiserver
[root@master ~]# systemctl enable kube-apiserver
[root@master ~]# ps -ef|grep kube
root 11567 1 71 14:52 ? 00:00:04 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.0.21:2379,https://192.168.0.22:2379,https://192.168.0.23:2379 --bind-address=192.168.0.21 --secure-port=6443 --advertise-address=192.168.0.21 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root 11579 1401 0 14:52 pts/0 00:00:00 grep --color=auto kube
# Deploy the kube-controller-manager component (the # annotations are again explanatory only)
[root@master /opt/kubernetes/cfg]# cat kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true \ #leader election
--master=127.0.0.1:8080 \ #apiserver address, i.e. the master
--address=127.0.0.1 --allocate-node-cidrs=true \ #allocate pod CIDRs to nodes, required by the CNI plugin
--cluster-cidr=10.244.0.0/16 \ #must match the CNI plugin's network
--service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \ #CA for signing cluster certificates
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem \ #used to sign service-account tokens
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s" #validity of the kubelet certs issued to nodes: 10 years
# Start the controller-manager:
[root@master ~]# systemctl start kube-controller-manager
[root@master ~]# systemctl enable kube-controller-manager
[root@master ~]# ps -ef|grep controller
root 11589 1 5 14:54 ? 00:00:01 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root 11615 1401 0 14:54 pts/0 00:00:00 grep --color=auto controller
# Deploy the kube-scheduler component
[root@master cfg]# cat kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1"
# Start the scheduler:
[root@master ~]# systemctl start kube-scheduler
[root@master ~]# systemctl enable kube-scheduler
[root@master ~]# ps -ef|grep scheduler
root 11603 1 6 14:54 ? 00:00:01 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
root 11617 1401 0 14:54 pts/0 00:00:00 grep --color=auto scheduler
# Enable everything at boot:
[root@master ~]# for i in $(ls /opt/kubernetes/bin);do systemctl enable $i;done
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
Failed to execute operation: No such file or directory
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
# The "Failed to execute operation" line comes from kubectl, which has no systemd unit; it is harmless.
# Move kubectl into the PATH
[root@master ~]# mv /opt/kubernetes/bin/kubectl /usr/local/bin/
# Check the component status (cs)
[root@master ~]# kubectl get cs
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-0 <unknown>
etcd-1 <unknown>
etcd-2 <unknown>
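The missing status columns are a known kubectl v1.16 display regression, not a cluster fault; the health conditions are still present on the objects:
# The conditions should report Healthy / "ok" / {"health":"true"}
kubectl get cs -o yaml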
# Enable TLS bootstrapping so kubelet certificates are issued automatically
[root@master ~]# cd /opt/kubernetes/cfg/
[root@master /opt/kubernetes/cfg]# ll
total 16
-rw-r--r-- 1 root root 1193 Oct 9 14:32 kube-apiserver.conf
-rw-r--r-- 1 root root 546 Oct 2 22:14 kube-controller-manager.conf
-rw-r--r-- 1 root root 148 Oct 2 22:14 kube-scheduler.conf
-rw-r--r-- 1 root root 83 Oct 2 22:14 token.csv
[root@master /opt/kubernetes/cfg]# cat token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
# Authorize the kubelet-bootstrap user:
[root@master cfg]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
# The token can also be regenerated by hand:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
# But the token the apiserver uses must match the one in every node's bootstrap.kubeconfig:
# the master's kube-apiserver.conf references it via --token-auth-file=/opt/kubernetes/cfg/token.csv
# each node's bootstrap.kubeconfig must carry the same value, e.g. token: c47ffb939f5ca36231d9e3121a252940
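If you do regenerate it, both sides need updating; a minimal sketch assuming the file layout used here:
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
# On the master: rewrite token.csv and restart the apiserver
echo "${TOKEN},kubelet-bootstrap,10001,\"system:node-bootstrapper\"" > /opt/kubernetes/cfg/token.csv
systemctl restart kube-apiserver
# On each node: carry the same token into bootstrap.kubeconfig
sed -i "s/token: .*/token: ${TOKEN}/" /opt/kubernetes/cfg/bootstrap.kubeconfig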
# Copy the downloaded k8s-node package to the node:
[root@master ~]# scp -r k8s-node.tar.gz root@192.168.0.22:/root/
root@192.168.0.22's password:
k8s-node.tar.gz 100% 122MB 76.4MB/s 00:01
[root@node1 ~]# tar xf k8s-node.tar.gz
[root@node1 ~]# ll
total 207876
-rw-r--r-- 1 root root 36662740 Aug 15 19:33 cni-plugins-linux-amd64-v0.8.2.tgz
-rw-r--r-- 1 root root 110 Oct 3 10:01 daemon.json
-rw-r--r-- 1 root root 48047231 Jun 25 16:45 docker-18.09.6.tgz
-rw-r--r-- 1 root root 501 Oct 3 10:01 docker.service
-rw-r--r-- 1 root root 128129460 Oct 9 15:06 k8s-node.tar.gz
-rw-r--r-- 1 root root 268 Oct 2 23:11 kubelet.service
-rw-r--r-- 1 root root 253 Oct 2 23:11 kube-proxy.service
drwxr-xr-x 6 root root 4096 Oct 2 22:14 kubernetes
# Install docker:
[root@node1 ~]# tar xf docker-18.09.6.tgz
[root@node1 ~]# ll
total 207880
-rw-r--r-- 1 root root 36662740 Aug 15 19:33 cni-plugins-linux-amd64-v0.8.2.tgz
-rw-r--r-- 1 root root 110 Oct 3 10:01 daemon.json
drwxrwxr-x 2 1000 1000 4096 May 4 10:42 docker
-rw-r--r-- 1 root root 48047231 Jun 25 16:45 docker-18.09.6.tgz
-rw-r--r-- 1 root root 501 Oct 3 10:01 docker.service
-rw-r--r-- 1 root root 128129460 Oct 9 15:06 k8s-node.tar.gz
-rw-r--r-- 1 root root 268 Oct 2 23:11 kubelet.service
-rw-r--r-- 1 root root 253 Oct 2 23:11 kube-proxy.service
drwxr-xr-x 6 root root 4096 Oct 2 22:14 kubernetes
[root@node1 ~]# mv docker/* /usr/bin/
[root@node1 ~]# mkdir /etc/docker
[root@node1 ~]# mv daemon.json /etc/docker/
[root@node1 ~]# mv docker.service /usr/lib/systemd/system/
[root@node1 ~]# systemctl start docker
[root@node1 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
# Check the docker version
[root@node1 ~]# docker version
Client: Docker Engine - Community
Version: 18.09.6
API version: 1.39
Go version: go1.10.8
Git commit: 481bc77
Built: Sat May 4 02:33:34 2019
OS/Arch: linux/amd64
Experimental: false
Server: Docker Engine - Community
Engine:
Version: 18.09.6
API version: 1.39 (minimum version 1.12)
Go version: go1.10.8
Git commit: 481bc77
Built: Sat May 4 02:41:08 2019
OS/Arch: linux/amd64
Experimental: false
# On the node, deploy the kubelet component
[root@node1 ~]# cd kubernetes/
[root@node1 kubernetes]# ll
total 16
drwxr-xr-x 2 root root 4096 Oct 3 09:07 bin
drwxr-xr-x 2 root root 4096 Oct 3 09:45 cfg
drwxr-xr-x 2 root root 4096 Oct 3 09:18 logs
drwxr-xr-x 2 root root 4096 Oct 3 09:18 ssl
[root@node1 kubernetes]# tree
.
├── bin
│ ├── kubelet
│ └── kube-proxy
├── cfg
│ ├── bootstrap.kubeconfig
│ ├── kubelet.conf
│ ├── kubelet-config.yml
│ ├── kube-proxy.conf
│ ├── kube-proxy-config.yml
│ └── kube-proxy.kubeconfig
├── logs
└── ssl
4 directories, 8 files
*.conf: basic configuration file
*.kubeconfig: configuration for connecting to the apiserver
*.yml: the component's main configuration
[root@node1 kubernetes/cfg]# cat kubelet.conf
KUBELET_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --hostname-override=node1 \ #change to the matching node name
--network-plugin=cni --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet-config.yml --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=lizhenliang/pause-amd64:3.0"
[root@node1 kubernetes/cfg]# cat bootstrap.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://192.168.0.21:6443 #set to the master IP
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940
# Compare with the token on the master
[root@master /opt/kubernetes/cfg]# cat token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
[root@node1 kubernetes/cfg]# cat kubelet-config.yml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
[root@node1 ~]# cat kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# Install the service unit files:
[root@node1 ~]# mv *service /usr/lib/systemd/system/
# Move kubernetes to /opt/
[root@node1 ~]# mv kubernetes/ /opt/
# Copy the certificates from the master's TLS directory to the node:
[root@master k8s]# scp -r ca.pem kube-proxy*pem root@192.168.0.22:/opt/kubernetes/ssl/
root@192.168.0.22's password:
ca.pem 100% 1359 1.0MB/s 00:00
kube-proxy-key.pem 100% 1675 1.3MB/s 00:00
kube-proxy.pem 100% 1403 1.1MB/s 00:00
# Start the kubelet:
[root@node1 kubernetes]# systemctl start kubelet
[root@node1 kubernetes]# ps aux|grep kubelet
root 12046 0.1 0.9 430624 37152 ? Ssl 17:02 0:00 /opt/kubernetes/bin/kubelet --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --hostname-override=node1 --network-plugin=cni --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --config=/opt/kubernetes/cfg/kubelet-config.yml --cert-dir=/opt/kubernetes/ssl --pod-infra-container-image=lizhenliang/pause-amd64:3.0
# Verify on the master:
# List pending certificate signing requests
[root@master k8s]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-igpk3RzozP8Wo_MPjcRSzgwQXTNBZdWKkHI5Ofnp_oo 118s kubelet-bootstrap Pending
# Approve the node's CSR
[root@master k8s]# kubectl certificate approve node-csr-igpk3RzozP8Wo_MPjcRSzgwQXTNBZdWKkHI5Ofnp_oo
certificatesigningrequest.certificates.k8s.io/node-csr-igpk3RzozP8Wo_MPjcRSzgwQXTNBZdWKkHI5Ofnp_oo approved
# List the nodes
[root@master k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1 NotReady <none> 47s v1.16.0
node1 will turn Ready once the CNI plugin has been deployed.
# The bootstrap process generated certificates and a kubeconfig along the way:
[root@node1 /opt/kubernetes]# ll ssl/
total 24
-rw-r--r-- 1 root root 1359 Oct 9 17:01 ca.pem
-rw------- 1 root root 1265 Oct 9 17:04 kubelet-client-2019-10-09-17-04-39.pem
lrwxrwxrwx 1 root root 58 Oct 9 17:04 kubelet-client-current.pem -> /opt/kubernetes/ssl/kubelet-client-2019-10-09-17-04-39.pem
-rw-r--r-- 1 root root 2144 Oct 9 16:57 kubelet.crt
-rw------- 1 root root 1675 Oct 9 16:57 kubelet.key
-rw------- 1 root root 1675 Oct 9 17:01 kube-proxy-key.pem
-rw-r--r-- 1 root root 1403 Oct 9 17:01 kube-proxy.pem
[root@node1 /opt/kubernetes]# ll cfg/
total 28
-rw-r--r-- 1 root root 376 Oct 9 16:28 bootstrap.kubeconfig
-rw-r--r-- 1 root root 388 Oct 9 16:25 kubelet.conf
-rw-r--r-- 1 root root 611 Oct 2 22:15 kubelet-config.yml
-rw------- 1 root root 505 Oct 9 17:04 kubelet.kubeconfig
-rw-r--r-- 1 root root 132 Oct 3 09:16 kube-proxy.conf
-rw-r--r-- 1 root root 315 Oct 9 16:48 kube-proxy-config.yml
-rw-r--r-- 1 root root 432 Oct 9 16:45 kube-proxy.kubeconfig
Deploying node2 is the same as deploying node1; just change the IPs and node name where noted.
The related configuration files:
[root@node1 /opt/kubernetes/cfg]# cat kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --config=/opt/kubernetes/cfg/kube-proxy-config.yml"
[root@node1 /opt/kubernetes/cfg]# cat kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: node1 #use node2 on node2
clusterCIDR: 10.0.0.0/24
mode: ipvs
ipvs:
  scheduler: "rr"
iptables:
  masqueradeAll: true
[root@node1 /opt/kubernetes/cfg]# cat kube-proxy.kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://192.168.0.21:6443 #set to the master IP
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem
# Start kube-proxy and enable it at boot
[root@node2 kubernetes]# systemctl start kube-proxy
[root@node2 kubernetes]# systemctl enable kube-proxy
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
# Check the process
[root@node2 kubernetes]# ps axu|grep kube-proxy
root 11891 0.4 0.6 142856 23436 ? Ssl 17:21 0:00 /opt/kubernetes/bin/kube-proxy --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --config=/opt/kubernetes/cfg/kube-proxy-config.yml
root 12049 0.0 0.0 112708 988 pts/0 S+ 17:21 0:00 grep --color=auto kube-proxy
# List the nodes
[root@master k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1 NotReady <none> 17m v1.16.0
node2 NotReady <none> 3m12s v1.16.0
Deploying kube-proxy on node2 works the same way as on node1; again adjust the IPs and node name, as sketched below.
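The node-specific fields are small enough to flip with sed (a sketch, assuming the files shown above):
# On node2: change the node name in kubelet.conf and kube-proxy-config.yml
sed -i 's/--hostname-override=node1/--hostname-override=node2/' /opt/kubernetes/cfg/kubelet.conf
sed -i 's/hostnameOverride: node1/hostnameOverride: node2/' /opt/kubernetes/cfg/kube-proxy-config.yml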
CNI plugin binaries: https://github.com/containernetworking/plugins/releases
# Run the following on node1 and node2
[root@node1 ~]# ll
total 207880
-rw-r--r-- 1 root root 36662740 Aug 15 19:33 cni-plugins-linux-amd64-v0.8.2.tgz
[root@node1 ~]# mkdir /opt/cni/bin -p
[root@node1 ~]# mkdir /opt/cni/net.d -p
[root@node1 ~]# tar xf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
[root@node1 ~]# ll /opt/cni/bin/
total 70072
-rwxr-xr-x 1 root root 4159253 Aug 15 18:05 bandwidth
-rwxr-xr-x 1 root root 4619706 Aug 15 18:05 bridge
-rwxr-xr-x 1 root root 12124236 Aug 15 18:05 dhcp
-rwxr-xr-x 1 root root 5968249 Aug 15 18:05 firewall
-rwxr-xr-x 1 root root 3069474 Aug 15 18:05 flannel
-rwxr-xr-x 1 root root 4113755 Aug 15 18:05 host-device
-rwxr-xr-x 1 root root 3614305 Aug 15 18:05 host-local
-rwxr-xr-x 1 root root 4275238 Aug 15 18:05 ipvlan
-rwxr-xr-x 1 root root 3178836 Aug 15 18:05 loopback
-rwxr-xr-x 1 root root 4337932 Aug 15 18:05 macvlan
-rwxr-xr-x 1 root root 3891363 Aug 15 18:05 portmap
-rwxr-xr-x 1 root root 4542556 Aug 15 18:05 ptp
-rwxr-xr-x 1 root root 3392736 Aug 15 18:05 sbr
-rwxr-xr-x 1 root root 2885430 Aug 15 18:05 static
-rwxr-xr-x 1 root root 3261232 Aug 15 18:05 tuning
-rwxr-xr-x 1 root root 4275044 Aug 15 18:05 vlan
# Make sure the kubelet uses the CNI network plugin:
[root@node1 ~]# cat /opt/kubernetes/cfg/kubelet.conf |grep cni
--network-plugin=cni
Reference: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
# Deploy the CNI network plugin (flannel)
# Run the following on the master
# Download the yaml file: https://pan.baidu.com/s/1C1-GwFR_xs84Pvy24tPPGw (extraction code: ndw3)
# Network mode: flannel runs on each node using the host's network
[root@master ~]# kubectl apply -f kube-flannel.yaml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
# Check the pods
[root@master ~]# kubectl -n kube-system get pods
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-amd64-7c962 1/1 Running 0 41s
kube-flannel-ds-amd64-g4b4q 1/1 Running 0 41s
# Check the nodes
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1 Ready <none> 32m v1.16.0
node2 Ready <none> 18m v1.16.0
# Grant the apiserver access to kubelets so that kubectl logs works:
[root@master ~]# ll apiserver-to-kubelet-rbac.yaml
-rw-r--r-- 1 root root 745 Oct 2 22:14 apiserver-to-kubelet-rbac.yaml
[root@master ~]# kubectl apply -f apiserver-to-kubelet-rbac.yaml
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
Reference: https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/
On the master:
[root@master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml
# recommended.yaml must be modified to add NodePort 30001 (see the excerpt below)
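One way to make that change (a sketch based on the Service named kubernetes-dashboard in the v2.0.0-beta4 manifest; port 443 and targetPort 8443 are its stock values):
spec:
  type: NodePort        # added: expose via NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001   # added: fixed external port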
[root@master ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
# The dashboard is exposed via NodePort
[root@master ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-566cddb686-h5f4h 1/1 Running 0 81s
kubernetes-dashboard-7b5bf5d559-49ltn 1/1 Running 0 81s
[root@master ~]# kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.0.0.225 <none> 8000/TCP 96s
kubernetes-dashboard NodePort 10.0.0.19 <none> 443:30001/TCP 96s
# Upload dashboard-adminuser.yaml
[root@master ~]# kubectl apply -f dashboard-adminuser.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
Get the token:
[root@master ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-zp7r5
Namespace: kubernetes-dashboard
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 26a698fd-afd2-482d-9d00-e2a9352113fd
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1359 bytes
namespace: 20 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IjFSSGFvclJuU0ZmeDM2SG10YzNaUW9CdDY0UGd4MzZabHZ1dlRXM2J4NVEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXpwN3I1Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyNmE2OThmZC1hZmQyLTQ4MmQtOWQwMC1lMmE5MzUyMTEzZmQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.BHZuAKbogTeX2tImy0_Ia02jQEpo0chQP4OJaVo78YfC5NoFBr3n56xhpZ_m-GdcOAN2dt1Z2uPQfJ1bpPP0lObKhK8xEtKaoXm0UUHjsDHlBrOTCcOoaPkMFkLVLulU_BwZecrCO1wyNI1U_dLBX4WjBYp_4PFqU3HflYFvYzUYImkzNKot-GibBiH_9pedYTTkhOFljQWz_sSWMNr6AnhTpNDvut8m1uTgQepMTWzqmZweHtwSt4owbWqsIroviUxcZy9NQ0YxvcNod4l1ppp07h71ECuhhNzGusECTE3ANO2aX4La0kmsg9C0-QlMckIF_1cXbzgBVTZsqxLFrQ
# Upload coredns.yaml; its kube-dns Service clusterIP must match the clusterDNS set in kubelet-config.yml (10.0.0.2 here)
[root@master ~]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
[root@master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6d8cfdd59d-7ncxd 1/1 Running 0 8s
kube-flannel-ds-amd64-7c962 1/1 Running 0 26m
kube-flannel-ds-amd64-g4b4q 1/1 Running 0 26m
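A quick consistency check: the kube-dns Service IP should equal the kubelet's clusterDNS value (10.0.0.2 above).
# CLUSTER-IP here should be 10.0.0.2
kubectl get svc kube-dns -n kube-system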
# First check that every component on master, node1 and node2 is up
netstat -lntp
# Check that kubernetes-dashboard is Running
kubectl get pods -A -o wide
# Confirm the 30001 NodePort:
kubectl get svc -n kubernetes-dashboard
# Firefox can log in; open:
https://192.168.0.22:30001
# Paste the token into Firefox; retrieve it the same way:
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
# Create a directory for the certificate files
[root@master ~]# mkdir key
[root@master ~]# cd key
# Generate a self-signed certificate
[root@master ~/key]# openssl genrsa -out dashboard.key 2048
Generating RSA private key, 2048 bit long modulus
...............................................................+++
...................................................................................................+++
e is 65537 (0x10001)
[root@master ~/key]# openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.0.22'
[root@master ~/key]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Signature ok
subject=/CN=192.168.0.22
Getting Private key
# Delete the original certificate secret
# Note: in recent Dashboard releases the namespace has changed to kubernetes-dashboard
[root@master ~/key]# kubectl delete secret kubernetes-dashboard-certs -n kubernetes-dashboard
secret "kubernetes-dashboard-certs" deleted
# Create a secret from the new certificate
[root@master ~/key]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
secret/kubernetes-dashboard-certs created
# Find the running pods
[root@master ~/key]# kubectl get pod -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-566cddb686-g98qw 1/1 Running 1 2d20h
kubernetes-dashboard-7b5bf5d559-gx9h5 1/1 Running 2 2d20h
# Delete the pods so they restart with the new certificate
[root@master ~/key]# kubectl delete po dashboard-metrics-scraper-566cddb686-g98qw -n kubernetes-dashboard
pod "dashboard-metrics-scraper-566cddb686-g98qw" deleted
[root@master ~/key]# kubectl delete po kubernetes-dashboard-7b5bf5d559-gx9h5 -n kubernetes-dashboard
pod "kubernetes-dashboard-7b5bf5d559-gx9h5" deleted
# With many pods, this one-liner deletes them in bulk:
[root@master ~/key]# kubectl get pod -n kubernetes-dashboard | grep -v NAME | awk '{print "kubectl delete po " $1 " -n kubernetes-dashboard"}' | sh
# Check that the new pods are Running
[root@master ~/key]# kubectl get pod -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-566cddb686-bkskq 1/1 Running 0 9m15s
kubernetes-dashboard-7b5bf5d559-p7928 1/1 Running 0 8m49s
# Refresh the page in Chrome and it now renders correctly; fetch the token the same way as with Firefox above.
# For a basic Chinese UI, setting Chinese as Chrome's preferred language is enough.
# Full localization would require editing the dashboard's yaml and is still being worked out.
# Install docker offline on 192.168.0.24 (pick whichever version you need)
# Copy the downloaded docker-18.09.6 tarball
[root@node1 ~]# scp docker-18.09.6.tgz root@192.168.0.24:/root/
root@192.168.0.24's password:
docker-18.09.6.tgz 100% 46MB 3.6MB/s 00:12
# Install docker:
[root@harbor ~]# tar xf docker-18.09.6.tgz
[root@harbor ~]# mv docker/* /usr/bin/
[root@harbor ~]# mkdir /etc/docker
[root@node1 ~]# scp /etc/docker/daemon.json root@192.168.0.24:/etc/docker
[root@node1 ~]# scp /usr/lib/systemd/system/docker.service root@192.168.0.24:/usr/lib/systemd/system/
[root@harbor ~]# systemctl start docker
[root@harbor ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
# Check the docker version
[root@harbor ~]# docker version
Client: Docker Engine - Community
Version: 18.09.6
API version: 1.39
Go version: go1.10.8
Git commit: 481bc77
Built: Sat May 4 02:33:34 2019
OS/Arch: linux/amd64
Experimental: false
Server: Docker Engine - Community
Engine:
Version: 18.09.6
API version: 1.39 (minimum version 1.12)
Go version: go1.10.8
Git commit: 481bc77
Built: Sat May 4 02:41:08 2019
OS/Arch: linux/amd64
Experimental: false
# Install harbor
[root@harbor ~]# cd /opt/
# Upload the downloaded harbor package
[root@harbor /opt]# rz harbor-offline-installer-v1.9.0-rc1.tgz
# Unpack the harbor package
[root@harbor /opt]# tar zxf harbor-offline-installer-v1.9.0-rc1.tgz
# Enter the harbor directory
[root@harbor /opt]# cd harbor/
# Edit the harbor configuration
[root@harbor /opt/harbor]# vim harbor.yml
5 hostname: 192.168.0.24 #harbor IP
10 port: 8888 #port 8888
27 harbor_admin_password: 123456 #password 123456
40 data_volume: /data/harbor
# Run the installer
[root@harbor /opt/harbor]# yum install docker-compose -y
[root@harbor /opt/harbor]# ./install.sh
# Access it in a browser
http://192.168.0.24:8888
username: admin
password: 123456
############### NOTE ###############
# Configure docker on every node to trust the harbor registry, then restart docker
cat >/etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://ig2l319y.mirror.aliyuncs.com"],
"insecure-registries" : ["http://192.168.0.24:8888"]
}
EOF
systemctl restart docker
############### NOTE ###############
If harbor stops working after docker is restarted on node1, just restart harbor:
cd /opt/harbor
docker-compose restart
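To confirm the daemon picked the setting up, the registry should show up in docker info (a quick check):
# 192.168.0.24:8888 should appear under "Insecure Registries"
docker info | grep -A 3 'Insecure Registries'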
# Log the node's docker into harbor
[root@node1 ~]# docker login 192.168.0.24:8888
admin
123456
# Pull the nginx image
[root@node1 ~]# docker pull nginx
# Tag the nginx image
[root@node1 ~]# docker tag nginx:latest 192.168.0.24:8888/k8s/nginx:latest
# Push the nginx image to harbor (if this errors, create a "k8s" project in the harbor UI first)
[root@node1 ~]# docker push 192.168.0.24:8888/k8s/nginx:latest
The push refers to repository [192.168.0.24:8888/k8s/nginx]
b3003aac411c: Pushed
216cf33c0a28: Pushed
c2adabaecedb: Pushed
latest: digest: sha256:cccef6d6bdea671c394956e24b0d0c44cd82dbe83f543a47fdc790fadea48422 size: 948
# Pull the tomcat image
[root@node1 ~]# docker pull kubeguide/tomcat-app:v1
# Tag the tomcat image
[root@node1 ~]# docker tag kubeguide/tomcat-app:v1 192.168.0.24:8888/k8s/tomcat-app:v1
# Push the tomcat image to harbor
[root@node1 ~]# docker push 192.168.0.24:8888/k8s/tomcat-app:v1
The push refers to repository [192.168.0.24:8888/k8s/tomcat-app]
fe9a890c4f24: Pushed
5f70bf18a086: Pushed
a072f755a133: Pushed
6d0267f8a9fd: Pushed
7bb92eb08c02: Pushed
d8ba5f179687: Pushed
2275023dea33: Pushed
d490458a60cb: Pushed
bb3e02b5a488: Pushed
3b7a0c95e085: Pushed
02adacdfda2f: Pushed
d2c5e3a8d3d3: Pushed
4dcab49015d4: Pushed
v1: digest: sha256:565bb4e52ac67b4d37feed9ea4626b786f23e0871451587c7187683532a6188f size: 5719
# Pull the mysql image
[root@node1 ~]# docker pull mysql:5.7
# Tag the mysql image
[root@node1 ~]# docker tag mysql:5.7 192.168.0.24:8888/k8s/mysql:5.7
# Push the mysql image to harbor
[root@node1 ~]# docker push 192.168.0.24:8888/k8s/mysql:5.7
The push refers to repository [192.168.0.24:8888/k8s/mysql]
bb45db375972: Pushed
83daf8b472c1: Pushed
d480fcc12a00: Pushed
b95bdae56125: Pushed
ace74cb61ec0: Pushed
d84f8cf1dc23: Pushed
24bd91e7be37: Pushed
49baacc63c3b: Pushed
8d3b3830445d: Pushed
49003fe88142: Pushed
c2adabaecedb: Pushed
5.7: digest: sha256:8044616c01e46c6bc826d205103a2b650a1679be2f34beab9bbf6c6f642df673 size: 2621
# Clean up the local images on the node
# Remove the nginx image
[root@node1 ~]# docker rmi nginx:latest
Untagged: nginx:latest
Untagged: nginx@sha256:86ae264c3f4acb99b2dee4d0098c40cb8c46dcf9e1148f05d3a51c4df6758c12
# Remove the tagged nginx image
[root@node1 ~]# docker rmi 192.168.0.24:8888/k8s/nginx:latest
Untagged: 192.168.0.24:8888/k8s/nginx:latest
Untagged: 192.168.0.24:8888/k8s/nginx@sha256:cccef6d6bdea671c394956e24b0d0c44cd82dbe83f543a47fdc790fadea48422
Deleted: sha256:602e111c06b6934013578ad80554a074049c59441d9bcd963cb4a7feccede7a5
Deleted: sha256:81eaddad75aaa517b4a597912da28c2f5b905f6e9789dce3aea874b040aad201
Deleted: sha256:73cafa8418003ecfaa02360f181c132b2cf4b61433e1bd5c84012941105865c8
Deleted: sha256:c2adabaecedbda0af72b153c6499a0555f3a769d52370469d8f6bd6328af9b13
# Remove the tomcat image
[root@node1 ~]# docker rmi kubeguide/tomcat-app:v1
Untagged: kubeguide/tomcat-app:v1
Untagged: kubeguide/tomcat-app@sha256:7a9193c2e5c6c74b4ad49a8abbf75373d4ab76c8f8db87672dc526b96ac69ac4
# Remove the tagged tomcat image
[root@node1 ~]# docker rmi 192.168.0.24:8888/k8s/tomcat-app:v1
Untagged: 192.168.0.24:8888/k8s/tomcat-app:v1
Untagged: 192.168.0.24:8888/k8s/tomcat-app@sha256:565bb4e52ac67b4d37feed9ea4626b786f23e0871451587c7187683532a6188f
Deleted: sha256:a29e200a18e9b15176cd795710b71d7b1bc97207ed6fcdebba645769c3b01669
Deleted: sha256:a62053ded7e4a78fbb95fe42b7bb7350af119793f0f0074c5e9f739cd7854273
Deleted: sha256:5b0e85206d453320e6ea04b223b130763af20f07cb7779e4f675138e957f3f7f
Deleted: sha256:89ac887e60358c88dfa669821327c1bdacd60fd5f275ae35bce4478774b1dcd9
Deleted: sha256:86de228f5b3f2e75eb6a5dd53e91712f29f90326748276f28968a62496f41b8a
Deleted: sha256:f19e1b5d29ff264a298f8d3b8c0400ee221531ceded26de2e37c7dff4a2c5a65
Deleted: sha256:288f2e1bbccf7334ebede0ecdfa5f2984a4aa624a5344bcdbbb46eb743004406
Deleted: sha256:4f72a3a00d81660f62c5c5431dedd678b8ada11f4c4eae16a661822ff8a53357
Deleted: sha256:008a252b57651a21cc74618609e5b572d1210c5e163f8250bc9f0430dd19af56
Deleted: sha256:be4a7200a08ba03a73963470cba53cc605d6cece1414bc79b461dfea8b193d09
Deleted: sha256:77706b9517ed7f9e104c6916b154b6c93ae8537601e7a4f259174f83c7a20e8b
Deleted: sha256:011645e63d2a36c8dd076fc7c090b4a40392146833267b299ea0c07fc58c5c45
Deleted: sha256:9d5b41f4b6986ef26894594ed35ffee618c53b47f7af896f21ffd5c1eea4bf0d
Deleted: sha256:47b8eed5f752447a2e82dc4e08d372c803aa191d45903f003bffe7f302f13d98
Deleted: sha256:fbb6ab9009191f8cb9945ad8cc3db46015f06febb2b6d2ed08a9cc345db6c413
Deleted: sha256:984a1441034074566b55bd69b2e26884fff72acac19f7887c9f5c4b18ebc9ab0
Deleted: sha256:8877969b1254ecd276a6d9278c511b599c9d9b01403a6cacd88cacddd7f8a513
Deleted: sha256:631b33caab2cacc88aca757bf184c66c39bc9ef66a2ba1e171251c3c7448c66a
Deleted: sha256:15c38ef41f67cb076a98cdcbc4a62fa777b6b65aa5f6a16a7980c58f3a6d3965
Deleted: sha256:93b9c0c9a0b73e40ff7249c552244a2c96554d7d869c7b00a626d88577ab0b92
Deleted: sha256:237df0c38e90e9f542c97f7319fc8601d2a8e2ffae463ecd3bec3a8e339d4ca8
Deleted: sha256:c1ee3ae280201d62a63d42fc1001efc005f9d96c9227298b10483445f113e818
Deleted: sha256:0448588c60dd9136baf0a0a937402aec1f6a76e08657b4e4cd0fae2589235fac
Deleted: sha256:34b43d2b03eeded2b3cee2d11e205f89daa904976efcb8b05842226acadc74b6
Deleted: sha256:cfc706db54d82b97b34712d3da59473b5284abf189f812e119babeb2a2530331
Deleted: sha256:db80480bef0957c557c70969684a28889240dac9f55018d97da96cc2d8948c11
Deleted: sha256:4dcab49015d47e8f300ec33400a02cebc7b54cadd09c37e49eccbc655279da90
# Remove the mysql image
[root@node1 ~]# docker rmi mysql:5.7
Untagged: mysql:5.7
Untagged: mysql@sha256:95b4bc7c1b111906fdb7a39cd990dd99f21c594722735d059769b80312eb57a7
[root@node1 ~]# docker rmi 192.168.0.24:8888/mysql:5.7
Error: No such image: 192.168.0.24:8888/mysql:5.7
[root@node1 ~]# docker rmi 192.168.0.24:8888/k8s/mysql:5.7
Untagged: 192.168.0.24:8888/k8s/mysql:5.7
Untagged: 192.168.0.24:8888/k8s/mysql@sha256:8044616c01e46c6bc826d205103a2b650a1679be2f34beab9bbf6c6f642df673
Deleted: sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8
Deleted: sha256:6e9a8dddf039fbb90d3f80acd426eaca7604879399110d55ea731c7e0d49dac3
Deleted: sha256:fca6544c7e6840a421a07cd2cd9aa24263f32a49c03898b4adc78dc4d4b2a773
Deleted: sha256:38f7de84f809a3c6beb209f70f3cff6b81249ba028c35743188229f4e9047de7
Deleted: sha256:1c91814ddf810a764fa1815280c38b5bc3ff799d5e768d359ace9443df52a662
Deleted: sha256:af3a3fda5da43f7f03f8b3ca0330d120f9a7e5c0d9c5fc7b738ac45468a7de38
Deleted: sha256:86dceae8843638ef500e2a796a326ecb2ba1fd31cff817c4537afb67f04d6ff2
Deleted: sha256:2021ba03e116f319f5e8a90e3498d338ed213f1e036f59ffacc98266e7d3da6b
Deleted: sha256:3a3e1773b14d8b704f018b086292f46309cc27535f7484e0dfbf5a4308c81202
Deleted: sha256:372a8a077b4e29fa382990aeee3e38181481043fe145cb979ccd52588b4f36be
Deleted: sha256:821bf1f5688724dd59945682c34c0d33694af9c554cc1c3c69ca3aa9ba5d79ea
Deleted: sha256:c2adabaecedbda0af72b153c6499a0555f3a769d52370469d8f6bd6328af9b13
# Delete the previous demo project, if one exists
[root@master ~]# kubectl delete -f tomcat-demo.yaml
# Point the demo manifest at the harbor image addresses
vim tomcat-demo.yaml
image: 192.168.0.24:8888/k8s/mysql:5.7
image: 192.168.0.24:8888/k8s/tomcat-app:v1
# Apply the manifest
[root@master ~]# kubectl create -f tomcat-demo.yaml
# If it errors:
# The pod status at this point shows the image pulls failing
[root@node1 ~/demo]# kubectl get pod
NAME READY STATUS RESTARTS AGE
mysql-7d746b5577-jcs7q 0/1 ImagePullBackOff 0 8s
myweb-764df5ffdd-fptn9 0/1 ImagePullBackOff 0 8s
myweb-764df5ffdd-pmkz7 0/1 ErrImagePull 0 8s
# Inspect the pod events
kubectl describe pod mysql-7d746b5577-jcs7q
# The key error:
Failed to pull image "192.168.0.24:8888/k8s/mysql:5.7": rpc error: code = Unknown desc = Error response from daemon: pull access denied for 192.168.0.24:8888/k8s/mysql, repository does not exist or may require 'docker login'
# Log in and inspect docker's credential file
docker login 192.168.0.24:8888
cat /root/.docker/config.json
# Base64-encode the docker credential file
[root@node1 ~]# cat /root/.docker/config.json|base64
ewoJImF1dGhzIjogewoJCSIxOTIuMTY4LjAuMjQ6ODg4OCI6IHsKCQkJImF1dGgiOiAiWVdSdGFX
NDZNVEl6TkRVMiIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRv
Y2tlci1DbGllbnQvMTguMDkuNiAobGludXgpIgoJfQp9
Create and apply a Secret resource holding the docker login.
Note!!!
1. .dockerconfigjson: must be the raw base64 string, with no line breaks
2. the base64 value is one single line, not several
3. the trailing type field is required
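The wrapped output above comes from base64's default 76-column line length; with GNU coreutils, -w 0 emits the single unwrapped line the Secret needs:
# One-line base64 of the docker credentials
base64 -w 0 /root/.docker/config.json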
cat >harbor-secret.yaml<<EOF
apiVersion: v1
kind: Secret
metadata:
  name: harbor-secret
data:
  .dockerconfigjson: ewoJImF1dGhzIjogewoJCSIxOTIuMTY4LjAuMjQ6ODg4OCI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZNVEl6TkRVMiIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNiAobGludXgpIgoJfQp9
type: kubernetes.io/dockerconfigjson
EOF
# Apply the manifest
kubectl create -f harbor-secret.yaml
kubectl get secrets
# Modify the demo manifest to add the image-pull secret
# Field documentation:
kubectl explain deployment.spec.template.spec.imagePullSecrets
vim tomcat-demo.yaml
23 imagePullSecrets:
24 - name: harbor-secret
60 imagePullSecrets:
61 - name: harbor-secret
# The final tomcat manifest
[root@master ~]# cat tomcat-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: 192.168.0.24:8888/k8s/mysql:5.7 #updated image path
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"
      imagePullSecrets:
      - name: harbor-secret
---
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myweb
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: 192.168.0.24:8888/k8s/tomcat-app:v1 #updated image path
        ports:
        - containerPort: 8080
        env:
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'
        - name: MYSQL_SERVICE_PORT
          value: '3306'
      imagePullSecrets:
      - name: harbor-secret
---
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30002
  selector:
    app: myweb
# Apply the manifest and check the pods
kubectl create -f tomcat-demo.yaml
kubectl get pod -o wide
# View in a browser
http://192.168.0.22:30002
Source: https://www.cnblogs.com/skyy/p/12795723.html