ubuntu版本 20.04
docker版本 19.03.10
kubernetes版本 1.18.2
主机名 | ip |
---|---|
k8s-master01 | 10.0.0.101 |
k8s-master02 | 10.0.0.102 |
k8s-master03 | 10.0.0.103 |
k8s-node01 | 10.0.0.104 |
ufw disabled
# Disable the ufw firewall so k8s ports are reachable between nodes
systemctl disable ufw.service
# kubelet requires swap off; comment out swap entries so it stays off after reboot
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab   # fixed: original used Unicode curly quotes, which break sed
# Base packages (fixed: apt-transport-https was listed twice)
apt install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common lrzsz net-tools
# Kernel parameters required by kubernetes networking
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
EOF
sysctl --system
# Interactive step: edit the GRUB defaults to enable the memory cgroup and
# swap accounting needed by docker/kubelet. A reboot is required afterwards
# for the new kernel command line to take effect.
vim /etc/default/grub
# Add the parameters (set this line inside the editor):
GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
# Regenerate /boot/grub/grub.cfg from the edited defaults
update-grub
# Use one timezone on every node so logs correlate; restart rsyslog to pick it up
timedatectl set-timezone Asia/Shanghai
systemctl restart rsyslog
# Apply bridge-netfilter settings immediately (runtime values)
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
# Persist the same settings across reboots.
# NOTE(review): this overwrites /etc/sysctl.conf wholesale, as the original
# tutorial did; a drop-in under /etc/sysctl.d/ would be less destructive.
# (Replaced the original `echo """..."""` hack with a heredoc.)
cat <<EOF > /etc/sysctl.conf
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl -p
1、修改后的文件
root@k8s-master01:~# cat /etc/sysctl.d/10-network-security.conf
# Turn on Source Address Verification in all interfaces to
# prevent some spoofing attacks.
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.all.rp_filter=1
2、重置系统参数
sysctl --system
# 1. Add Docker's official apt repository
#    (apt-key is deprecated on newer Ubuntu releases but works on 20.04)
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
# 2. List the installable historical versions
apt list docker-ce -a
# 3. Install the pinned version matching this guide (19.03.10, focal build)
apt install -y docker-ce=5:19.03.10~3-0~ubuntu-focal docker-ce-cli=5:19.03.10~3-0~ubuntu-focal containerd.io
systemctl enable docker && systemctl start docker
# 1. Add the Aliyun Kubernetes apt repository (mirror of packages.cloud.google.com)
curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
tee /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
# 2. List the installable kubeadm versions
apt list kubeadm -a
# 3. Install the pinned 1.18.2 packages
apt install -y kubeadm=1.18.2-00 kubectl=1.18.2-00 kubelet=1.18.2-00
# 4. Make the kubelet cgroup driver match docker's (cgroupfs).
#    NOTE(review): the original line
#      Environment="cgroup-driver=systemd/cgroup-driver=cgroupfs"
#    is not valid systemd drop-in syntax (it reads like a mangled sed
#    substitution). Set the driver via KUBELET_EXTRA_ARGS instead:
vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=cgroupfs"
# 5. Enable and start the kubelet (it will crash-loop until kubeadm init/join)
systemctl enable kubelet && systemctl start kubelet
# Online install (node has internet access)
sudo apt-get install -y libssl-dev openssl libpopt-dev
sudo apt-get install -y keepalived
# Offline install: upload the .deb packages first, then install with dpkg.
# (The %3a in the filenames is a URL-encoded ':' from the package epoch.)
dpkg -i ipvsadm_1%3a1.31-1_amd64.deb
dpkg -i keepalived_1%3a2.0.19-2_amd64.deb
# keepalived config for k8s-master01 — highest priority (100), normally holds
# the VIP 10.0.0.200. All masters use state BACKUP + nopreempt so a recovered
# node does not steal the VIP back (avoids apiserver connection flaps).
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32             # NOTE(review): must match this node's NIC name
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr                  # fixed: 'loadbalance' is not a valid lb_algo scheduler
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {               # HTTPS health check against the apiserver
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
# keepalived config for k8s-master02 — priority 50 (below master01).
# Fixed vs the original: 'lb_kind DR net_mask 255.255.255.0' had been fused
# onto one line (keepalived would misparse it) and 'loadbalance' is not a
# valid lb_algo scheduler.
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32             # NOTE(review): must match this node's NIC name
    virtual_router_id 80
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {               # HTTPS health check against the apiserver
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
# keepalived config for k8s-master03 — lowest priority (30).
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id K8S-LIVE
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens32             # NOTE(review): must match this node's NIC name
    virtual_router_id 80
    priority 30
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        10.0.0.200
    }
}
virtual_server 10.0.0.200 6443 {
    delay_loop 6
    lb_algo rr                  # fixed: 'loadbalance' is not a valid lb_algo scheduler
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    real_server 10.0.0.101 6443 {
        weight 1
        SSL_GET {               # HTTPS health check against the apiserver
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.102 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 10.0.0.103 6443 {
        weight 1
        SSL_GET {
            url {
                path /healthz
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
EOF
systemctl enable keepalived && systemctl start keepalived
观察设置的vip地址是否是正常在master01上
上传镜像
# Pre-load the 1.18.2 control-plane images from uploaded tarballs so kubeadm
# does not need to pull from k8s.gcr.io (often unreachable from CN networks)
docker load -i 1-18-kube-apiserver.tar.gz
docker load -i 1-18-kube-scheduler.tar.gz
docker load -i 1-18-kube-controller-manager.tar.gz
docker load -i 1-18-pause.tar.gz
docker load -i 1-18-cordns.tar.gz
docker load -i 1-18-etcd.tar.gz
docker load -i 1-18-kube-proxy.tar.gz
说明:
pause版本是3.2,用到的镜像是k8s.gcr.io/pause:3.2
etcd版本是3.4.3,用到的镜像是k8s.gcr.io/etcd:3.4.3-0
coredns版本是1.6.7,用到的镜像是k8s.gcr.io/coredns:1.6.7
apiserver、scheduler、controller-manager、kube-proxy版本是1.18.2,用到的镜像分别是
k8s.gcr.io/kube-apiserver:v1.18.2
k8s.gcr.io/kube-controller-manager:v1.18.2
k8s.gcr.io/kube-scheduler:v1.18.2
k8s.gcr.io/kube-proxy:v1.18.2
# Write the kubeadm init configuration.
# Fixed vs the original: the heredoc was terminated by a first EOF before the
# '---' KubeProxyConfiguration document, so that second YAML document (and a
# stray trailing EOF) never made it into the file. Both documents now live in
# one heredoc, and the YAML indentation lost in the original paste is restored.
cat << EOF > kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 10.0.0.200:6443   # keepalived VIP, not a single master
apiServer:
  certSANs:                             # extra SANs: every master, node and the VIP
  - 10.0.0.101
  - 10.0.0.102
  - 10.0.0.103
  - 10.0.0.104
  - 10.0.0.200
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
kubeadm init --config kubeadm-init.yaml |tee k8s-join.yaml
# Make kubectl usable for the current user (standard kubeadm post-init steps)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Pre-load the calico CNI images
docker load -i cni.tar.gz
docker load -i calico-node.tar.gz   # fixed: filename was truncated to '.tar.' in the original
在master01节点执行
# Deploy calico (calico.yaml must already be present on master01)
kubectl apply -f calico.yaml
# Prepare target directories for the certificate copy below.
# NOTE(review): these directories are the scp destinations, so this looks like
# it should run on master02/master03 before scp.sh executes — confirm.
cd /root && mkdir -p /etc/kubernetes/pki/etcd &&mkdir -p ~/.kube/
# Generate a helper script that copies the shared CA / service-account /
# front-proxy / etcd certificates and admin.conf from master01 to the other
# control-plane nodes.
# Fixed vs the original: the heredoc delimiter is now quoted ('EOF') so that
# ${USER}, ${CONTROL_PLANE_IPS} and $host are written literally into scp.sh.
# With the unquoted delimiter they were expanded at generation time, and the
# unset $host left every scp destination as "root@:/etc/kubernetes/...".
cat << 'EOF' > scp.sh
#!/bin/bash
USER=root
CONTROL_PLANE_IPS="10.0.0.102 10.0.0.103"
for host in ${CONTROL_PLANE_IPS}; do
  scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/ca.key "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/sa.key "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
EOF
--control-plane:这个参数表示加入到k8s集群的是master节点
kubeadm join 10.0.0.200:6443 --token gehni0.8zgnoew2cjrd1pz7 --discovery-token-ca-cert-hash sha256:4967cb054bd5899af3e4b6ad3ab0c9f878b549ef7f72842d145b15f500e429ca --control-plane
在master2和master3上操作:
# On master02 / master03: make kubectl usable for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config   # fixed: missing space fused group spec and path into one argument
# Join without --control-plane; presumably this is the worker-node (k8s-node01)
# join command — confirm against the kubeadm init output
kubeadm join 10.0.0.200:6443 --token gehni0.8zgnoew2cjrd1pz7 --discovery-token-ca-cert-hash sha256:4967cb054bd5899af3e4b6ad3ab0c9f878b549ef7f72842d145b15f500e429ca
kubectl get nodes
显示如下:
NAME STATUS ROLES AGE VERSION
master1 Ready master 39m v1.18.2
master2 Ready master 5m9s v1.18.2
master3 Ready master 2m33s v1.18.2
kubectl删除节点
删除节点
1、驱逐节点上的pod:kubectl drain k8s-master --delete-local-data --force --ignore-daemonsets
2、删除节点:kubectl delete node 10.20.20.33
ubuntu系统kubernetes1.18多master集群架构部署
原文:https://www.cnblogs.com/dinghc/p/14919823.html