[root@master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
localhost.localdomain NotReady master 44h v1.19.3
node01 Ready <none> 44h v1.19.3
node02 Ready <none> 44h v1.19.3
[root@master01 ~]# kubectl get po -n kube-system kube-flannel-ds-w4mwc -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-flannel-ds-w4mwc 0/1 Pending 0 102s <none> localhost.localdomain <none> <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 3m7s default-scheduler Successfully assigned kube-system/kube-flannel-ds-w4mwc to localhost.localdomain
[root@master01 ~]# kubectl delete node node01 node02 localhost.localdomain
node "node01" deleted
node "node02" deleted
node "localhost.localdomain" deleted
[root@master01 ~]# kubectl get nodes
No resources found
# 已经变成Pending状态
[root@master01 ~]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
csr-6pqpf 43m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-8vj2f 90m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-9cs9r 59m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-9gh45 105m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-9ln7v 151m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-9zbnw 74m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-d7jdl 162m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-dh8hs 136m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-ktbl6 12m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-m7d22 121m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
csr-zsfgr 28m kubernetes.io/kube-apiserver-client-kubelet system:node:localhost.localdomain Pending
[root@master01 ~]# kubeadm reset
[root@master01 ~]# rm -rf $HOME/.kube/config (这个在配置了kubectl的节点执行即可) ## yum install -y net-tools
[root@master01 ~]# ifconfig cni0 down && ip link delete cni0 (如果cni0没干掉就手动干掉)
[root@master01 ~]# ifconfig flannel.1 down && ip link delete flannel.1
[root@master01 ~]# ifconfig kube-ipvs0 down && ip link delete kube-ipvs0
[root@master01 ~]# ifconfig dummy0 down && ip link delete dummy0
[root@master01 ~]# grep -C2 nodeRegistration kubeadm.yaml | grep name
name: master01
[root@master01 ~]# kubeadm init --config kubeadm.yaml
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.70:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:f4d24e1b28d4dcb1ebaa9c4847221fda503bb08627175dbdacb589cc4ebfaa8a
可以看到,已经改过来了
记录一下,以防踩坑。一时大意了,给自己留了个坑
"cni0" already has an IP address different from 10.244.1.1/24 (-owide后,到对应节点systemctl restart network)
"cni0" already has an IP address different from 10.244.1.1/24 (或者ifconfig cni0 down && ip link delete cni0)
然后重启coredns,不行的话就重新初始化那个节点吧。
发现cni0的这个网卡地址是10.244.2.1,明显与报错中的10.244.1.1不一致,将其改为10.244.1.1,重启网络服务,回到master,发现容器正常运行
kubeadm reset
systemctl stop kubelet && rm -rf /etc/cni/
ifconfig cni0 down && ifconfig flannel.1 down
ip link delete cni0 && ip link delete flannel.1
systemctl start kubelet
# 获取master的join token
kubeadm token create --print-join-command
# 重新加入节点
kubeadm join 192.168.1.70:6443 --token 1ni0cy.frcpumeb2bdmscqu --discovery-token-ca-cert-hash sha256:f4d24e1b28d4dcb1ebaa9c4847221fda503bb08627175dbdacb589cc4ebfaa8a
[root@master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master01 NotReady master 2m37s v1.19.3
node01 NotReady <none> 34s v1.19.3
node02 NotReady <none> 38s v1.19.3
[root@master01 ~]# kubectl apply -f kube-flannel.yml
[root@master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master01 Ready master 32m v1.19.3
node01 Ready <none> 30m v1.19.3
node02 Ready <none> 30m v1.19.3
kubeadm参考: https://blog.csdn.net/qq_24794401/article/details/106654710
二进制安装可参考:https://blog.csdn.net/chenshm/article/details/118718644
kubeadm部署的测试环境k8s集群更改master节点的名字
原文:https://www.cnblogs.com/hsyw/p/15183210.html