环境初始化
1.配置hostname
# Set each machine's hostname. Run ONLY the matching command on each node
# (the original had both commands fused onto a single line, which is broken).
hostnamectl set-hostname master   # run on the master node (192.168.1.11)
hostnamectl set-hostname node     # run on the worker node (192.168.1.12)
2.配置/etc/hosts
127.0.0.1    localhost localhost.localdomain localhost4 localhost4.localdomain4
::1          localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.11 master
192.168.1.12 node
3.关闭防火墙、Selinux、swap
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux for the current boot (original had a typo: "setenfore")
setenforce 0
# Persist SELINUX=disabled across both config paths
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
# Turn off swap now and comment out swap entries in /etc/fstab
# (original used typographic quotes '…' which break the sed command)
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
# Load br_netfilter so the bridge-nf sysctls below take effect
modprobe br_netfilter
4.配置内核参数 /etc/sysctl.d/k8s.conf
# Write the bridge-nf sysctls to /etc/sysctl.d/k8s.conf (the original showed
# the file contents and the apply command fused on one line)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the settings
sysctl -p /etc/sysctl.d/k8s.conf
5.配置国内tencent yum源、epel源、Kubernetes源地址
# Tencent mirrors for the base and EPEL repositories
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
yum clean all && yum makecache
# Aliyun Kubernetes repository (the original had the closing EOF delimiter
# fused onto the gpgkey line, so the heredoc never terminated)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
6.安装依赖包
# Install tooling needed by kube-proxy (ipvs/conntrack), the container runtime
# storage stack, and general admin utilities
yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp bash-completion yum-utils device-mapper-persistent-data lvm2 net-tools conntrack-tools vim libtool-ltdl
7.配置时间同步,所有节点都需要
# Install and enable chrony for time synchronization
# (the original used a typographic en-dash "–y", which yum rejects)
yum install chrony -y
systemctl enable chronyd.service && systemctl start chronyd.service
systemctl status chronyd.service
# Verify the configured time sources are reachable
chronyc sources
8.初始化环境配置检查
- 重启,做完以上所有操作,最好reboot重启一遍
- ping 每个节点hostname 看是否能ping通
- ssh 对方hostname看互信是否无密码访问成功
- 执行date命令查看每个节点时间是否正确
- 执行 ulimit -Hn 看下最大文件打开数是否是655360
- cat /etc/sysconfig/selinux |grep disabled 查看下每个节点selinux是否都是disabled状态
安装docker ,所有节点都需要装
1.设置docker yum源
# Add the official Docker CE yum repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
2.安装docker
# List available docker-ce versions
yum list docker-ce --showduplicates | sort -r
# Pin Docker to 18.06.1 (a version validated for Kubernetes v1.13)
yum install -y docker-ce-18.06.1.ce-3.el7
systemctl restart docker
# Configure a registry mirror and the Docker data directory.
# The original used typographic quotes around the heredoc delimiter and left
# a "[root@master ~]#" prompt pasted into the command stream.
# NOTE(review): "graph" is accepted by Docker 18.06 but deprecated in later
# releases (replaced by "data-root") — confirm before upgrading Docker.
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://q2hy3fzi.mirror.aliyuncs.com"],
  "graph": "/tol/docker-data"
}
EOF
3.启动docker
# Reload unit files (daemon.json changed), restart Docker, and enable at boot
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
systemctl status docker
# docker --version
安装kubeadm、kubelet、kubectl,所有节点
- kubeadm: 部署集群用的命令
- kubelet: 在集群中每台机器上都要运行的组件,负责管理pod、容器的生命周期
- kubectl: 集群管理工具
安装工具
# Install kubelet/kubeadm/kubectl from the Kubernetes repo and enable kubelet
# (the original fused both commands onto a single line)
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
systemctl enable kubelet && systemctl start kubelet
镜像下载准备
1.初始化获取要下载的镜像列表
# Show the control-plane images kubeadm needs for this release
kubeadm config images list
# Generate a default kubeadm configuration file to customize below
kubeadm config print init-defaults > kubeadm.conf
2.绕过墙下载镜像的方法
# Point kubeadm at the Aliyun mirror instead of k8s.gcr.io (blocked in CN);
# use '|' as the sed delimiter so the repository path needs no escaping
sed -i "s|imageRepository: .*|imageRepository: registry.aliyuncs.com/google_containers|g" kubeadm.conf
3.指定kubeadm安装的Kubernetes版本
# Pin the Kubernetes version kubeadm will install
sed -i "s|kubernetesVersion: .*|kubernetesVersion: v1.13.0|g" kubeadm.conf
4.下载需要的镜像
# Pull all required images via the customized config, then verify locally
kubeadm config images pull --config kubeadm.conf
docker images
5.docker tag 镜像
# Re-tag the Aliyun mirror images to the k8s.gcr.io names kubeadm expects.
# (The original fused all commands onto one line and left "[root@master ~]#"
# shell prompts pasted inside the command text.)
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.13.0 k8s.gcr.io/kube-apiserver:v1.13.0
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.13.0 k8s.gcr.io/kube-controller-manager:v1.13.0
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.13.0 k8s.gcr.io/kube-scheduler:v1.13.0
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0
docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag registry.aliyuncs.com/google_containers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
docker tag registry.aliyuncs.com/google_containers/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
6.docker rmi 清理下载的镜像
# Remove the mirror-named tags now that k8s.gcr.io tags exist
# (image layers are shared, so only the tag references are removed)
docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.13.0
docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.13.0
docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.13.0
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
docker rmi registry.aliyuncs.com/google_containers/pause:3.1
docker rmi registry.aliyuncs.com/google_containers/etcd:3.2.24
docker rmi registry.aliyuncs.com/google_containers/coredns:1.2.6
部署master节点
1.kubeadm init 初始化master节点
# Initialize the control plane. Pod network CIDR 172.22.0.0/16 must match the
# CIDR written into calico.yaml later; the advertise address is the master IP.
kubeadm init --kubernetes-version=v1.13.0 --pod-network-cidr=172.22.0.0/16 --apiserver-advertise-address=192.168.1.11
# Confirm the generated configs and certificates
ls /etc/kubernetes/
# If init fails, reset and re-run it:
# kubeadm reset
# kubeadm init --kubernetes-version=v1.13.0 --pod-network-cidr=172.22.0.0/16 --apiserver-advertise-address=192.168.1.11
# Record the "kubeadm join" command printed by kubeadm init — the token and
# CA hash below are examples from this install and differ on every cluster
kubeadm join 192.168.1.11:6443 --token iazwtj.v3ajyq9kyqftg3et --discovery-token-ca-cert-hash sha256:27aaefd2afc4e75fd34c31365abd3a7357bb4bba7552056bb4a9695fcde14ef5
kubeadm join 192.168.1.11:6443 --token zs4s82.r9svwuj78jc3px43 --discovery-token-ca-cert-hash sha256:45063078d23b3e8d33ff1d81e903fac16fe6c8096189600c709e3bf0ce051ae8
2.验证测试
# Configure kubectl for the root user
mkdir -p /root/.kube
cp /etc/kubernetes/admin.conf /root/.kube/config
# List pods in all namespaces to check control-plane status
kubectl get pods --all-namespaces
# Check component (scheduler/controller-manager/etcd) health
kubectl get cs
部署calico网络
1.下载calico 官方镜像
# Pull the three Calico v3.1.4 images from Docker Hub
docker pull calico/node:v3.1.4
docker pull calico/cni:v3.1.4
docker pull calico/typha:v3.1.4
2.tag 这三个calico镜像
# Re-tag the Calico images to the quay.io names referenced by calico.yaml
# (the original fused the three commands onto one line)
docker tag calico/node:v3.1.4 quay.io/calico/node:v3.1.4
docker tag calico/cni:v3.1.4 quay.io/calico/cni:v3.1.4
docker tag calico/typha:v3.1.4 quay.io/calico/typha:v3.1.4
3.删除原有镜像
# Drop the Docker Hub tag names, keeping only the quay.io tags
docker rmi calico/node:v3.1.4
docker rmi calico/cni:v3.1.4
docker rmi calico/typha:v3.1.4
4.部署calico
# Download the RBAC manifest and apply it.
# (The original URL read ".../gettingstarted/..." — missing hyphen — and the
# "kubectl apply" was fused onto the curl line.)
curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml -O
kubectl apply -f rbac-kdd.yaml
curl https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.7/calico.yaml -O
# In the ConfigMap, switch typha_service_name from "none" to "calico-typha"
# (original sed commands used typographic quotes '…', which break them)
sed -i 's/typha_service_name: "none"/typha_service_name: "calico-typha"/g' calico.yaml
# Run one replica of the typha Deployment
sed -i 's/replicas: 0/replicas: 1/g' calico.yaml
# Change CALICO_IPV4POOL_CIDR to the pod CIDR chosen at kubeadm init
sed -i 's/192.168.0.0/172.22.0.0/g' calico.yaml
# Manually edit calico.yaml: set the CALICO_NETWORKING_BACKEND env var to the
# BGP backend, i.e.
#   value: "bird"
5.部署calico.yaml
# Deploy Calico and watch the pods come up
kubectl apply -f calico.yaml
kubectl get pods --all-namespaces
# NOTE(review): this flannel manifest download looks unrelated to the Calico
# deployment above — confirm whether it is actually needed
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
部署node节点
1.下载镜像
# On the worker node: pull, re-tag, and clean up the images it needs
# (the original fused all fifteen commands onto a single line)
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
docker pull registry.aliyuncs.com/google_containers/pause:3.1
docker pull calico/node:v3.1.4
docker pull calico/cni:v3.1.4
docker pull calico/typha:v3.1.4
# Re-tag to the names the manifests reference
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0 k8s.gcr.io/kube-proxy:v1.13.0
docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag calico/node:v3.1.4 quay.io/calico/node:v3.1.4
docker tag calico/cni:v3.1.4 quay.io/calico/cni:v3.1.4
docker tag calico/typha:v3.1.4 quay.io/calico/typha:v3.1.4
# Remove the mirror/Docker Hub tag names
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.13.0
docker rmi registry.aliyuncs.com/google_containers/pause:3.1
docker rmi calico/node:v3.1.4
docker rmi calico/cni:v3.1.4
docker rmi calico/typha:v3.1.4
2.把node加入到集群
# Join this node to the cluster — the token and CA hash must be the ones
# printed by "kubeadm init" on the master (these values are install-specific)
kubeadm join 192.168.1.11:6443 --token iazwtj.v3ajyq9kyqftg3et --discovery-token-ca-cert-hash sha256:27aaefd2afc4e75fd34c31365abd3a7357bb4bba7552056bb4a9695fcde14ef5
3.在master上查看
原文:https://www.cnblogs.com/ray-mmss/p/10422969.html