172.18.0.10 master 2C2G
172.18.0.11 node1 2C4G
172.18.0.12 node2 2C4G
kubelet: 只是创建容器
kube-proxy : 解决网络问题
# 检查防火墙和selinux
iptables -nL
getenforce
master
节点脚本准备# master 节点上
yum -y install sshpass
# 生成ssh key
ssh-keygen
# all.txt服务器ip地址
cat > /root/all.txt << EOF
172.18.0.10
172.18.0.11
172.18.0.12
EOF
# 免密交互
# fenfa.sh脚本
# 注意: "EOF" 加双引号是为了转义$符号
cat > /root/fenfa.sh << "EOF"
#!/bin/bash
# Distribute the local SSH public key to every host listed in /root/all.txt,
# then verify passwordless login by running `hostname` on the remote host.
# NOTE: 统一服务器登录密码 is a placeholder — edit it to the real password (see step below).
while IFS= read -r ip; do
    # Options must come before the target host; quote "$ip" against word-splitting.
    if sshpass -p统一服务器登录密码 ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no "$ip" &>/dev/null; then
        echo "===============pub_key fenfa ok with $ip========================="
        ssh "$ip" hostname
        echo ""
    else
        echo "===============pub_key fenfa failed with $ip========================="
        echo ""
    fi
done < /root/all.txt
EOF
# 修改 fenfa.sh脚本内容
vi fenfa.sh
sshpass -p统一服务器登录密码
# 运行脚本
bash fenfa.sh
master
节点操作[root@master ~]# cat > /etc/hosts << EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.18.0.10 master
172.18.0.11 node1
172.18.0.12 node2
EOF
# 复制hosts
for i in $(grep -v localhost /etc/hosts | awk '{ print $2 }');do scp -rp /etc/hosts $i:/etc/; done
# 修改主机名
for i in $(grep -v localhost /etc/hosts | awk '{ print $2 }');do ssh $i "hostnamectl set-hostname $i "; done
# 获取主机名
for i in $(grep -v localhost /etc/hosts | awk '{ print $2 }');do ssh $i "hostname";done
# 安装软件
for i in $(grep -v localhost /etc/hosts | awk '{ print $1 }'); do ssh $i "yum install vim wget net-tools -y" ; done
# 更改时区
for i in $(grep -v localhost /etc/hosts | awk '{ print $2 }'); do ssh $i "timedatectl set-timezone Asia/Shanghai"; done
# 不用更改时区的时候同步时间
for i in $(grep -v localhost /etc/hosts |awk '{print $2}');do ssh $i "ntpdate time1.aliyun.com";done
# 下载docker源
for i in $(grep -v localhost /etc/hosts |awk '{print $2}');do ssh $i "cd /etc/yum.repos.d/ && wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo";done
# 安装docker
for i in $(grep -v localhost /etc/hosts |awk '{print $2}');do ssh $i "yum -y install docker-ce-18.09.7-3.el7 docker-ce-cli-18.09.7";done
# 启动和开机自启
for i in $(grep -v localhost /etc/hosts |awk '{print $2}');do ssh $i "systemctl enable docker && systemctl start docker";done
# 设置cgroup驱动使用systemd
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# 重新启动docker
systemctl restart docker
# 查看docker版本
docker -v
# 配置国内源
cat >/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 安装软件包
yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1 ipvsadm
# 配置kubelet禁止使用swap
# 方法一,临时关闭
swapoff -a
# 方法二,修改配置文件
sed -i '/swap/d' /etc/fstab
# 方法三
cat > /etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
EOF
# 设置内核参数
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# 配置刷新
sysctl --system
# 或
sysctl -p
# 设置开机自启动
systemctl enable kubelet && systemctl start kubelet
# 使用IPVS进行负载均衡
cat >/etc/sysconfig/modules/ipvs.modules<<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
source /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv
master
节点初始化操作# Master节点执行初始化操作
# apiserver-advertise-address= master节点ip
# --service-cidr=service会生成一个固定ip
# --pod-network-cidr=pod会生成随机ip
[root@master ~]# kubeadm init --apiserver-advertise-address=172.18.0.10 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.15.1 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.2.0.0/16 --service-dns-domain=cluster.local --ignore-preflight-errors=Swap --ignore-preflight-errors=NumCPU
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.18.0.10:6443 --token h7o1k7.enq1bz8z9gadjyox --discovery-token-ca-cert-hash sha256:9a0029f9bff257b21dca8ea67f355400a5969f8d600e1accebdf96c2aae47d3f
================================================================
# 为kubectl准备kubeconfig文件
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# 使用kubectl命令查看组件状态
[root@master ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
# 使用kubectl获取Node信息
[root@node1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
node1 NotReady master 2m38s v1.15.1
# 支持命令补全
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
# Master节点操作
# 1.部署canal网络插件
# 注意:因为属于外网下载,所以启动速度会很慢
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/canal/rbac.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/canal/canal.yaml
# 2.查看启动的pod
kubectl get pods --all-namespaces
# 3.查看节点状态
kubectl get nodes
# 1.在Master节点上输出增加节点的命令
[root@master ~]# kubeadm token create --print-join-command
kubeadm join 172.18.0.10:6443 --token h7o1k7.enq1bz8z9gadjyox --discovery-token-ca-cert-hash sha256:9a0029f9bff257b21dca8ea67f355400a5969f8d600e1accebdf96c2aae47d3f
# 2.在Node1和node2节点执行
[root@node1 ~]# kubeadm join 172.18.0.10:6443 --token h7o1k7.enq1bz8z9gadjyox --discovery-token-ca-cert-hash sha256:9a0029f9bff257b21dca8ea67f355400a5969f8d600e1accebdf96c2aae47d3f
master
查看有几个节点[root@master ~]# kubectl get nodes
NAME STATUS AGE
172.18.0.11 Ready 42s
# 新增一个节点
[root@master ~]# kubectl get nodes
NAME STATUS AGE
172.18.0.11 Ready 20m
172.18.0.12 Ready 18s
# 给Node加上标签
kubectl label nodes node1 node-role.kubernetes.io/node=
kubectl label nodes node2 node-role.kubernetes.io/node=
# 查看详细信息
kubectl get nodes -o wide
# 创建一个单点pod的Nginx应用
[root@master ~/k8s]# kubectl create deployment nginx --image=nginx:alpine
# 查看启动的pod
[root@master ~/k8s]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-8f6959bd-wm66d 1/1 Running 0 65s 10.2.1.2 node2 <none> <none>
# 测试访问
[root@master ~/k8s]# curl 10.2.1.2
# 测试多点扩容,扩容2个
[root@master ~/k8s ]# kubectl scale deployment nginx --replicas=2
# 查看状态
[root@master ~/k8s ]# kubectl get pod -o wide
nginx
增加service
网络# deployment 资源
[root@master ~ ]# kubectl expose deployment nginx --port=80 --type=NodePort
# 查看service
[root@master ~ ]# kubectl get service nginx -o wide
# 测试访问(master ip+查看到的端口)
curl 172.18.0.10:32483
# 添加repo源
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# 安装docker
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce docker-compose
systemctl enable docker && systemctl start docker
# 上传解压harbor安装包
cd /opt
tar zxf harbor-offline-installer-v1.9.0-rc1.tgz
# 编辑harbor配置文件
cd harbor/
vim harbor.yml
hostname: 172.18.0.13 # 部署的主机地址(如果是云服务器,填写公网ip)
harbor_admin_password: 123456 # 密码(默认为:Harbor12345)
# 安装harbor
cd harbor/
./install.sh
# 所有节点配置docker信任仓库
cat >/etc/docker/daemon.json<<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries" : ["http://172.18.0.13"]
}
EOF
# 重启docker服务
systemctl restart docker
# 所有节点都登录harbor
docker login 172.18.0.13
# 下载镜像并上传
docker pull nginx:1.14.0
docker pull nginx:1.16.0
# 修改镜像标签 (k8s项目需要在web页面创建)
docker tag nginx:1.14.0 172.18.0.13/k8s/nginx:1.14.0
docker tag nginx:1.16.0 172.18.0.13/k8s/nginx:1.16.0
# 上传镜像到harbor
docker push 172.18.0.13/k8s/nginx:1.14.0
docker push 172.18.0.13/k8s/nginx:1.16.0
POD
管理容器# pod内容解释
apiVersion: v1 # 版本号
kind: Pod # Pod
metadata: # 元数据
name: nginx-pod # metadata.name Pod的名称
labels: # metadata.labels 自定义标签列表
app: nginx # 标签名称
spec: # Pod中容器的详细定义
containers: # spec.containers容器列表
- name: nginx # spec.containers容器名称
image: nginx:1.14.0 # spec.containers.image容器镜像名称
ports: # 容器需要暴露的端口号列表
- containerPort: 80 # 容器监听的端口号
# 1.编写Pod配置文件
[root@master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.0
ports:
- containerPort: 80
EOF
# 2.创建pod
[root@master ~]# kubectl create -f nginx-pod.yaml
# 3.查看pod
[root@master ~]# kubectl get pod
# 4.删除pod
[root@master ~]# kubectl delete pod nginx-pod
# 5.修改pod配置文件,使用habor地址
[root@master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod
labels:
app: nginx
spec:
containers:
- name: nginx
image: 172.18.0.13/k8s/nginx:1.14.0
ports:
- containerPort: 80
EOF
# 6.再次创建pod
[root@master ~]# kubectl create -f nginx-pod.yaml
# 7.再次查看pod
[root@master ~]# kubectl get pod
# 8.查看pod详细信息
[root@master ~]# kubectl describe pod nginx-pod
================================创建不成功,报错信息
Failed to pull image "172.18.0.13/k8s/nginx:1.14.0": rpc error: code = Unknown desc = Error response from daemon: pull access denied for 10.0.1.7/k8s/nginx, repository does not exist or may require ‘docker login‘
# 9.此时需要转换docker密码文件为base64
# /root/.docker/config.json # 登录harbor时会提示保存密码路径
[root@master ~]# cat /root/.docker/config.json |base64
ewoJImF1dGhzIjogewoJCSIxMC4wLjEuNyI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZNVEl6TkRVMiIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9
# 10.编写k8s的secret凭证
[root@master ~]# cat> harbor-secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
name: harbor-secret
namespace: default
data:
.dockerconfigjson: ewoJImF1dGhzIjogewoJCSIxMC4wLjEuNyI6IHsKCQkJImF1dGgiOiAiWVdSdGFXNDZNVEl6TkRVMiIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTguMDkuNyAobGludXgpIgoJfQp9
type: kubernetes.io/dockerconfigjson
EOF
# 11.k8s执行操作并查看
[root@master ~]# kubectl create -f harbor-secret.yaml
[root@master ~]# kubectl get secrets
# 12.修改pod配置文件,增加调用安全凭证的配置
[root@master ~]# cat >nginx-pod-harbor.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod
labels:
app: nginx
spec:
containers:
- name: nginx
image: 172.18.0.13/k8s/nginx:1.14.0
ports:
- containerPort: 80
imagePullSecrets:
- name: harbor-secret
EOF
# 删除失败的pod,新创建带有安全下载凭证的pod
[root@master ~]# kubectl delete pod nginx-pod
[root@master ~]# kubectl create -f nginx-pod-harbor.yaml
# 再次查看发现已经可以正常下载了
[root@master ~]# kubectl get pod -o wide
RC
管理容器# 使用Replication Controller管理Pod
# 1.编写nginx-rc配置文件
[root@master ~]# cat >nginx-rc.yaml<<EOF
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-rc
spec:
replicas: 3
selector:
app: nginx
template:
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: 172.18.0.13/k8s/nginx:1.14.0
ports:
- containerPort: 80
imagePullSecrets:
- name: harbor-secret
EOF
# 创建rc
[root@master ~]# kubectl create -f nginx-rc.yaml
# 查看
[root@master ~]# kubectl get pod -o wide
# 查看rc详细信息
[root@master ~]# kubectl get rc -o wide
# RC扩容
[root@master ~]# kubectl scale rc nginx-rc --replicas=5
[root@master ~]# kubectl get rc -o wide
[root@master ~]# kubectl get pod -o wide
# RC缩容
[root@master ~]# kubectl scale rc nginx-rc --replicas=2
[root@master ~]# kubectl get rc -o wide
[root@master ~]# kubectl get pod -o wide
RS
管理容器# rs控制器管理POD
# 1.编写rs控制
[root@master ~]# cat >nginx-rs.yaml<<EOF
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: nginx-rs
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: 172.18.0.13/k8s/nginx:1.14.0
ports:
- containerPort: 80
imagePullSecrets:
- name: harbor-secret
EOF
# 创建rs资源
[root@master ~]# kubectl create -f nginx-rs.yaml
# 查看pod
[root@master ~]# kubectl get pod -o wide
# 查看rs详细资源
[root@master ~]# kubectl get rs -o wide
# 删除rc或rs资源
# 第一种方法:
# 删除资源
[root@master ~]# kubectl get rs -o wide
[root@master ~]# kubectl delete rs nginx-rs
# 第二种方法:
# 从配置文件里读取配置并删除
[root@master ~]# kubectl delete -f nginx-rc.yaml
deployment
管理容器# 使用deployment管理POD
# 1.编写deployment配置
[root@master ~]# cat >nginx-deployment.yaml<<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: 172.18.0.13/k8s/nginx:1.14.0
ports:
- containerPort: 80
imagePullSecrets:
- name: harbor-secret
EOF
# 2.创建
[root@master ~]# kubectl create -f nginx-deployment.yaml
# 3.查看
[root@master ~]# kubectl get pod -o wide
# 4.滚动更新
[root@master ~]# kubectl set image deployment/nginx-deployment nginx=172.18.0.13/k8s/nginx:1.16.0
# 5.回滚
# 设置拉取一个不存在的镜像
[root@master ~]# kubectl set image deployment/nginx-deployment nginx=172.18.0.13/k8s/nginx:1.19.0
# 查看Pod会发现提示错误,但是旧的版本还是在运行的
[root@master ~]# kubectl get pod -o wide
# 查看错误deployment详细信息,发现版本已经是1.19了
[root@master ~]# kubectl get deployments.apps nginx-deployment -o wide
# 查看更新历史版本,发现没有日志说明
[root@master ~]# kubectl rollout history deployment nginx-deployment
# 解决方法是创建deployment资源的时候带上参数--record
kubectl create -f nginx-deployment.yaml --record
kubectl get pod -o wide
kubectl set image deployment/nginx-deployment nginx=172.18.0.13/k8s/nginx:1.19.0
kubectl rollout history deployment nginx-deployment
# 回滚到上一个版本
kubectl rollout undo deployment nginx-deployment
# 指定版本
# - 查看指定的版本信息
kubectl rollout history deployment nginx-deployment --revision=5
# - 回滚到指定版本
kubectl rollout undo deployment nginx-deployment --to-revision=5
# - 查看回滚详细信息
kubectl describe deployments.apps nginx-deployment
# 扩容
kubectl scale deployment nginx-deployment --replicas=5
# 缩容
kubectl scale deployment nginx-deployment --replicas=2
# 网络名词
flannel、calico、weave,CNI
VXLAN
service
管理Pod容器访问# 使用Service管理Pod访问
# 1.创建NodeIP
[root@master ~]# cat >nginx-nodeport.yaml<<EOF
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
selector:
app: nginx
ports:
- protocol: TCP
port: 80
targetPort: 80
type: NodePort
EOF
# 2.创建
[root@master ~]# kubectl create -f nginx-nodeport.yaml
# 3.查看service
[root@master ~]# kubectl get service
# 4.查看详细信息
[root@master ~]# kubectl describe service nginx-service
# 5.查看service
[root@master ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.1.0.1 <none> 443/TCP 4m42s
nginx-service NodePort 10.1.127.251 <none> 80:31186/TCP 2s
# 6.网页访问测试
172.18.0.13:31186
master
节点安装NFS服务# master节点安装nfs服务
[root@master ~]# yum install nfs-utils -y
[root@master ~]# mkdir /data/nfs/mysql -p
[root@master ~]# cat >/etc/exports<<EOF
/data 172.18.0.0/24(rw,async,no_root_squash,no_all_squash)
EOF
[root@master ~]# systemctl start rpcbind
[root@master ~]# systemctl start nfs
[root@master ~]# showmount -e 172.18.0.10
node
节点安装NFS服务# node节点安装nfs
[root@master ~]# yum install nfs-utils -y
[root@master ~]# systemctl start rpcbind
[root@master ~]# showmount -e 172.18.0.10
master
节点操作12.3.1 PV
操作
# PV 相当于硬盘
# PVC 相当于划分的分区
# 创建NFS的PV配置文件
[root@master ~]# cat >nfs-pv.yaml<<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv01
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Recycle
storageClassName: nfs
nfs:
path: /data/nfs/mysql
server: 172.18.0.10
EOF
# 创建PV资源
[root@master ~]# kubectl create -f nfs-pv.yaml
# 查看PV详细信息
[root@master ~]# kubectl get pv -o wide
12.3.2 PVC
操作
# 创建pvc配置文件
[root@master ~]# cat >nfs-pvc.yaml<<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: nfs
EOF
# 创建pvc
[root@master ~]# kubectl create -f nfs-pvc.yaml
# 查看pvc详细信息
[root@master ~]# kubectl get pvc -o wide
# 下载mysql镜像
[root@master ~]# docker pull mysql:5.7
[root@master ~]# docker tag mysql:5.7 172.18.0.13/k8s/mysql:5.7
[root@master ~]# docker push 172.18.0.13/k8s/mysql:5.7
# 编写mysql-pvc配置文件
[root@master ~]# cat > mysql-pvc.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: mysql
spec:
replicas: 1
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
volumes:
- name: mysql-pvc
persistentVolumeClaim:
claimName: mysql-pvc
- name: mysql-log
hostPath:
path: /var/log/mysql
containers:
- name: mysql
image: 172.18.0.13/k8s/mysql:5.7
ports:
- containerPort: 3306
env:
- name: MYSQL_ROOT_PASSWORD
value: "123456"
volumeMounts:
- name: mysql-pvc
mountPath: /var/lib/mysql
- name: mysql-log
mountPath: /var/log/mysql
imagePullSecrets:
- name: harbor-secret
EOF
# 创建mysql-pvc资源
[root@master ~]# kubectl create -f mysql-pvc.yaml
# 查看mysql-pvc资源详细信息
[root@master ~]# kubectl get pod -o wide
# ===================node节点======================
# 在所在docker运行的node节点上操作
# 验证资源是否持久化
[root@node2 ~]# docker exec -it 52eb9be8b3cd /bin/bash
[root@node2 ~]# mysql -uroot -p123456 -e 'create database oldzhang;'
[root@node2 ~]# mysql -uroot -p123456 -e 'show databases;'
# ================回到master节点===============
[root@master ~]# kubectl delete -f mysql-pvc.yaml
[root@master ~]# kubectl create -f mysql-pvc.yaml
[root@master ~]# kubectl get pod -o wide
# ===================node节点======================
# 在所在docker运行的node节点上操作
# 验证资源是否持久化
[root@node1 ~]# mysql -uroot -p123456 -e 'show databases;'
# 注意:如果下载过程报如下错误
=======
正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|::|:443... 失败:拒绝连接。
=====
# 需修改hosts解析文件
cat /etc/hosts
151.101.76.133 raw.githubusercontent.com
# 下载配置文件
[root@master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
# 修改配置文件
# 修改3个地方
# - 修改镜像地址为阿里云
# - 添加nodeIP端口
# - 指定运行在master节点
# 修改后的配置文件
[root@master ~]# cat kubernetes-dashboard.yaml
109 spec:
# 修改 110行,nodeName为部署主机的节点名称
110 nodeName: master
111 containers:
112 - name: kubernetes-dashboard
# 修改 113行镜像源,默认是国外源,修改为阿里源
113 image: registry.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
142 serviceAccountName: kubernetes-dashboard
143 # Comment the following tolerations if Dashboard must not be deployed on master
# 注释 144行到146行 的内容
144 #tolerations:
145 #- key: node-role.kubernetes.io/master
146 # effect: NoSchedule
158 spec:
# 在 158行下面添加 type字段
159 type: NodePort
160 ports:
161 - port: 443
162 targetPort: 8443
# 在 162行下面添加nodePort对外开放端口
163 nodePort: 30000
# 创建dashboard
[root@master ~]# kubectl create -f kubernetes-dashboard.yaml
# 创建服务账号
[root@master ~]# cat > dashboard_service_account_admin.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
EOF
[root@master ~]# kubectl apply -f dashboard_service_account_admin.yaml
# 创建集群角色绑定
[root@master ~]# cat > dashboard_cluster_role_binding_admin.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
EOF
[root@master ~]# kubectl apply -f dashboard_cluster_role_binding_admin.yaml
# 创建证书
#解决 Google 浏览器不能打开 kubernetes dashboard 方法
[root@master ~]# mkdir key && cd key
#生成证书
[root@master key]# openssl genrsa -out dashboard.key 2048
[root@master key]# openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=172.18.0.10'
[root@master key]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
#删除原有的证书 secret
[root@master key]# kubectl delete secret kubernetes-dashboard-certs -n kube-system
#创建新的证书 secret
[root@master key]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
#查看pod
[root@master key]# kubectl get pod -n kube-system
#删除 pod,启动新 pod 生效
[root@master key]# kubectl delete pod -n kube-system kubernetes-dashboard-xxxxxx
# 获取用户登陆Token
[root@master ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') > admin-token.yaml && cat admin-token.yaml
# web页面操作
https://172.18.0.10:30000
# kubernetes仪表板 -> 令牌(填入令牌)
原文:https://www.cnblogs.com/wshlym/p/13159381.html