原文地址: 点这里
确认helm镜像源并更新镜像仓库
[root@ops1 test]# helm repo add stable http://mirror.azure.cn/kubernetes/charts/
[root@ops1 test]# helm repo list
NAME       URL
local      http://127.0.0.1:8879/charts
stable     http://mirror.azure.cn/kubernetes/charts/
incubator  http://mirror.azure.cn/kubernetes/charts-incubator/
[root@ops1 test]# helm repo update
[root@ops1 test]# kubectl create namespace efk
[root@ops1 test]# cat <<EOF > elasticsearch-values.yaml
image:
  repository: "docker.elastic.co/elasticsearch/elasticsearch-oss"
  # repository: "registry.cn-beijing.aliyuncs.com/wangzt/k8s/elasticsearch-oss" 个人镜像仓库
  tag: "6.7.0"
client:
  serviceType: "NodePort"
  httpNodePort: 30920
master:
  persistence:
    enabled: true # elasticsearch-master使用pvc永久存储,如果是测试,可以换成false
    storageClass: "nfs2"
data:
  persistence:
    enabled: true # elasticsearch-data使用pvc永久存储,如果是测试,可以换成false
    storageClass: "nfs2"
EOF
[root@ops1 test]# helm install --name elasticsearch -f elasticsearch-values.yaml --namespace=efk --version=1.32.4 stable/elasticsearch
[root@ops1 test]# kubectl get all -n efk
# 等到全部pod显示正常后,访问k8s工作节点
[root@ops1 test]# curl http://127.0.0.1:30920/
{
"name" : "elasticsearch-client-65bfdd647c-kl9zb",
"cluster_name" : "elasticsearch",
...
"tagline" : "You Know, for Search"
}
# 配置可不加,我是为了日志量太大,和添加监控显示的
[root@ops1 test]# cat <<EOF > fluentd-values.yaml
image:
  repository: gcr.io/google-containers/fluentd-elasticsearch # 默认地址可能不可用
  # repository: registry.cn-beijing.aliyuncs.com/wangzt/kubernetes/fluentd-elasticsearch
elasticsearch:
  buffer_chunk_limit: 32M # 内存缓冲区
service: # 启动监控monitor-agent
  type: NodePort
  ports:
    - name: "monitor-agent"
      port: 24231
env:
  OUTPUT_BUFFER_CHUNK_LIMIT: "32M" # 设置buffer缓存区大小
podAnnotations: # 让prometheus监控monitor-agent
  prometheus.io/scrape: "true"
  prometheus.io/port: "24231"
tolerations: # 监控master
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule
EOF
[root@ops1 test]# helm install --name fluentd-elasticsearch -f fluentd-values.yaml --namespace=efk --version=2.0.7 stable/fluentd-elasticsearch
[root@ops1 test]# kubectl get pod -n efk | grep fluentd
#等服务全部正常后,可以看到有索引产生
[root@ops1 test]# curl http://127.0.0.1:30920/_cat/indices
green open logstash-2020.03.18 om-LUsRXQUGcBfww4ioa3w 5 1 26071 0 27.9mb 13.9mb
green open logstash-2020.03.16 3RAWut3DQkqlLWgQu9DxSQ 5 1 22269 0 23.7mb 11.8mb
[root@ops1 test]# cat <<EOF > kibana-values.yaml
files:
  kibana.yml:
    elasticsearch.hosts: http://elasticsearch-client:9200
service:
  type: NodePort
  nodePort: 30922
persistentVolumeClaim:
  enabled: true # 如果不使用pvc永久存储,只做测试就改为false
  storageClass: "nfs2"
EOF
[root@ops1 test]# helm install --name kibana -f kibana-values.yaml --namespace=efk --version=3.2.6 stable/kibana
[root@ops1 test]# kubectl get pod -n efk | grep kibana
kibana-7bf95fb48-nb2z4 1/1 Running 0 36s
原文:https://www.cnblogs.com/wangzhangtao/p/12603107.html