1. Set the hostname
# hostname masterNode1
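The hostname command only sets the transient name; writing it with hostnamectl makes it persistent, and adding every node to /etc/hosts makes the names resolvable. The name-to-IP mapping below is an assumption, inferred from the discovery.seed_hosts and cluster.initial_master_nodes lists further down:
# hostnamectl set-hostname masterNode1
# cat >> /etc/hosts <<EOF
192.168.1.8 masterNode-1
192.168.1.9 masterNode-2
192.168.1.15 masterNode-3
192.168.1.11 dataNode-1
EOF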
2. Create the elastic user and give it ownership of the Elasticsearch directory
# useradd elastic
# chown -R elastic:elastic elasticsearch-7.7.0
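The elasticsearch-7.7.0 directory is assumed to be the unpacked official tarball; if it is not in place yet, roughly:
# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.7.0-linux-x86_64.tar.gz
# tar -xzf elasticsearch-7.7.0-linux-x86_64.tar.gz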
3. Adjust resource limits and kernel parameters
# echo "elastic hard memlock unlimited" >> /etc/security/limits.conf
# echo "elastic soft memlock unlimited" >> /etc/security/limits.conf
# echo "elastic soft nofile 65536" >> /etc/security/limits.conf
# echo "elastic hard nofile 65536" >> /etc/security/limits.conf
# echo "elastic soft nproc 4096" >> /etc/security/limits.conf
# echo "elastic hard nproc 4096" >> /etc/security/limits.conf
# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
# sysctl -p
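The limits.conf entries only apply to new login sessions, so it is worth confirming everything took effect before starting Elasticsearch:
# sysctl vm.max_map_count
# su - elastic -c 'ulimit -l -n -u'
The first command should print 262144, and ulimit should report unlimited locked memory, 65536 open files, and 4096 processes.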
4. Configure the master node
# vim elasticsearch.yml
# Name of the cluster
cluster.name: "test"
# Name of this node
node.name: "masterNode-1"
# Directory for Elasticsearch data
path.data: "/opt/hdfs/data/esData"
# Directory for log files
path.logs: "/opt/hdfs/logs/esLogs"
# If bootstrap.memory_lock is left at its default, the heap can be swapped to disk, causing frequent disk reads and high IOPS, so it should be set to true
bootstrap.memory_lock: true
# Address to bind and listen on
network.host: "192.168.1.8"
# HTTP listen port
http.port: 9200
# When forming a cluster with nodes on other hosts, you must use discovery.seed_hosts to provide the master-eligible nodes; this setting should normally contain the addresses of all master-eligible nodes in the cluster
discovery.seed_hosts: ["192.168.1.9", "192.168.1.15","192.168.1.8"]
# The first time a brand-new Elasticsearch cluster starts, a cluster bootstrapping step determines the set of master-eligible nodes whose votes are counted in the first election. When starting a new cluster in production mode, you must explicitly list the names or IP addresses of the master-eligible nodes whose votes should be counted in that first election.
cluster.initial_master_nodes: ["masterNode-2", "masterNode-3","masterNode-1"]
# This node is master-eligible
node.master: true
# This node does not store data
node.data: false
# Number of nodes that must join before the cluster starts recovering data; this avoids incomplete shard allocation while nodes are still discovering each other right after startup. With 5 nodes in the cluster you can set this to 5, so recovery only begins once all 5 have started; if set to 3, the other two nodes might end up holding no shards.
gateway.recover_after_nodes: 2
# Destructive actions (e.g. deleting an index) must name indices explicitly; wildcards are rejected
action.destructive_requires_name: true
# Enable CORS if you monitor the cluster with the elasticsearch-head plugin
http.cors.enabled: true
http.cors.allow-origin: "*"
# Disable the system-call-filter bootstrap check (needed on older kernels without seccomp)
bootstrap.system_call_filter: false
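Because bootstrap.memory_lock: true locks the whole JVM heap into RAM, the heap must fit the machine's memory. In 7.x the heap size lives in config/jvm.options; the 4g below is only illustrative:
-Xms4g
-Xmx4g
Xms and Xmx should be equal so the locked region never needs to grow.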
cluster.name: "sdpops"
node.name: "dataNode-1"
path.data: "/opt/hdfs/data/esData"
path.logs: "/opt/hdfs/logs/esLogs"
bootstrap.memory_lock: true
network.host: "192.168.1.11"
http.port: 9200
# Only the master-eligible hosts go here
discovery.seed_hosts: ["192.168.1.9", "192.168.1.15","192.168.1.8"]
# Not master-eligible
node.master: false
# This node stores data
node.data: true
# This node does not run ingest pipelines
node.ingest: false
cluster.initial_master_nodes: ["masterNode-2", "masterNode-3","masterNode-1"]
gateway.recover_after_nodes: 3
action.destructive_requires_name: true
http.cors.enabled: true
http.cors.allow-origin: "*"
bootstrap.system_call_filter: false
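The remaining data nodes reuse this file, changing only the node name and bind address; for example (the IP here is an assumption, following the ZooKeeper server list further down):
node.name: "dataNode-2"
network.host: "192.168.1.12"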
Once the configuration is complete, start each node as the elastic user (Elasticsearch will not run as root):
./elasticsearch -d
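Once all nodes are up, the cluster state, the master/data role split, and the memory lock can all be verified over the HTTP API:
# curl -s 'http://192.168.1.8:9200/_cluster/health?pretty'
# curl -s 'http://192.168.1.8:9200/_cat/nodes?v'
# curl -s 'http://192.168.1.8:9200/_nodes/process?pretty' | grep mlockall
In the _cat/nodes output the elected master is marked with * in the master column, and mlockall should report true if the memlock limits were applied correctly.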
Next, install ZooKeeper, which Kafka uses for coordination:
# wget https://mirror.bit.edu.cn/apache/zookeeper/zookeeper-3.6.1/apache-zookeeper-3.6.1-bin.tar.gz
# tar -xzf apache-zookeeper-3.6.1-bin.tar.gz
# vim zoo.cfg
tickTime=2000
dataDir=/opt/hdfs/data/zkData
clientPort=2181
initLimit=5
syncLimit=2
# server.<myid>=<host>:<quorum-port>:<leader-election-port>
server.1=192.168.1.11:2890:3890
server.2=192.168.1.12:2890:3890
server.3=192.168.1.13:2890:3890
server.4=192.168.1.14:2890:3890
server.5=192.168.1.15:2890:3890
# Each server writes its own id, matching the server.N lines above (e.g. 2 on 192.168.1.12)
# echo 1 > /opt/hdfs/data/zkData/myid
# Start ZooKeeper
# sh zkServer.sh start
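After starting ZooKeeper on all five servers, check each node's role; one should report leader and the rest follower:
# sh zkServer.sh status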
With ZooKeeper running, configure Kafka next.
1. Edit the Kafka configuration
vim server.properties
broker.id=1
listeners=PLAINTEXT://192.168.1.11:9092
advertised.listeners=PLAINTEXT://192.168.1.11:9092
num.network.threads=10
num.io.threads=15
socket.send.buffer.bytes=1024000
socket.receive.buffer.bytes=1024000
socket.request.max.bytes=104857600
log.dirs=/opt/hdfs/logs/kafkaLogs
num.partitions=15
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=120
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
# ZooKeeper connection string; the /logstash chroot keeps Kafka's metadata under that ZooKeeper path
zookeeper.connect=192.168.1.11:2181,192.168.1.12:2181,192.168.1.13:2181,192.168.1.14:2181,192.168.1.15:2181/logstash
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
2. Start Kafka
sh kafka-server-start.sh -daemon ../config/server.properties
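To confirm the brokers registered, list their ids under the /logstash chroot configured above, or list topics with the stock Kafka CLI (for Kafka versions before 2.2, use --zookeeper 192.168.1.11:2181/logstash instead of --bootstrap-server):
# sh zkCli.sh -server 192.168.1.11:2181 ls /logstash/brokers/ids
# sh kafka-topics.sh --bootstrap-server 192.168.1.11:9092 --list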
Original article (in Chinese): "Building an Elasticsearch cluster with separated master and data nodes", https://blog.51cto.com/happyting/2538929