# Create a zookeeper-specific host list from the general ips file
cp ips ips_zookeeper
vim ips_zookeeper # keep only the three zookeeper machine names
# Derive zookeeper-specific batch helper scripts from the generic ones
cp scp_all.sh scp_all_zookeeper.sh
cp ssh_all.sh ssh_all_zookeeper.sh
cp ssh_root.sh ssh_root_zookeeper.sh
# Edit each copy so it reads its host list from ips_zookeeper
vim scp_all_zookeeper.sh
vim ssh_all_zookeeper.sh
vim ssh_root_zookeeper.sh
# Upload the zookeeper tarball [zookeeper-3.4.8.tar.gz] into /home/hadoop/up
rz
# Distribute the tarball to /tmp/ on every zookeeper machine
./scp_all_zookeeper.sh ~/up/zookeeper-3.4.8.tar.gz /tmp/
# Verify the distribution succeeded.
# Use a plain fixed string: as a grep regex, 'zo*' means 'z' followed by
# zero or more 'o', which matches far more than intended.
./ssh_all_zookeeper.sh "ls /tmp | grep zoo"
# Extract /tmp/zookeeper-3.4.8.tar.gz into /usr/local/ on every machine
./ssh_root_zookeeper.sh tar -xzf /tmp/zookeeper-3.4.8.tar.gz -C /usr/local/
# Verify the extraction succeeded
./ssh_root_zookeeper.sh "ls -l /usr/local/ | grep zoo"
# Change the owner of /usr/local/zookeeper-3.4.8 to the hadoop user on every machine
./ssh_root_zookeeper.sh chown -R hadoop:hadoop /usr/local/zookeeper-3.4.8
# Change the permissions of /usr/local/zookeeper-3.4.8 to 770 on every machine
./ssh_root_zookeeper.sh chmod -R 770 /usr/local/zookeeper-3.4.8
# Verify the ownership/permission changes
./ssh_root_zookeeper.sh "ls -l /usr/local | grep zookeeper-3.4.8"
# Create a 'zookeeper' symlink to /usr/local/zookeeper-3.4.8 on every machine
./ssh_root_zookeeper.sh ln -s /usr/local/zookeeper-3.4.8/ /usr/local/zookeeper
# Verify the symlink was created on every machine
./ssh_all_zookeeper.sh "ls -l /usr/local/ | grep zookeeper"
# Change the owner/group of the symlink itself (-h acts on the link, not its target)
./ssh_root_zookeeper.sh chown -h hadoop:hadoop /usr/local/zookeeper
# Back up the original configuration directory (-a preserves mode/ownership/timestamps)
./ssh_all_zookeeper.sh cp -a /usr/local/zookeeper/conf /usr/local/zookeeper/conf_back
# Rename zoo_sample.cfg to zoo.cfg (run on nn1; assumes the current
# directory is /usr/local/zookeeper/conf where zoo_sample.cfg lives)
mv zoo_sample.cfg zoo.cfg
# Edit to the desired cluster configuration (dataDir, server.N entries, etc.)
vim zoo.cfg
# Copy the zoo.cfg configuration file to every machine
./scp_all_zookeeper.sh ~/up/zoo.cfg /usr/local/zookeeper/conf/
# Verify the copy succeeded
./ssh_root_zookeeper.sh ls /usr/local/zookeeper/conf/zoo.cfg
# Delete zoo_sample.cfg on every machine
./ssh_root_zookeeper.sh rm -f /usr/local/zookeeper/conf/zoo_sample.cfg
# Verify the deletion
./ssh_root_zookeeper.sh ls /usr/local/zookeeper/conf/
# Copy zkEnv.sh into the zookeeper bin directory on every machine
./scp_all_zookeeper.sh /usr/local/zookeeper/bin/zkEnv.sh /usr/local/zookeeper/bin/
# Verify (check the file size).
# The original used Unicode smart quotes (‘…‘), which the shell does not
# treat as quoting characters; use ASCII double quotes instead.
./ssh_all_zookeeper.sh "ls -l /usr/local/zookeeper/bin | grep zkEnv.sh"
# Create /data on all 5 machines at once; the later hadoop install uses it too
./ssh_root.sh mkdir /data
# Change the owner of /data to the hadoop user on every machine
./ssh_root.sh chown hadoop:hadoop /data
# Verify on all machines
./ssh_root.sh "ls -l / | grep data"
# Create the myid file under /data on every zookeeper machine
./ssh_all_zookeeper.sh touch /data/myid
# Each server's myid must match its server.N entry in zoo.cfg.
# On the first machine:
echo "1" > /data/myid
# On the second machine:
echo "2" > /data/myid
# On the third machine:
echo "3" > /data/myid
# Check, option 1 (on each machine):
cat /data/myid
# Check, option 2 (all machines at once):
./ssh_all_zookeeper.sh cat /data/myid
# Environment settings to append to /etc/profile on every machine
export JAVA_HOME=/usr/java/jdk1.8.0_144
# /usr/bin/java links to /usr/java/jdk1.8.0_144, so that path works as JAVA_HOME
# set Hadoop paths
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native:/usr/lib64

export HBASE_HOME=/usr/local/hbase
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$HIVE_HOME/bin:/usr/local/zookeeper/bin
# Log in as root and copy /etc/profile into hadoop's ~/up directory
su - root
# Edit (append the environment settings above)
vim /etc/profile
# Use ~hadoop/up (the hadoop user's home): as root, '~/hadoop/up' would
# expand to /root/hadoop/up, and the next step reads ~/up/profile as hadoop.
cp /etc/profile ~hadoop/up
# Copy ~hadoop/up/profile to /tmp on the other machines (run as hadoop)
./scp_all.sh ~/up/profile /tmp/
# Verify on all machines
./ssh_all.sh ls -l /tmp/
# Move it into place as /etc/profile on every machine
./ssh_root.sh mv /tmp/profile /etc/
# Verify on all machines
./ssh_root.sh tail /etc/profile
# Apply the new profile on every machine.
# NOTE(review): sourcing over ssh only affects that one remote shell;
# fresh logins pick up /etc/profile automatically anyway.
./ssh_root.sh source /etc/profile
./ssh_root.sh java -version
# When you don't care about a command's (possibly large) stdout and stderr,
# redirect both to /dev/null and run it in the background
./command.sh >/dev/null 2>&1 &
# Start on all machines
./ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh start
./ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status
./ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop
# Check the java processes on all machines with jps
./ssh_all_zookeeper.sh jps
# Or check with ps (either form works; the original had the Chinese word
# for "or" inline, which is not valid shell)
ps aux | grep zookeeper   # or: ps -ef | grep zookeeper
# Log output file
cat /data/zookeeper.out
# Check ZK's running state on every machine (leader/follower/standalone)
./ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh status
# Stop the whole cluster
./ssh_all_zookeeper.sh /usr/local/zookeeper/bin/zkServer.sh stop
# Start zkCli and connect to the zookeeper cluster
/usr/local/zookeeper/bin/zkCli.sh -server nn1.hadoop:2181,nn2.hadoop:2181,s1.hadoop:2181
# Source: https://www.cnblogs.com/zss0520/p/10802155.html