
歡迎光臨散文網 會員登陸 & 注冊

hadoop+zookeeper+kafka 安裝及使用教程

2023-02-19 11:49 作者:bili_39183997178  | 我要投稿

hadoop安裝

[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel

[root@hadoop1 ~]# tar -zxf hadoop-2.7.7.tar.gz

[root@hadoop1 ~]# mv hadoop-2.7.7 /usr/local/hadoop

[root@hadoop1 ~]# chown -R 0.0 /usr/local/hadoop

配置JAVA運行環境

[root@hadoop1 ~]# vim /etc/hosts

192.168.1.50 hadoop1

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh

25:  export JAVA_HOME="/usr"

33:  export HADOOP_CONF_DIR="/usr/local/hadoop/etc/hadoop"

[root@hadoop1 ~]# /usr/local/hadoop/bin/hadoop version


集群安裝部署


HDFS部署

[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel

[root@hadoop1 ~]# vim /etc/hosts

192.168.1.50 hadoop1

192.168.1.51 node-0001

192.168.1.52 node-0002

192.168.1.53 node-0003

以下操作僅在 hadoop1 上執(zhí)行

[root@hadoop1 ~]# vim /etc/ssh/ssh_config

# 60行新添加

StrictHostKeyChecking no

[root@hadoop1 ~]# ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa

[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do

                      ssh-copy-id -i /root/.ssh/id_rsa.pub ${i}

                  done


配置文件語法格式 -- 官方手冊?

    <property>
        <name></name>
        <value></value>
    </property>

1、配置 hadoop-env.sh 參考 配置JAVA運行環境 案例

2、配置slaves (localhost 必須刪除)

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/slaves

node-0001

node-0002

node-0003

3、配置core-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>

? ? <property>

? ? ? ? <name>fs.defaultFS</name>

? ? ? ? <value>hdfs://hadoop1:9000</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.tmp.dir</name>

? ? ? ? <value>/var/hadoop</value>

? ? </property>

</configuration>

4、配置hdfs-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>

? ? <property>

? ? ? ? <name>dfs.namenode.http-address</name>

? ? ? ? <value>hadoop1:50070</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.secondary.http-address</name>

? ? ? ? <value>hadoop1:50090</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.replication</name>

? ? ? ? <value>2</value>

? ? </property>

</configuration>

5、啟動集群 [以下操作僅在 hadoop1 上執(zhí)行]

[root@hadoop1 ~]# for i in node-{0001..0003};do

                      rsync -aXSH --delete /usr/local/hadoop ${i}:/usr/local/

                  done

[root@hadoop1 ~]# mkdir /var/hadoop

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format

[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-dfs.sh


6、驗證集群配置

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report

mapreduce部署

[root@hadoop1 ~]# cd /usr/local/hadoop/etc/hadoop/

[root@hadoop1 hadoop]# cp mapred-site.xml.template mapred-site.xml

[root@hadoop1 hadoop]# vim mapred-site.xml

<configuration>

? ? <property>

? ? ? ? <name>mapreduce.framework.name</name>

? ? ? ? <value>yarn</value>

? ? </property>

</configuration>


Yarn部署


[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/yarn-site.xml

<configuration>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.hostname</name>

? ? ? ? <value>hadoop1</value>

? ? </property>

<!-- Site specific YARN configuration properties -->

? ? <property>

? ? ? ? <name>yarn.nodemanager.aux-services</name>

? ? ? ? <value>mapreduce_shuffle</value>

? ? </property>

</configuration>

啟動集群 [以下操作僅在 hadoop1 上執(zhí)行]

[root@hadoop1 ~]# for i in node-{0001..0003};do

                      rsync -avXSH --delete /usr/local/hadoop/etc ${i}:/usr/local/hadoop/

                  done

[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-yarn.sh

驗證集群

[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list

web頁面訪問

namenode:  http://hadoop1:50070

secondarynamenode:  http://hadoop1:50090

resourcemanager:  http://hadoop1:8088/cluster



Hadoop集群管理

重新初始化集群

    警告：該方法會丟失所有數據

1、停止集群  /usr/local/hadoop/sbin/stop-all.sh

2、刪除所有節點的  /var/hadoop/*

3、在 hadoop1 上重新格式化  /usr/local/hadoop/bin/hdfs namenode -format

4、啟動集群  /usr/local/hadoop/sbin/start-all.sh

[root@hadoop1 ~]# /usr/local/hadoop/sbin/stop-all.sh

[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do

                      ssh ${i} 'rm -rf /var/hadoop/*'

                  done

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format

[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-all.sh

增加新的節(jié)點

[root@hadoop1 ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.1.54

[root@hadoop1 ~]# vim /etc/hosts

192.168.1.50? ? hadoop1

192.168.1.51? ? node-0001

192.168.1.52? ? node-0002

192.168.1.53? ? node-0003

192.168.1.54? ? newnode

[root@hadoop1 ~]# for i in node-{0001..0003} newnode;do

                      rsync -av /etc/hosts ${i}:/etc/

                  done

[root@hadoop1 ~]# rsync -aXSH /usr/local/hadoop newnode:/usr/local/

新節(jié)點執(zhí)行

[root@newnode ~]# yum install -y java-1.8.0-openjdk-devel

[root@newnode ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start datanode

[root@newnode ~]# /usr/local/hadoop/bin/hdfs dfsadmin -setBalancerBandwidth 500000000

[root@newnode ~]# /usr/local/hadoop/sbin/start-balancer.sh

[root@newnode ~]# /usr/local/hadoop/sbin/yarn-daemon.sh start nodemanager

[root@newnode ~]# jps

1186 DataNode

1431 NodeManager

1535 Jps

驗證集群(hadoop1上執(zhí)行)

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report

... ...

-------------------------------------------------

Live datanodes (4):

[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list

刪除節(jié)點

配置數據遷移 hdfs-site.xml（hadoop1上做，不需要同步）


[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

? ? <property>

? ? ? ? <name>dfs.hosts.exclude</name>

? ? ? ? <value>/usr/local/hadoop/etc/hadoop/exclude</value>

? ? </property>

配置排除主機列表，並遷移數據（hadoop1上執行）


# 在刪除配置文件中添加 newnode

[root@hadoop1 ~]# echo newnode >/usr/local/hadoop/etc/hadoop/exclude

# 遷移數據

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -refreshNodes

# 查看狀態，僅當節點狀態為 Decommissioned 時候才可以下線

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report

下線節(jié)點(newnode執(zhí)行)

[root@newnode ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop datanode

[root@newnode ~]# /usr/local/hadoop/sbin/yarn-daemon.sh stop nodemanager



NFS網關


HDFS用戶授權(quán)

hadoop1與nfsgw都要添加用戶

[root@hadoop1 ~]# groupadd -g 800 nfsuser

[root@hadoop1 ~]# useradd -g 800 -u 800 -r -d /var/hadoop nfsuser

#----------------------------------------------------------------------------------------

[root@nfsgw ~]# groupadd -g 800 nfsuser

[root@nfsgw ~]# useradd -g 800 -u 800 -r -d /var/hadoop nfsuser

HDFS集群授權(quán)


[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>

? ? <property>

? ? ? ? <name>fs.defaultFS</name>

? ? ? ? <value>hdfs://hadoop1:9000</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.tmp.dir</name>

? ? ? ? <value>/var/hadoop</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.proxyuser.nfsuser.groups</name>

? ? ? ? <value>*</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.proxyuser.nfsuser.hosts</name>

? ? ? ? <value>*</value>

? ? </property>

</configuration>

[root@hadoop1 ~]# /usr/local/hadoop/sbin/stop-all.sh

[root@hadoop1 ~]# for i in node-{0001..0003};do

                      rsync -avXSH /usr/local/hadoop/etc ${i}:/usr/local/hadoop/

                  done

[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-dfs.sh

[root@hadoop1 ~]# jps

5925 NameNode

6122 SecondaryNameNode

6237 Jps

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report

... ...

-------------------------------------------------

Live datanodes (3):



NFS網關服務

[root@nfsgw ~]# yum remove -y rpcbind nfs-utils

[root@nfsgw ~]# vim /etc/hosts

192.168.1.50? ? hadoop1

192.168.1.51? ? node-0001

192.168.1.52? ? node-0002

192.168.1.53? ? node-0003

192.168.1.55? ? nfsgw

[root@nfsgw ~]# yum install -y java-1.8.0-openjdk-devel

配置 HDFS 客戶端

[root@nfsgw ~]# rsync -aXSH --delete hadoop1:/usr/local/hadoop /usr/local/

[root@nfsgw ~]# /usr/local/hadoop/bin/hadoop fs -ls /

...

配置網關服務


[root@nfsgw ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>

? ? <property>

? ? ? ? <name>dfs.namenode.http-address</name>

? ? ? ? <value>hadoop1:50070</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.secondary.http-address</name>

? ? ? ? <value>hadoop1:50090</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.replication</name>

? ? ? ? <value>2</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.hosts.exclude</name>

? ? ? ? <value>/usr/local/hadoop/etc/hadoop/exclude</value>

? ? </property>

? ? <property>

? ? ? ? <name>nfs.exports.allowed.hosts</name>

? ? ? ? <value>* rw</value>

? ? </property>

? ? <property>

? ? ? ? <name>nfs.dump.dir</name>

? ? ? ? <value>/var/nfstmp</value>

? ? </property>

</configuration>


啟動網關服務


[root@nfsgw ~]# mkdir /var/nfstmp

[root@nfsgw ~]# chown nfsuser.nfsuser /var/nfstmp

[root@nfsgw ~]# rm -rf /usr/local/hadoop/logs/*

[root@nfsgw ~]# setfacl -m user:nfsuser:rwx /usr/local/hadoop/logs

[root@nfsgw ~]# getfacl /usr/local/hadoop/logs

[root@nfsgw ~]# cd /usr/local/hadoop/

[root@nfsgw hadoop]# ./sbin/hadoop-daemon.sh --script ./bin/hdfs start portmap

[root@nfsgw hadoop]# jps

1376 Portmap

1416 Jps

[root@nfsgw hadoop]# rm -rf /tmp/.hdfs-nfs

[root@nfsgw hadoop]# sudo -u nfsuser ./sbin/hadoop-daemon.sh --script ./bin/hdfs start nfs3

[root@nfsgw hadoop]# sudo -u nfsuser jps

1452 Nfs3

1502 Jps

mount驗證

[root@newnode ~]# yum install -y nfs-utils

[root@newnode ~]# showmount -e 192.168.1.55

Export list for 192.168.1.55:

/ *

[root@newnode ~]# mount -t nfs -o vers=3,proto=tcp,nolock,noacl,noatime,sync 192.168.1.55:/ /mnt/

[root@newnode ~]# df -h

Filesystem? ? ? Size? Used Avail Use% Mounted on

192.168.1.55:/? 118G? ?15G? 104G? 13% /mnt



zookeeper集群圖例


zookeeper集群

[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk-devel

[root@hadoop1 ~]# tar zxf zookeeper-3.4.13.tar.gz

[root@hadoop1 ~]# mv zookeeper-3.4.13 /usr/local/zookeeper

[root@hadoop1 ~]# cd /usr/local/zookeeper/conf/

[root@hadoop1 conf]# cp zoo_sample.cfg zoo.cfg

[root@hadoop1 conf]# vim zoo.cfg

# 配置文件最后添加

server.1=node-0001:2888:3888

server.2=node-0002:2888:3888

server.3=node-0003:2888:3888

server.4=hadoop1:2888:3888:observer

[root@hadoop1 ~]# for i in node-{0001..0003};do

                      rsync -aXSH --delete /usr/local/zookeeper ${i}:/usr/local/

                  done

所有節點手工啟動服務

[root@hadoop1 ~]# mkdir /tmp/zookeeper

[root@hadoop1 ~]# grep -Po "\d+(?==${HOSTNAME})" /usr/local/zookeeper/conf/zoo.cfg >/tmp/zookeeper/myid

[root@hadoop1 ~]# /usr/local/zookeeper/bin/zkServer.sh start

[root@hadoop1 ~]# jps

1001 QuorumPeerMain

當所有節(jié)點啟動完成以后使用命令驗證:

/usr/local/zookeeper/bin/zkServer.sh status

zookeeper集群管理

[root@hadoop1 ~]# yum install -y socat

[root@hadoop1 ~]# socat - TCP:node-0001:2181

ruok

imok

[root@hadoop1 bin]# ./zkstats hadoop1 node-{0001..0003}

? ? ? ? ? ? ?hadoop1 Mode: observer

? ? ? ? ? ?node-0001 Mode: follower

? ? ? ? ? ?node-0002 Mode: leader

? ? ? ? ? ?node-0003 Mode: follower

kafka集群

[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk-devel

[root@hadoop1 ~]# tar zxf kafka_2.12-2.1.0.tgz

[root@hadoop1 ~]# mv kafka_2.12-2.1.0 /usr/local/kafka

[root@hadoop1 ~]# for i in node-{0001..0003};do

                      rsync -aXSH --delete /usr/local/kafka ${i}:/usr/local/

                  done

2、修改 node-0001,node-0002,node-0003 配置文件并啟動服務

[root@node-0001 ~]# vim /usr/local/kafka/config/server.properties

21   broker.id=1

123  zookeeper.connect=node-0001:2181,node-0002:2181,node-0003:2181

[root@node-0001 ~]# /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties

[root@node-0001 ~]# jps

1400 Kafka

3、驗證(在不同機器上執(zhí)行)

[root@node-0001 ~]# /usr/local/kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --zookeeper localhost:2181 --topic mymsg

#----------------------------------------------------------------------------------------

[root@node-0002 ~]# /usr/local/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic mymsg

#----------------------------------------------------------------------------------------

[root@node-0003 ~]# /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mymsg


Hadoop高可用集群


環(huán)境初始化

hadoop1 上執(zhí)行

[root@hadoop1 ~]# vim /etc/hosts

192.168.1.50? ? hadoop1

192.168.1.56? ? hadoop2

192.168.1.51? ? node-0001

192.168.1.52? ? node-0002

192.168.1.53? ? node-0003

[root@hadoop1 ~]# rsync -aXSH --delete /root/.ssh hadoop2:/root/

[root@hadoop1 ~]# for i in hadoop2 node-{0001..0003};do

                      rsync -av /etc/hosts ${i}:/etc/

                  done

hadoop2 上執(zhí)行

[root@hadoop2 ~]# yum install -y java-1.8.0-openjdk-devel

[root@hadoop2 ~]# vim /etc/ssh/ssh_config

# 60行新添加

StrictHostKeyChecking no

集群配置文件

在 hadoop1 上完成以下文件的配置

1、配置 hadoop-env.sh

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh

25:  export JAVA_HOME="java-1.8.0-openjdk安裝路徑"

33:  export HADOOP_CONF_DIR="/usr/local/hadoop/etc/hadoop"

2、配置 slaves

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/slaves

node-0001

node-0002

node-0003

3、配置 core-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml

<configuration>

? ? <property>

? ? ? ? <name>fs.defaultFS</name>

? ? ? ? <value>hdfs://mycluster</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.tmp.dir</name>

? ? ? ? <value>/var/hadoop</value>

? ? </property>

? ? <property>

? ? ? ? <name>ha.zookeeper.quorum</name>

? ? ? ? <value>node-0001:2181,node-0002:2181,node-0003:2181</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.proxyuser.nfsuser.groups</name>

? ? ? ? <value>*</value>

? ? </property>

? ? <property>

? ? ? ? <name>hadoop.proxyuser.nfsuser.hosts</name>

? ? ? ? <value>*</value>

? ? </property>

</configuration>

4、配置 hdfs-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

<configuration>

? ? <property>

? ? ? ? <name>dfs.nameservices</name>

? ? ? ? <value>mycluster</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.ha.namenodes.mycluster</name>

? ? ? ? <value>nn1,nn2</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.rpc-address.mycluster.nn1</name>

? ? ? ? <value>hadoop1:8020</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.rpc-address.mycluster.nn2</name>

? ? ? ? <value>hadoop2:8020</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.http-address.mycluster.nn1</name>

? ? ? ? <value>hadoop1:50070</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.http-address.mycluster.nn2</name>

? ? ? ? <value>hadoop2:50070</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.namenode.shared.edits.dir</name>

? ? ? ? <value>qjournal://node-0001:8485;node-0002:8485;node-0003:8485/mycluster</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.journalnode.edits.dir</name>

? ? ? ? <value>/var/hadoop/journal</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.client.failover.proxy.provider.mycluster</name>

? ? ? ? <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>

? ? </property>

? ? <property>?

? ? ? ? <name>dfs.ha.fencing.methods</name>

? ? ? ? <value>sshfence</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.ha.fencing.ssh.private-key-files</name>

? ? ? ? <value>/root/.ssh/id_rsa</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.ha.automatic-failover.enabled</name>

? ? ? ? <value>true</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.replication</name>

? ? ? ? <value>2</value>

? ? </property>

? ? <property>

? ? ? ? <name>dfs.hosts.exclude</name>

? ? ? ? <value>/usr/local/hadoop/etc/hadoop/exclude</value>

? ? </property>

</configuration>

5、配置 mapred-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/mapred-site.xml

<configuration>

? ? <property>

? ? ? ? <name>mapreduce.framework.name</name>

? ? ? ? <value>yarn</value>

? ? </property>

</configuration>

6、配置 yarn-site.xml

[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/yarn-site.xml

<configuration>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.ha.enabled</name>

? ? ? ? <value>true</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.recovery.enabled</name>

? ? ? ? <value>true</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.store.class</name>

? ? ? ? <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.zk-address</name>

? ? ? ? <value>node-0001:2181,node-0002:2181,node-0003:2181</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.cluster-id</name>

? ? ? ? <value>yarn-ha</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.ha.rm-ids</name>

? ? ? ? <value>rm1,rm2</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.hostname.rm1</name>

? ? ? ? <value>hadoop1</value>

? ? </property>

? ? <property>

? ? ? ? <name>yarn.resourcemanager.hostname.rm2</name>

? ? ? ? <value>hadoop2</value>

? ? </property>

<!-- Site specific YARN configuration properties -->

? ? <property>

? ? ? ? <name>yarn.nodemanager.aux-services</name>

? ? ? ? <value>mapreduce_shuffle</value>

? ? </property>

</configuration>

初始化啟動集群

1、重啟機器、在 node-0001,node-0002,node-0003 啟動 zookeeper


[root@node-0001 ~]# /usr/local/zookeeper/bin/zkServer.sh start

#----------------------------------------------------------------------------------------

[root@node-0002 ~]# /usr/local/zookeeper/bin/zkServer.sh start

#----------------------------------------------------------------------------------------

[root@node-0003 ~]# /usr/local/zookeeper/bin/zkServer.sh start

#----------------------------------------------------------------------------------------

[root@hadoop1 ~]# zkstats node-{0001..0003}

? ? ? ? ? ?node-0001 Mode: follower

? ? ? ? ? ?node-0002 Mode: leader

? ? ? ? ? ?node-0003 Mode: follower

2、清空實驗數據並同步配置文件（hadoop1 上執行）

[root@hadoop1 ~]# rm -rf /var/hadoop/* /usr/local/hadoop/logs

[root@hadoop1 ~]# for i in hadoop2 node-{0001..0003};do

                      rsync -av /etc/hosts ${i}:/etc/

                      rsync -aXSH --delete /var/hadoop ${i}:/var/

                      rsync -aXSH --delete /usr/local/hadoop ${i}:/usr/local/

                  done

3、在 node-0001,node-0002,node-0003 啟動 journalnode 服務

[root@node-0001 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode

[root@node-0001 ~]# jps

1037 JournalNode

#----------------------------------------------------------------------------------------

[root@node-0002 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode

#----------------------------------------------------------------------------------------

[root@node-0003 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode

4、初始化(hadoop1 上執(zhí)行)

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs zkfc -formatZK

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -initializeSharedEdits

[root@hadoop1 ~]# rsync -aXSH --delete /var/hadoop/dfs hadoop2:/var/hadoop/

5、停止在 node-0001,node-0002,node-0003 上的 journalnode 服務

[root@node-0001 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode

#----------------------------------------------------------------------------------------

[root@node-0002 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode

#----------------------------------------------------------------------------------------

[root@node-0003 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode

6、啟動集群

#-------------------- 下面這條命令在 hadoop1 上執(zhí)行 ----------------------------------------

[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-all.sh

#-------------------- 下面這條命令在 hadoop2 上執(zhí)行 ----------------------------------------

[root@hadoop2 ~]# /usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager

驗證集群

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2

[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1

[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2

[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report

[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list



hadoop+zookeeper+kafka 安裝及使用教程的評論 (共 條)

分享到微博請遵守國家法律