Prerequisites
The following uses an Ubuntu 16.04 host as an example. First install and initialize LXD:
$ sudo apt-get install lxd
$ newgrp lxd
$ sudo lxd init
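lxd init interactively configures storage and the network bridge; the defaults are fine for this exercise. Depending on your LXD version there may also be a non-interactive mode that applies sensible defaults:
$ sudo lxd init --auto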
$ lxc remote list
$ lxc remote add images <URL>
$ lxc launch <images_name>:<version> <host_name>
# images_name is the name of the image remote -- the "images" you named in the previous step; you can pick any name you like.
# version is the image version.
# host_name is the hostname of the instance you want to create.
# For example, using the default image remote to build Hadoop, the command would be:
$ lxc launch ubuntu-daily:16.04 master
$ lxc list
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| master | RUNNING | 10.71.16.37 (eth0) | fd16:e204:21d5:5295:216:3eff:fec9:dd16 (eth0) | PERSISTENT | 0 |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
$ lxc exec master -- /bin/bash
root@master# apt-get update
root@master# apt-get upgrade -y
root@master# exit
$ hostname # run on the HOST, outside the container
Arch
$ lxc exec master -- /bin/bash
root@master:~# ping Arch
PING Arch (127.0.1.1) 56(84) bytes of data.
64 bytes from Arch (127.0.1.1): icmp_seq=1 ttl=64 time=0.020 ms
64 bytes from Arch (127.0.1.1): icmp_seq=2 ttl=64 time=0.031 ms
^C
$ lxc file push ~/Downloads/jdk-8u111-linux-x64.tar.gz master/root/jdk-8u111-linux-x64.tar.gz # push a file from the HOST into the container
$ lxc exec master -- /bin/bash
root@master# tar xf jdk-8u111-linux-x64.tar.gz -C /usr/local
root@master# echo "export JAVA_HOME=/usr/local/jdk1.8.0_111" >> ~/.bashrc
root@master# exit
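Optionally, also add the JDK to PATH inside the container so that java and jps can later be invoked without full paths (a convenience addition, not part of the original steps):
$ lxc exec master -- /bin/bash
root@master# echo 'export PATH=$JAVA_HOME/bin:$PATH' >> ~/.bashrc
root@master# exit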
$ lxc file push ~/Downloads/hadoop-2.7.3.tar.gz master/root/hadoop-2.7.3.tar.gz
$ lxc exec master -- /bin/bash
root@master# mkdir -p /home/root/HD_data
root@master# tar xf hadoop-2.7.3.tar.gz -C /usr/local
root@master# cd /usr/local/hadoop-2.7.3
root@master:/usr/local/hadoop-2.7.3# cat << EOF > etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/root/HD_data</value>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://master:8020</value>
    </property>
</configuration>
EOF
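Note that this cluster will end up with only two DataNodes, while HDFS defaults to a block replication factor of 3, so files would stay under-replicated. Optionally (not part of the original steps), lower the replication factor via etc/hadoop/hdfs-site.xml:
root@master:/usr/local/hadoop-2.7.3# cat << EOF > etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
</configuration>
EOF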
root@master:/usr/local/hadoop-2.7.3# cat << EOF > etc/hadoop/yarn-site.xml
<?xml version="1.0"?>
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
EOF
root@master:/usr/local/hadoop-2.7.3# cat << EOF > etc/hadoop/mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
EOF
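Hadoop's startup scripts read JAVA_HOME from etc/hadoop/hadoop-env.sh. Starting the daemons from an interactive shell that sources ~/.bashrc usually works, but setting it explicitly is more robust (an extra hardening step, not in the original):
root@master:/usr/local/hadoop-2.7.3# sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/local/jdk1.8.0_111|' etc/hadoop/hadoop-env.sh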
root@master:/usr/local/hadoop-2.7.3# ./bin/hdfs namenode -format
root@master:/usr/local/hadoop-2.7.3# exit
$ lxc copy master slave0
$ lxc start slave0
$ lxc copy master slave1
$ lxc start slave1
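Because fs.defaultFS points the slaves at hdfs://master:8020, each slave must be able to resolve the name master. LXD's default bridge runs dnsmasq, which normally makes container names resolvable; this can be sanity-checked before starting any daemons (a verification step, not in the original transcript):
$ lxc exec slave0 -- ping -c 2 master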
$ lxc exec master -- /bin/bash
root@master# cd /usr/local/hadoop-2.7.3
root@master:/usr/local/hadoop-2.7.3# ./sbin/hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/hadoop-2.7.3/logs/hadoop-root-namenode-master.out
root@master:/usr/local/hadoop-2.7.3# ./sbin/yarn-daemon.sh start resourcemanager
starting resourcemanager, logging to /usr/local/hadoop-2.7.3/logs/yarn-root-resourcemanager-master.out
root@master:/usr/local/hadoop-2.7.3# /usr/local/jdk1.8.0_111/bin/jps
506 NameNode
604 ResourceManager
829 Jps
root@master:/usr/local/hadoop-2.7.3# exit
$ lxc exec slave0 -- /bin/bash
root@slave0:~# cd /usr/local/hadoop-2.7.3/
root@slave0:/usr/local/hadoop-2.7.3# ./sbin/hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/hadoop-2.7.3/logs/hadoop-root-datanode-slave0.out
root@slave0:/usr/local/hadoop-2.7.3# ./sbin/yarn-daemon.sh start nodemanager
starting nodemanager, logging to /usr/local/hadoop-2.7.3/logs/yarn-root-nodemanager-slave0.out
root@slave0:/usr/local/hadoop-2.7.3# /usr/local/jdk1.8.0_111/bin/jps
433 DataNode
538 NodeManager
670 Jps
root@slave0:/usr/local/hadoop-2.7.3# exit
# Start the datanode and the nodemanager on slave1 in the same way.
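Once both DataNodes are up, the HDFS side can be verified from the master; the report should list two live datanodes (a quick check, not part of the original transcript):
$ lxc exec master -- /bin/bash
root@master# /usr/local/hadoop-2.7.3/bin/hdfs dfsadmin -report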
$ lxc list
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| NAME | STATE | IPV4 | IPV6 | TYPE | SNAPSHOTS |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| master | RUNNING | 10.71.16.37 (eth0) | fd16:e204:21d5:5295:216:3eff:fec9:dd16 (eth0) | PERSISTENT | 0 |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| slave0 | RUNNING | 10.71.16.22 (eth0) | fd16:e204:21d5:5295:216:3eff:fe8e:8e57 (eth0) | PERSISTENT | 0 |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
| slave1 | RUNNING | 10.71.16.31 (eth0) | fd16:e204:21d5:5295:216:3eff:fe5a:ef1 (eth0) | PERSISTENT | 0 |
+--------+---------+--------------------+-----------------------------------------------+------------+-----------+
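As a final smoke test, one of the MapReduce examples bundled with the distribution can be run; it exercises HDFS and YARN end to end (an optional step, not in the original):
$ lxc exec master -- /bin/bash
root@master# cd /usr/local/hadoop-2.7.3
root@master:/usr/local/hadoop-2.7.3# ./bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 10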
With LXD-based containers, we can practice building a 3-node (or larger) Hadoop cluster on a single Linux host, conveniently and with very little resource overhead. This is ideal for students and teachers who want to run demonstrations without much hardware. For more resources on LXD and on Hadoop, please refer to their respective official documentation.