# java -version
# vim /etc/profile.d/java.sh
export JAVA_HOME=/usr
# yum install java-1.8.0-openjdk-devel
# mkdir /bdapps
# tar xf hadoop-2.7.4.tar.gz -C /bdapps/
# cd /bdapps
# ln -sv hadoop-2.7.4 hadoop
# cd hadoop/
# vim /etc/profile.d/hadoop.sh 编辑环境配置文件
export HADOOP_PREFIX=/bdapps/hadoop
export PATH=$PATH:${HADOOP_PREFIX}/bin:${HADOOP_PREFIX}/sbin
export HADOOP_YARN_HOME=${HADOOP_PREFIX}
export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
# . /etc/profile.d/hadoop.sh 使环境变量在当前shell中立即生效(注意 . 后有空格,是source命令)
# ls
bin/ 存放二进制程序文件
include/ 头文件目录
lib/ 库文件目录
sbin/ 脚本
etc/hadoop .sh格式文件是配置hadoop运行环境 .xml结尾的文件是配置文件
# groupadd hadoop 创建用户和组
# useradd -g hadoop yarn
# useradd -g hadoop hdfs
# useradd -g hadoop mapred
# mkdir -pv /data/hadoop/hdfs/{nn,snn,dn} 创建数据目录(注意hdfs后要有 / ,否则会创建hdfsnn等目录)
# chown -R hdfs:hadoop /data/hadoop/hdfs
# ll
# mkdir logs 创建日志目录
# chown -R yarn:hadoop logs
# chown -R yarn:hadoop ./*
配置hadoop
# cd etc/hadoop
# vim core-site.xml 包含了NameNode的一些信息
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:8020</value>
<final>true</final>
</property>
</configuration>
# vim hdfs-site.xml 配置HDFS相关的属性
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///data/hadoop/hdfs/nn</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///data/hadoop/hdfs/dn</value>
  </property>
  <property>
    <name>fs.checkpoint.dir</name>
    <value>file:///data/hadoop/hdfs/snn</value>
  </property>
</configuration>
注意,如果需要其他用户对hdfs有写入权限,还需要添加
<property>
  <name>dfs.permissions</name> 不做严格权限限制
  <value>false</value>
</property>
# cp mapred-site.xml.template mapred-site.xml
# vim mapred-site.xml 定义MapReduce
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
# vim yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>localhost:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name> <!-- 调度器地址 -->
    <value>localhost:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name> <!-- 资源追踪器地址 -->
    <value>localhost:8031</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name> <!-- 管理地址 -->
    <value>localhost:8033</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>localhost:8088</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name> <!-- 辅助服务 -->
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name> <!-- shuffle类 -->
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
  </property>
</configuration>
格式化HDFS
以hadoop用户运行如下命令
# su - hdfs
$ hdfs namenode -format
其中的“Storage directory /data/hadoop/hdfs/nn has been successfully formatted”一行信息表明对应的存储已经格式化成功。
启动hadoop
$ hadoop-daemon.sh start namenode
$ jps 查看正在运行的Hadoop进程
$ hadoop-daemon.sh start secondarynamenode 启动辅助名称节点
$ hadoop-daemon.sh start datanode
$ hdfs dfs -ls / 查看目录
$ hdfs dfs -mkdir /test
drwxr-xr-x - hdfs supergroup
$ hdfs dfs -put /etc/fstab /test/fstab
$ hdfs dfs -ls -R / 查看是否上传成功
$ hdfs dfs -cat /test/fstab (对应cat /data/hadoop/dfs/dn/current/...)
$ su - yarn
$ yarn-daemon.sh start resourcemanager
$ jps
$ yarn-daemon.sh start nodemanager
HDFS和YARN ResourceManager各自提供了一个Web接口,通过这些接口可检查HDFS集群以及YARN集群的相关状态信息
HDFS-NameNode http://<NameNodeHost>:50070
YARN-ResourceManager http://<ResourceManagerHost>:8088
运行测试程序
hadoop-YARN自带了许多样例程序,它们位于hadoop安装路径下share/hadoop/mapreduce目录中,其中hadoop-mapreduce-examples可做mapreduce程序测试
# su - hdfs
$ yarn jar /bdapps/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.4.jar
原创文章,作者:nene,如若转载,请注明出处:http://www.178linux.com/89608

