vim hadoop-env.sh
export JAVA_HOME=/export/server/jdk1.8.0_341
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
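Before moving on, it can help to confirm that the JDK path referenced above actually exists on this machine; a minimal sanity check, assuming the jdk1.8.0_341 path from above:
# The java binary should exist under JAVA_HOME
ls /export/server/jdk1.8.0_341/bin/java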
Core configuration files: four of them need to be edited.
vim core-site.xml
<configuration>
  <!-- Default file system -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://192.168.88.128:8020</value>
  </property>
  <!-- Local directory where Hadoop stores its data -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/export/server/hadoop-3.3.0</value>
  </property>
  <!-- User identity for the HDFS web UI -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
  </property>
  <!-- Proxy-user settings for Hive integration -->
  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
  <!-- Trash retention time (minutes) -->
  <property>
    <name>fs.trash.interval</name>
    <value>1440</value>
  </property>
</configuration>
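As a quick sanity check once Hadoop is on the PATH (configured further below) and the cluster is running, hdfs getconf can print the effective value of a key; a minimal sketch:
# Should print hdfs://192.168.88.128:8020
hdfs getconf -confKey fs.defaultFS
# Should print 1440 (trash retention in minutes)
hdfs getconf -confKey fs.trash.interval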
vim hdfs-site.xml
<configuration>
  <!-- Host and port where the SecondaryNameNode (SNN) runs -->
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>192.168.88.129:9868</value>
  </property>
  <property>
    <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
    <value>false</value>
  </property>
</configuration>
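To confirm which node will host the SecondaryNameNode once the configuration is in effect, hdfs getconf can list it; a minimal sketch:
# Should print 192.168.88.129, matching dfs.namenode.secondary.http-address
hdfs getconf -secondaryNameNodes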
vim mapred-site.xml
<configuration>
  <!-- Default execution framework for MapReduce jobs: yarn (cluster mode) or local -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- MapReduce JobHistory server address -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>192.168.88.128:10020</value>
  </property>
  <!-- MapReduce JobHistory server web UI address -->
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>192.168.88.128:19888</value>
  </property>
  <property>
    <name>yarn.app.mapreduce.am.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.map.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
  <property>
    <name>mapreduce.reduce.env</name>
    <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>
  </property>
</configuration>
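After the cluster is up, a quick way to confirm that MapReduce jobs really run on YARN is to submit the example job bundled with the distribution; a minimal sketch, assuming the examples jar shipped with Hadoop 3.3.0 sits at its usual path (adjust the version in the jar name to your build):
# Submit the pi example; it should show up as a YARN application rather than run locally
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.0.jar pi 2 10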
vim yarn-site.xml
<configuration>
  <!-- Host where the YARN ResourceManager (master role) runs -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>192.168.88.128</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <!-- Whether to enforce physical memory limits on containers -->
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>false</value>
  </property>
  <!-- Whether to enforce virtual memory limits on containers -->
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <!-- Enable log aggregation -->
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <!-- YARN log server (JobHistory) URL -->
  <property>
    <name>yarn.log.server.url</name>
    <value>http://192.168.88.128:19888/jobhistory/logs</value>
  </property>
  <!-- How long to keep aggregated logs: 7 days -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
</configuration>
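The log aggregation settings above only pay off if the JobHistory server is actually running; a minimal sketch of starting it and pulling aggregated logs (the application ID below is a placeholder):
# Start the MapReduce JobHistory server on 192.168.88.128
mapred --daemon start historyserver
# Fetch aggregated logs for a finished application (replace with a real application ID)
yarn logs -applicationId application_XXXXXXXXXXXXX_0001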
Set the network addresses of the cluster nodes.
vim workers
192.168.88.128
192.168.88.129
192.168.88.130
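If you prefer hostnames over raw IPs in workers (hive-site.xml below refers to node1), every machine needs matching /etc/hosts entries; a sketch, where node2 and node3 are assumed names for the other two nodes:
192.168.88.128 node1
192.168.88.129 node2
192.168.88.130 node3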
Add Hadoop to the environment variables.
vim /etc/profile
export HADOOP_HOME=/export/server/hadoop-3.3.0
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Apply the environment variables with source /etc/profile, then run the hadoop command to verify that the configuration succeeded.
Sync the Hadoop installation and the environment variable file to the other two nodes, then source /etc/profile on each of them (see the sketch below).
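A minimal sketch of the sync, assuming passwordless SSH as root and the same /export/server layout on every node:
# Copy the Hadoop installation and the profile to the other two nodes
scp -r /export/server/hadoop-3.3.0 root@192.168.88.129:/export/server/
scp -r /export/server/hadoop-3.3.0 root@192.168.88.130:/export/server/
scp /etc/profile root@192.168.88.129:/etc/profile
scp /etc/profile root@192.168.88.130:/etc/profile
# Then on each node: source /etc/profile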
Initialize HDFS.
Run hdfs namenode -format on the master node. If the output contains "storage directory /export/server/hadoop-3.3.0/dfs/name has been successfully formatted", the format succeeded.
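With the NameNode formatted, the cluster can be started from the master node and checked with jps; a minimal sketch:
# Start the HDFS and YARN daemons (scripts are on the PATH via $HADOOP_HOME/sbin)
start-dfs.sh
start-yarn.sh
# On the master node, jps should list NameNode, DataNode, ResourceManager and NodeManager
jps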
vim hive-env.sh
export HADOOP_HOME=/export/server/hadoop-3.3.0
export HIVE_CONF_DIR=/export/server/hive-3.1.2/conf
export HIVE_AUX_JARS_PATH=/export/server/hive-3.1.2/lib
Add the following configuration to hive-site.xml in the conf directory.
vim hive-site.xml
<configuration>
  <!-- MySQL connection settings for metastore storage -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://192.168.26.1/hive3?createDatabaseIfNotExist=true&amp;useSSL=false&amp;useUnicode=true&amp;characterEncoding=UTF-8</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>root</value>
  </property>
  <!-- Host that HiveServer2 binds to -->
  <property>
    <name>hive.server2.thrift.bind.host</name>
    <value>node1</value>
  </property>
  <!-- Remote metastore deployment: metastore address -->
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://node1:9083</value>
  </property>
  <!-- Disable metastore event notification API authorization -->
  <property>
    <name>hive.metastore.event.db.notification.api.auth</name>
    <value>false</value>
  </property>
</configuration>
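After this configuration, the metastore schema still has to be initialized in MySQL and the Hive services started; a minimal sketch, assuming the MySQL JDBC driver jar has been placed in Hive's lib directory and Hive's bin directory is on the PATH:
# Initialize the metastore schema in the MySQL database configured above
schematool -initSchema -dbType mysql
# Start the metastore and HiveServer2 in the background
nohup hive --service metastore > /tmp/metastore.log 2>&1 &
nohup hive --service hiveserver2 > /tmp/hiveserver2.log 2>&1 &
# Connect with beeline (HiveServer2 listens on port 10000 by default)
beeline -u jdbc:hive2://node1:10000 -n root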