Hadoop Notes 1

walkersss, 2020-04-26 18:48

# Restrict su to members of the wheel group: uncomment the pam_wheel.so line in /etc/pam.d/su
sed -i 's/#auth\t\trequired\tpam_wheel.so/auth\t\trequired\tpam_wheel.so/g' '/etc/pam.d/su'
# Back up login.defs, then enforce that only wheel members may use su
cp /etc/login.defs /etc/login.defs_bak
echo "SU_WHEEL_ONLY yes" >> /etc/login.defs
# Add the hadoop user to the wheel group so it can still su to root
gpasswd -a hadoop wheel
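
A quick way to verify the restriction: from any account that is not in the wheel group, su should now be refused even with the correct root password, while the hadoop account (a wheel member) can still switch:

su - root    # from a non-wheel account: expect a permission-denied error
su - root    # from the hadoop account: prompts for the root password and succeeds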





# Host name mapping for all five cluster nodes (the same /etc/hosts on every node)
cat /etc/hosts
192.168.80.166 nn1.hadoop
192.168.80.167 nn2.hadoop
192.168.80.168 s1.hadoop
192.168.80.169 s2.hadoop
192.168.80.170 s3.hadoop

scp /etc/hosts root@192.168.80.167:/etc/
scp /etc/hosts root@192.168.80.168:/etc/
scp /etc/hosts root@192.168.80.169:/etc/
scp /etc/hosts root@192.168.80.170:/etc/
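
The four copies can also be done in a loop; a minimal sketch, assuming root SSH access to every other node:

for ip in 192.168.80.167 192.168.80.168 192.168.80.169 192.168.80.170; do
    scp /etc/hosts root@${ip}:/etc/hosts
done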

# Install NTP and do a one-off clock sync against the Aliyun time server on every node
yum install -y ntp
/usr/sbin/ntpdate ntp1.aliyun.com
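
To keep the clocks from drifting afterwards, the sync can be repeated on a schedule; a minimal sketch, assuming a 30-minute interval is acceptable (running ntpd as a service is the other common option):

(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1") | crontab -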

# Switch to the hadoop user and recreate its SSH directory and key pair (repeat on every node)
su - hadoop
rm -rf .ssh
mkdir .ssh
chmod 700 ./.ssh
ssh-keygen -t rsa


# On each node, rename its own public key so the files do not collide when gathered on one host
mv id_rsa.pub id_rsa.pubs2    # run on s2.hadoop
mv id_rsa.pub id_rsa.pubs1    # run on s1.hadoop
mv id_rsa.pub id_rsa.pubnn1   # run on nn1.hadoop
mv id_rsa.pub id_rsa.pubnn2   # run on nn2.hadoop

# On nn1.hadoop: create authorized_keys, then push nn1's public key to the other nodes
touch authorized_keys
chmod 600 authorized_keys

scp id_rsa.pubnn1 hadoop@nn2.hadoop:$PWD
scp id_rsa.pubnn1 hadoop@s1.hadoop:$PWD
scp id_rsa.pubnn1 hadoop@s2.hadoop:$PWD

# On nn2.hadoop (and likewise on the other nodes): append nn1's key to authorized_keys
cd ~/.ssh
cat id_rsa.pubnn1 >> authorized_keys
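
For passwordless SSH in every direction, each node's public key has to end up in every node's authorized_keys. A shorter alternative to the manual renaming and copying, assuming password login is still enabled at this point, is ssh-copy-id run as the hadoop user on every node:

for host in nn1.hadoop nn2.hadoop s1.hadoop s2.hadoop s3.hadoop; do
    ssh-copy-id hadoop@${host}
done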

# Make sure the hadoop user owns everything under its home directory
sudo chown -R hadoop:hadoop /home/hadoop

# Start ZooKeeper on every ZooKeeper node, then check its role and the running Java processes
zkServer.sh start
zkServer.sh status
jps
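
zkServer.sh status should report one leader and followers on the rest once a quorum is up; as a cross-check, the stat four-letter command can be sent to the client port (2181 assumed here, the default), provided nc is installed:

echo stat | nc localhost 2181    # prints mode, client connections and zxid information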


# Build dependencies for compiling Hadoop and its native libraries from source
yum -y install svn ncurses-devel gcc* lzo-devel zlib-devel autoconf automake libtool cmake openssl-devel bzip2



# Java environment variables (e.g. appended to /etc/profile)
export JAVA_HOME=/usr/local/jdk1.8.0_201
export JRE_HOME=$JAVA_HOME/jre
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
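
A quick check after sourcing the file (assuming the exports above were appended to /etc/profile):

source /etc/profile
java -version    # should report 1.8.0_201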


#!/bin/bash
# Filename: ssh_all.sh -- run the same command on every host listed in ./ips
RUN_HOME=$(cd "$(dirname "$0")"; echo "${PWD}")

# One hostname or IP per line in the ips file next to this script
NOW_LIST=($(cat ${RUN_HOME}/ips))

SSH_USER="hadoop"
for i in ${NOW_LIST[@]}; do
    f_cmd="ssh $SSH_USER@$i \"$*\""
    echo $f_cmd
    if eval $f_cmd; then
        echo "OK"
    else
        echo "FAIL"
    fi
done
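
Usage sketch, assuming a file named ips next to the script with one hostname or IP per line (e.g. nn2.hadoop, s1.hadoop, s2.hadoop, s3.hadoop):

chmod +x ssh_all.sh
./ssh_all.sh "jps"    # run jps on every listed host as the hadoop user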




#!/bin/bash
# Filename: ssh_root.sh -- run a command as root on every host listed in ./ips,
# by calling the remote ~/exe.sh wrapper (see below), which escalates with su
RUN_HOME=$(cd "$(dirname "$0")"; echo "${PWD}")

NOW_LIST=($(cat ${RUN_HOME}/ips))

SSH_USER="hadoop"
for i in ${NOW_LIST[@]}; do
    f_cmd="ssh $SSH_USER@$i ~/exe.sh \"$*\""
    echo $f_cmd
    if eval $f_cmd; then
        echo "OK"
    else
        echo "FAIL"
    fi
done
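
Usage sketch (the command string is passed straight to the remote exe.sh wrapper shown next):

./ssh_root.sh "yum -y install ntp"    # run a root-level command on every host in ips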



#!/bin/bash
# Filename: exe.sh -- lives in each node's home directory; runs the given command as root via su
cmd=$*

su - <<EOF
$cmd
EOF
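
exe.sh must exist and be executable in the hadoop user's home directory on every node before ssh_root.sh will work; a minimal distribution sketch, assuming the copy helper below is saved as scp_all.sh:

chmod +x exe.sh
./scp_all.sh exe.sh /home/hadoop/    # push the wrapper to every host listed in ips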



#!/bin/bash
# Copy a local file ($1) to the given remote path ($2) on every host listed in ./ips
# (saved here as scp_all.sh, matching the other helper scripts)
RUN_HOME=$(cd "$(dirname "$0")"; echo "${PWD}")

NOW_LIST=($(cat ${RUN_HOME}/ips))

SSH_USER="hadoop"
for i in ${NOW_LIST[@]}; do
    f_cmd="scp $1 $SSH_USER@$i:$2"
    echo $f_cmd
    if eval $f_cmd; then
        echo "OK"
    else
        echo "FAIL"
    fi
done
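
Usage sketch (the tarball name is only an example):

./scp_all.sh zookeeper-3.4.14.tar.gz /tmp/    # copy the file to /tmp/ on every host in ips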


# As root: hand the Hadoop install over to the hadoop user and add a version-independent symlink
chown -R hadoop:hadoop /usr/local/hadoop-2.7.3
chmod -R 770 /usr/local/hadoop-2.7.3
ln -s /usr/local/hadoop-2.7.3 /usr/local/hadoop
chown -R hadoop:hadoop /usr/local/hadoop



export JAVA_HOME=/usr/local/jdk1.8.0_201
export JRE_HOME=$JAVA_HOME/jre
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
#Zookeeper
export ZOOKEEPER_HOME=/home/hadoop/apps/zookeeper-3.4.14
#export PATH=$PATH:$ZOOKEEPER_HOME/bin


#set Hadoop_compile
export MAVEN_HOME=/usr/local/apache-maven-3.3.9
export FINDBUGS_HOME=/usr/local/findbugs-3.0.1
export PROTOBUF_HOME=/usr/local/protobuf-2.5.0
export ANT_HOME=/usr/local/apache-ant-1.9.14
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$JAVA_HOME/bin:$MAVEN_HOME/bin:$FINDBUGS_HOME/bin:$ANT_HOME/bin
export MAVEN_OPTS="-Xmx2g -XX:MaxMetaspaceSize=512M -XX:ReservedCodeCacheSize=512m"





# Switch the yum base repo to the Huawei Cloud mirror, then rebuild the cache
cp -a /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
yum clean all
yum makecache


# Base packages needed on every node (compilers, compression libs, network tools, etc.)
yum install -y openssh-server vim gcc gcc-c++ glibc-headers bzip2-devel lzo-devel curl wget openssh-clients zlib-devel autoconf automake cmake libtool openssl-devel fuse-devel snappy-devel telnet unzip zip net-tools.x86_64 firewalld systemd



# zoo.cfg entries: data directory and transaction-log directory
dataDir=/data/zkdata
dataLogDir=/data/log/zklog

# Create the directories and hand them (and the ZooKeeper install) to the hadoop user
sudo mkdir -p /data/zkdata

sudo mkdir -p /data/log/zklog

sudo chown -R hadoop:hadoop /data
sudo chown -R hadoop:hadoop /usr/local/zookeeper
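
Each ZooKeeper server also needs a myid file inside dataDir whose number matches its server.N line in zoo.cfg; a minimal sketch, assuming the ZooKeeper hosts are numbered 1, 2, 3 in that file:

echo 1 > /data/zkdata/myid    # use 2 on the second ZooKeeper host, 3 on the third, and so on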


Append the following at the end of the file (e.g. /etc/profile):
#set Hadoop Path
export JAVA_HOME=/usr/local/jdk1.8.0_201
export JRE_HOME=$JAVA_HOME/jre
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_COMMON_LIB_NATIVE_DIR"
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native:/usr/lib64

export HBASE_HOME=/usr/local/hbase
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$HIVE_HOME/bin:/usr/local/zookeeper/bin

#Zookeeper
export ZOOKEEPER_HOME=/usr/local/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin


#set Hadoop_compile
export MAVEN_HOME=/usr/local/apache-maven-3.3.9
export FINDBUGS_HOME=/usr/local/findbugs-3.0.1
export PROTOBUF_HOME=/usr/local/protobuf-2.5.0
export ANT_HOME=/usr/local/apache-ant-1.9.14
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$JAVA_HOME/bin:$MAVEN_HOME/bin:$FINDBUGS_HOME/bin:$ANT_HOME/bin
export MAVEN_OPTS="-Xmx2g -XX:MaxMetaspaceSize=512M -XX:ReservedCodeCacheSize=512m"
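
After sourcing the profile on every node, the Hadoop CLI and its native libraries can be sanity-checked (assuming the exports above were appended to /etc/profile):

source /etc/profile
hadoop version           # should report 2.7.3
hadoop checknative -a    # shows whether the native hadoop/zlib/snappy/lz4 libraries load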





<!-- hdfs-site.xml: HDFS HA settings (example host names as in the Hadoop docs) -->
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>

<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>nn1,nn2</value>
</property>

<property>
  <name>dfs.namenode.rpc-address.mycluster.nn1</name>
  <value>machine1.example.com:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.nn2</name>
  <value>machine2.example.com:8020</value>
</property>

<property>
  <name>dfs.namenode.http-address.mycluster.nn1</name>
  <value>machine1.example.com:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn2</name>
  <value>machine2.example.com:50070</value>
</property>

<property>
  <name>dfs.namenode.shared.edits.dir</name>
  <value>qjournal://node1.example.com:8485;node2.example.com:8485;node3.example.com:8485/mycluster</value>
</property>

<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
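
With these HA properties in place (plus dfs.ha.fencing.methods and dfs.journalnode.edits.dir, which this note does not list), the first bring-up on Hadoop 2.7.x looks roughly like the sketch below; the host roles are assumptions based on the cluster above:

# On each JournalNode host (assumed here: s1, s2, s3)
hadoop-daemon.sh start journalnode

# On nn1.hadoop: format HDFS and start the first NameNode
hdfs namenode -format
hadoop-daemon.sh start namenode

# On nn2.hadoop: copy nn1's metadata, then start the standby NameNode
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode

# From either NameNode: make nn1 active and confirm both states
hdfs haadmin -transitionToActive nn1
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2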



# Data directory for HDFS/YARN on every node, owned by the hadoop user
sudo mkdir -p /data/hadoopdata/
sudo chown -R hadoop:hadoop /data
