Created
April 6, 2013 19:15
-
-
Save ay65535/5327270 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/sh
#
# Reference: http://metasearch.sourceforge.jp/wiki/index.php?Hadoop%A5%BB%A5%C3%A5%C8%A5%A2%A5%C3%A5%D7
#
# ANSI escape sequences for colored progress messages.  The values are kept
# as literal "\033[...m" strings; they are interpreted later by the escape
# handling of this /bin/sh's echo built-in.
bla="\033[30m"    # black
red="\033[31m"    # red
gre="\033[32m"    # green
yel="\033[33m"    # yellow
blu="\033[34m"    # blue
pur="\033[35m"    # purple
cya="\033[36m"    # cyan
lgra="\033[1;30m" # light gray
lred="\033[1;31m" # light red
lgre="\033[1;32m" # light green
lyel="\033[1;33m" # light yellow
lblu="\033[1;34m" # light blue
lpur="\033[1;35m" # light purple
lcya="\033[1;36m" # light cyan
whit="\033[1;37m" # white
defa="\033[0m"    # reset to the default color
# --- Service accounts --------------------------------------------------------
# Create the hadoop (launch user), hdfs and mapred accounts if they do not
# exist yet.  The grep is anchored to the start of the passwd entry so an
# unrelated user whose name merely contains the string (e.g. "hadoop2")
# cannot suppress account creation.
if ! grep -q '^hadoop:' /etc/passwd; then
  # printf '%b' interprets the \n and \033 escapes portably; "echo -n" with
  # backslash escapes is not portable across /bin/sh implementations.
  printf '%b' "\n${lgre}起動ユーザを追加します。${defa}"
  read -r x
  sudo adduser --shell /bin/sh --uid 10001 --disabled-password hadoop
fi
if ! grep -q '^hdfs:' /etc/passwd; then
  sudo adduser --shell /bin/sh --uid 10002 --disabled-password hdfs
  sudo usermod -G hadoop hdfs    # hdfs joins the hadoop group
fi
if ! grep -q '^mapred:' /etc/passwd; then
  sudo adduser --shell /bin/sh --uid 10003 --disabled-password mapred
  sudo usermod -G hadoop mapred  # mapred joins the hadoop group
fi
# --- Local directory layout --------------------------------------------------
# Lay out the /grid tree on the first run only (skipped entirely once /grid
# exists).
if [ ! -d /grid ]; then
  echo "\n${lgre}==> ${whit}ディレクトリを作成します.${defa}"
  read x
  for dir in /grid/usr /grid/vol/0/var/lib /grid/vol/0/var/log/hdfs /grid/vol/0/var/run; do
    sudo mkdir -p "$dir"
  done
  # Everything under var is owned by root but group-writable by hadoop.
  sudo chown -R root:hadoop /grid/vol/0/var
  sudo chmod -R 775 /grid/vol/0/var
  # Manual per-daemon directory setup, kept for reference:
  #sudo mkdir -p /grid/etc/keytabs
  #sudo mkdir -p /grid/vol/0/var/lib/hdfs/checkpoint
  #sudo chown hdfs:hdfs /grid/vol/0/var/lib/hdfs/checkpoint
  #sudo mkdir /grid/vol/0/var/lib/hdfs/name
  #sudo chown hdfs:hdfs /grid/vol/0/var/lib/hdfs/name
  #sudo mkdir /grid/vol/0/var/lib/hdfs/data
  #sudo chown hdfs:hdfs /grid/vol/0/var/lib/hdfs/data
  #sudo chmod 700 /grid/vol/0/var/lib/hdfs/data
  #sudo mkdir -p /grid/vol/0/var/lib/mapred/history
  #sudo chown mapred:mapred /grid/vol/0/var/lib/mapred/history
  #sudo mkdir -p /grid/vol/0/var/lib/mapred/local
  #sudo chown mapred:mapred /grid/vol/0/var/lib/mapred/local
  #sudo mkdir /grid/vol/0/var/log
  #sudo chgrp hadoop /grid/vol/0/var/log
  #sudo chmod 775 /grid/vol/0/var/log
  #sudo chown hdfs:hadoop /grid/vol/0/var/log/hdfs
  #sudo mkdir /grid/vol/0/var/log/mapred
  #sudo chown mapred:hadoop /grid/vol/0/var/log/mapred
  #sudo chgrp hadoop /grid/vol/0/var/run
  #sudo chmod 775 /grid/vol/0/var/run
  #sudo mkdir /grid/vol/0/var/run/hdfs
  #sudo mkdir /grid/vol/0/tmp
  #sudo chmod 1777 /grid/vol/0/tmp
fi
# --- Hadoop distribution -----------------------------------------------------
HADOOP_HOME=/grid/usr/hadoop
# Download (if needed) and unpack the Hadoop 1.0.2 binary tarball.  Both cd
# and wget are now checked: the original continued after a failed cd/wget and
# then ran tar against a missing archive from the wrong directory.
mkdir -p ~/Downloads
cd ~/Downloads || exit 1
if [ ! -f hadoop-1.0.2-bin.tar.gz ]; then
  wget -P ~/Downloads http://archive.apache.org/dist/hadoop/core/hadoop-1.0.2/hadoop-1.0.2-bin.tar.gz || exit 1
fi
if [ ! -d /grid/usr/hadoop-1.0.2 ]; then
  echo "\n${lgre}==> ${whit}アーカイブを適当な場所に展開します。${defa}"
  read -r x
  sudo tar zxf hadoop-1.0.2-bin.tar.gz -C /grid/usr
fi
cd /grid/usr || exit 1
# Version-independent symlink so HADOOP_HOME stays stable across upgrades.
[ ! -e hadoop ] && sudo ln -s hadoop-1.0.2 hadoop
# --- Hadoop configuration (hadoop-env.sh, core-site.xml, hdfs-site.xml) ------
# Each edit below is idempotent: a grep for a marker string decides whether
# the sed insertion was already applied.  "sed -i.orig" keeps a backup copy
# of each pristine file.
cd ${HADOOP_HOME}/conf
# hadoop-env.sh: append alternative JAVA_HOME candidates (commented out),
# activate java-6-oracle, and point HADOOP_PID_DIR / HADOOP_LOG_DIR at the
# per-user directories under /grid/vol/0/var, right after the stock
# commented-out JAVA_HOME example line.
[ -z "`grep java-6-oracle hadoop-env.sh`" ] && \
echo "\n${lgre}==> ${whit}設定を追加します。${defa}" && \
read x && \
sudo sed -i.orig 's/^# export JAVA_HOME=\/usr\/lib\/j2sdk1.5-sun$/# export JAVA_HOME=\/usr\/lib\/j2sdk1.5-sun\n#export JAVA_HOME=\/usr\/lib\/jvm\/java-6-sun\n#export JAVA_HOME=\/usr\/lib\/jvm\/java-6-openjdk\n#export JAVA_HOME=\/usr\/lib\/jvm\/java-6-openjdk-i386\n#export JAVA_HOME=\/usr\/lib\/jvm\/java-6-openjdk-amd64\nexport JAVA_HOME=\/usr\/lib\/jvm\/java-6-oracle\n# The directory where pid files are stored. \/tmp by default.\nexport HADOOP_PID_DIR=\/grid\/vol\/0\/var\/run\/$USER\n# Where log files are stored. $HADOOP_HOME\/logs by default.\nexport HADOOP_LOG_DIR=\/grid\/vol\/0\/var\/log\/$USER\n/' ${HADOOP_HOME}/conf/hadoop-env.sh
# core-site.xml: insert fs.default.name (hdfs://localhost:9000),
# hadoop.tmp.dir and fs.checkpoint.dir right after <configuration>.
[ -z "`grep fs.checkpoint.dir core-site.xml`" ] && \
sudo sed -i.orig 's/^<configuration>$/<configuration>\n <property>\n <name>fs.default.name<\/name>\n <value>hdfs:\/\/localhost:9000<\/value>\n <\/property>\n <property>\n <name>hadoop.tmp.dir<\/name>\n <value>\/tmp\/hadoop-${user.name}<\/value>\n <\/property>\n <property>\n <name>fs.checkpoint.dir<\/name>\n <value>\/grid\/vol\/0\/var\/lib\/${user.name}\/checkpoint<\/value>\n <\/property>/' core-site.xml
# hdfs-site.xml: insert dfs.name.dir, dfs.data.dir and dfs.replication=1
# (single-node pseudo-distributed setup).
[ -z "`grep dfs.replication hdfs-site.xml`" ] && \
sudo sed -i.orig 's/^<configuration>$/<configuration>\n <property>\n <name>dfs.name.dir<\/name>\n <value>\/grid\/vol\/0\/var\/lib\/${user.name}\/name<\/value>\n <\/property>\n <property>\n <name>dfs.data.dir<\/name>\n <value>\/grid\/vol\/0\/var\/lib\/${user.name}\/data<\/value>\n <\/property>\n <property>\n <name>dfs.replication<\/name>\n <value>1<\/value>\n <\/property>/' hdfs-site.xml
# --- Format HDFS and start the HDFS daemons ----------------------------------
echo "\n${lgre}==> ${whit}HDFSをフォーマットします。${defa}"
read -r x
cd ${HADOOP_HOME}
# NOTE(review): this runs on every invocation; "namenode -format" itself
# prompts for confirmation when a name directory already exists.
sudo -u hdfs ./bin/hadoop namenode -format
# Start each HDFS daemon unless it is already running.  The [D]proc_* trick
# keeps the grep process itself out of the matches; the pattern is quoted
# (as the jobtracker/tasktracker checks below already do) so the shell
# cannot glob-expand the brackets against files in the current directory.
echo "\n${lgre}==> ${whit}各デーモンを起動します。${defa}"
read -r x
[ -z "`ps a | grep '[D]proc_namenode'`" ] && \
sudo -u hdfs ./bin/hadoop-daemon.sh start namenode
[ -z "`ps a | grep '[D]proc_secondarynamenode'`" ] && \
sudo -u hdfs ./bin/hadoop-daemon.sh start secondarynamenode
[ -z "`ps a | grep '[D]proc_datanode'`" ] && \
sudo -u hdfs ./bin/hadoop-daemon.sh start datanode
# --- MapReduce configuration (${HADOOP_HOME}/conf/mapred-site.xml) -----------
echo "\n${lgre}==> ${whit}MapReduce設定ファイルを編集します。${defa}"
read bbb
cd ${HADOOP_HOME}/conf
# Insert mapred.job.tracker (localhost:9001), mapred.system.dir and the
# staging root directory right after <configuration>; the grep makes the
# edit idempotent and sed keeps a .orig backup.  The stray backslash in
# "mapred-site\.xml" is harmless: the shell reduces \. to a plain dot.
if [ -z "`grep mapred.job.tracker mapred-site.xml`" ]; then
sudo sed -i.orig 's/^<configuration>$/<configuration>\n <property>\n <name>mapred.job.tracker<\/name>\n <value>localhost:9001<\/value>\n <\/property>\n <property>\n <name>mapred.system.dir<\/name>\n <value>\/grid\/vol\/0\/var\/lib\/${user.name}\/system<\/value>\n <\/property>\n <property>\n <name>mapreduce.jobtracker.staging.root.dir<\/name>\n <value>\/user<\/value>\n <\/property>/' mapred-site\.xml
fi
# --- MapReduce system directory on HDFS, then the MapReduce daemons ----------
cd ${HADOOP_HOME}
echo "\n${lgre}==> ${whit}HDFS上に必要なディレクトリを作成します。${defa}"
read -r x
sudo -u hdfs ./bin/hadoop fs -mkdir /grid/vol/0/var/lib/mapred
sudo -u hdfs ./bin/hadoop fs -chown mapred:mapred /grid/vol/0/var/lib/mapred
# Start jobtracker/tasktracker once each.  The original started them
# unconditionally here AND again below behind a "not running" check, so
# hadoop-daemon.sh always complained that the process was already running;
# only the guarded start is kept.
echo "MapReduce 各デーモンを起動します。"
[ -z "`ps a | grep '[D]proc_jobtracker'`" ] && \
sudo -u mapred ./bin/hadoop-daemon.sh start jobtracker
[ -z "`ps a | grep '[D]proc_tasktracker'`" ] && \
sudo -u mapred ./bin/hadoop-daemon.sh start tasktracker
# --- Smoke test --------------------------------------------------------------
echo "テストユーザを追加し、サンプルジョブを実行してみます。"
# Silence both stdout and stderr of id(1); the original leaked "no such
# user" to the terminal when alice did not exist yet.
if ! id alice >/dev/null 2>&1; then
  sudo adduser alice
fi
sudo -u hdfs ./bin/hadoop fs -mkdir /user/alice
# BUG FIX: the group name was misspelled "aclice" in the original, which
# made the chown fail and the sample job unable to write to /user/alice.
sudo -u hdfs ./bin/hadoop fs -chown alice:alice /user/alice
sudo -u alice ./bin/hadoop jar hadoop-examples-1.0.2.jar pi 5 10
# --- Shutdown ----------------------------------------------------------------
# Stop every daemon as its owning user, preserving the original stop order.
for daemon in mapred:jobtracker hdfs:namenode hdfs:secondarynamenode mapred:tasktracker hdfs:datanode; do
  run_as=${daemon%%:*}
  svc=${daemon#*:}
  sudo -u "$run_as" ./bin/hadoop-daemon.sh stop "$svc"
done
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment