Last active
August 29, 2015 14:05
-
-
Save c0mpiler/ab94ca96f9ab258c560f to your computer and use it in GitHub Desktop.
hadoop_2.5.0_Initial_setup_centos
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Fetch Hadoop 2.5.0 and install OpenJDK 7 (CentOS 7) ---
# Requires: a user with sudo rights; network access.
cd ~ && mkdir -p tmp           # -p: idempotent if the script is re-run
sudo chown -R harsha:harsha tmp
cd tmp
# NOTE(review): the original carfab.com mirror is dead; old releases live
# permanently on the Apache archive.
wget https://archive.apache.org/dist/hadoop/common/hadoop-2.5.0/hadoop-2.5.0.tar.gz
tar xvzf hadoop-2.5.0.tar.gz
sudo mv hadoop-2.5.0 /usr/local/hadoop
sudo yum install -y java-1.7.0-openjdk java-1.7.0-openjdk-devel
java -version
# --- Write /etc/profile.d/java.sh (login-shell Java environment) ---
# The heredoc delimiter is quoted ('EOF') so $JAVA_HOME, $JRE_HOME and
# $PATH are written LITERALLY and expanded at login time. The original
# double-quoted echo expanded them immediately, baking this shell's
# current PATH into the profile script.
sudo tee /etc/profile.d/java.sh >/dev/null <<'EOF'
export JAVA_HOME=/usr/lib/jvm/java-1.7.0-openjdk-1.7.0.65-2.5.1.2.el7_0.x86_64
export JRE_HOME=$JAVA_HOME/jre
export JAVA_PATH=$JAVA_HOME
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
EOF
source /etc/profile.d/java.sh
echo "$JAVA_HOME"              # sanity check: should print the JDK path
# --- Write /etc/profile.d/hadoop.sh (Hadoop environment variables) ---
# The original first line had mismatched nested double quotes
# ("export HADOOP_PREFIX="/usr/local/hadoop"); a quoted heredoc avoids
# the quoting problem entirely and keeps the file contents literal.
sudo tee /etc/profile.d/hadoop.sh >/dev/null <<'EOF'
export HADOOP_PREFIX=/usr/local/hadoop
export HADOOP_HOME=/usr/local/hadoop
export HADOOP_COMMON_HOME=/usr/local/hadoop
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
export HADOOP_HDFS_HOME=/usr/local/hadoop
export HADOOP_MAPRED_HOME=/usr/local/hadoop
export HADOOP_YARN_HOME=/usr/local/hadoop
EOF
source /etc/profile.d/hadoop.sh
echo "$HADOOP_HOME"            # sanity check: should print /usr/local/hadoop
# --- hdfs-site.xml: local storage paths for NameNode and DataNode ---
# Create the storage directories up front; the daemons will not start
# if the configured dfs.*.dir paths are missing.
sudo mkdir -p /usr/local/hadoop/hdfs/namenode /usr/local/hadoop/hdfs/datanode
sudo tee "$HADOOP_PREFIX/etc/hadoop/hdfs-site.xml" >/dev/null <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///usr/local/hadoop/hdfs/datanode</value>
<description>Comma separated list of paths on the local filesystem of a DataNode where it should store its blocks.</description>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///usr/local/hadoop/hdfs/namenode</value>
<description>Path on the local filesystem where the NameNode stores the namespace and transaction logs persistently.</description>
</property>
</configuration>
EOF
# --- core-site.xml: point all clients at the NameNode on host "node0" ---
# Quoted heredoc keeps the XML literal; "$HADOOP_PREFIX" is quoted so the
# path survives even if it ever contains spaces.
sudo tee "$HADOOP_PREFIX/etc/hadoop/core-site.xml" >/dev/null <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://node0/</value>
<description>NameNode URI</description>
</property>
</configuration>
EOF
# --- yarn-site.xml: scheduler/NodeManager resource limits + RM host ---
# The original used a double-quoted echo, which would expand any '$' in
# the XML body; a quoted heredoc writes the content literally.
sudo tee "$HADOOP_PREFIX/etc/hadoop/yarn-site.xml" >/dev/null <<'EOF'
<?xml version='1.0'?>
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>128</value>
<description>Minimum limit of memory to allocate to each container request at the Resource Manager.</description>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>2048</value>
<description>Maximum limit of memory to allocate to each container request at the Resource Manager.</description>
</property>
<property>
<name>yarn.scheduler.minimum-allocation-vcores</name>
<value>1</value>
<description>The minimum allocation for every container request at the RM, in terms of virtual CPU cores. Requests lower than this won't take effect, and the specified value will get allocated the minimum.</description>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>2</value>
<description>The maximum allocation for every container request at the RM, in terms of virtual CPU cores. Requests higher than this won't take effect, and will get capped to this value.</description>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>4096</value>
<description>Physical memory, in MB, to be made available to running containers</description>
</property>
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>4</value>
<description>Number of CPU cores that can be allocated for containers.</description>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>node0</value>
<description>The hostname of the RM.</description>
</property>
</configuration>
EOF
# --- Format HDFS, then smoke-test: start all daemons, verify, stop ---
# jps after the starts should list NameNode, DataNode, ResourceManager
# and NodeManager; after the stops it should list none of them.
"$HADOOP_PREFIX/bin/hdfs" namenode -format
"$HADOOP_PREFIX/sbin/hadoop-daemon.sh" start namenode
"$HADOOP_PREFIX/sbin/hadoop-daemon.sh" start datanode
"$HADOOP_PREFIX/sbin/yarn-daemon.sh" start resourcemanager
"$HADOOP_PREFIX/sbin/yarn-daemon.sh" start nodemanager
jps
"$HADOOP_PREFIX/sbin/hadoop-daemon.sh" stop namenode
"$HADOOP_PREFIX/sbin/hadoop-daemon.sh" stop datanode
"$HADOOP_PREFIX/sbin/yarn-daemon.sh" stop resourcemanager
"$HADOOP_PREFIX/sbin/yarn-daemon.sh" stop nodemanager
jps
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment