Last active
August 29, 2015 14:09
-
-
Save oza/e05dd42ab0ca5a2e9a8d to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <!-- Custom indirection key: referenced below and in the other *-site.xml
       files as ${master.address} via Hadoop Configuration variable expansion,
       so the master hostname only has to be changed in one place. -->
  <property>
    <name>master.address</name>
    <value>hadoop-master</value>
  </property>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://${master.address}:9000</value>
  </property>
  <!-- Was "dfs.permissions", the deprecated pre-Hadoop-2.x key; the current
       name is dfs.permissions.enabled (also set in hdfs-site.xml). Disabled
       here for a single-user benchmark cluster. -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
  <!-- Disable service-level authorization checks (single-tenant cluster). -->
  <property>
    <name>hadoop.security.authorization</name>
    <value>false</value>
  </property>
  <!-- Base for Hadoop's temporary/local state; several other defaults
       derive from this path. -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/hadoop1/tmp</value>
  </property>
  <!-- Read/write buffer size in bytes (128 KB) for sequence files and I/O. -->
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
</configuration>
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <!-- Single replica: this is a benchmark/test cluster, not production. -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:///hadoop1/data/hdfs/namenode</value>
  </property>
  <!-- Comma-separated list of DataNode storage directories, one per disk.
       NOTE: no trailing comma — a dangling comma would put an empty entry
       into the comma-split list. The disabled entries are kept OUTSIDE the
       <value> element; the original had them commented inside it, which left
       a trailing comma in the effective value. -->
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:///hadoop1/data/hdfs/datanode,file:///hadoop2/data/hdfs/datanode</value>
    <!-- Additional disks; append to the value above to enable:
         file:///hadoop3/data/hdfs/datanode,
         file:///hadoop4/data/hdfs/datanode,
         file:///hadoop5/data/hdfs/datanode,
         file:///hadoop6/data/hdfs/datanode,
         file:///hadoop7/data/hdfs/datanode,
         file:///hadoop8/data/hdfs/datanode
    -->
  </property>
  <!-- Disable HDFS permission checking (single-user benchmark cluster). -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <!-- Run MapReduce jobs on YARN. -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- ${master.address} expands from the key defined in core-site.xml. -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>${master.address}:10020</value>
  </property>
  <!-- Container sizes: 4 GB / 1 vcore per map and reduce task. -->
  <property>
    <name>mapreduce.map.memory.mb</name>
    <value>4096</value>
  </property>
  <property>
    <name>mapreduce.reduce.memory.mb</name>
    <value>4096</value>
  </property>
  <property>
    <name>mapreduce.map.cpu.vcores</name>
    <value>1</value>
  </property>
  <property>
    <name>mapreduce.reduce.cpu.vcores</name>
    <value>1</value>
  </property>
  <!-- JVM heap (3800 MB) deliberately below the 4096 MB container limit to
       leave headroom for non-heap memory. -->
  <property>
    <name>mapreduce.map.java.opts</name>
    <value>-server -Xmx3800m -Djava.net.preferIPv4Stack=true</value>
    <!-- Profiling variant (NOTE: filename was mangled by the gist scrape;
         presumably @taskid@.hprof — confirm before enabling):
         <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -agentlib:hprof=file=/tmp/hprof/@taskid@.hprof,format=b</value> -->
  </property>
  <property>
    <name>mapreduce.reduce.java.opts</name>
    <value>-server -Xmx3800m -Djava.net.preferIPv4Stack=true</value>
    <!-- Profiling variant (NOTE: filename was mangled by the gist scrape;
         presumably @taskid@.hprof — confirm before enabling):
         <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -agentlib:hprof=file=/tmp/hprof/@taskid@.hprof,format=b</value> -->
  </property>
  <!-- Local scratch directories for intermediate map output, one per disk.
       NOTE: no trailing comma — a dangling comma would put an empty entry
       into the comma-split list. Disabled entries moved OUTSIDE <value>. -->
  <property>
    <name>mapreduce.cluster.local.dir</name>
    <value>/hadoop1/mapred,/hadoop2/mapred</value>
    <!-- Additional disks; append to the value above to enable:
         /hadoop3/mapred,
         /hadoop4/mapred,
         /hadoop5/mapred,
         /hadoop6/mapred,
         /hadoop7/mapred,
         /hadoop8/mapred
    -->
  </property>
</configuration>
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Default system properties included when running spark-submit.
# This is useful for setting default environmental settings.

# Example:
# spark.master            spark://master:7077
# spark.eventLog.enabled  true
# spark.eventLog.dir      hdfs://namenode:8021/directory
# spark.serializer        org.apache.spark.serializer.KryoSerializer
# spark.driver.memory     5g
# spark.executor.extraJavaOptions  -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three"

spark.serializer                 org.apache.spark.serializer.KryoSerializer
spark.shuffle.manager            org.apache.spark.shuffle.sort.SortShuffleManager
spark.shuffle.consolidateFiles   true
spark.shuffle.spill              true
# FIX: the original set shuffle=0.7 + storage=0.5 = 1.2, oversubscribing heap.
# Per the Spark 1.x docs, spark.shuffle.memoryFraction should not exceed
# 1 - spark.storage.memoryFraction, so cap shuffle at 0.5 with storage at 0.5.
spark.shuffle.memoryFraction     0.5
spark.storage.memoryFraction     0.5
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env bash

# This file is sourced when running various Spark programs.
# Copy it as spark-env.sh and edit that to configure Spark for your site.

# Options read when launching programs locally with
# ./bin/run-example or ./bin/spark-submit
# - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
# - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
# - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
# - SPARK_CLASSPATH, default classpath entries to append

# Options read by executors and drivers running inside the cluster
# - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
# - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
# - SPARK_CLASSPATH, default classpath entries to append
# - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
# - MESOS_NATIVE_LIBRARY, to point to your libmesos.so if you use Mesos

# Options read in YARN client mode
# - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
# - SPARK_EXECUTOR_INSTANCES, Number of workers to start (Default: 2)
# - SPARK_EXECUTOR_CORES, Number of cores for the workers (Default: 1).
# - SPARK_EXECUTOR_MEMORY, Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
# - SPARK_DRIVER_MEMORY, Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
# - SPARK_YARN_APP_NAME, The name of your application (Default: Spark)
# - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: 'default')
# - SPARK_YARN_DIST_FILES, Comma separated list of files to be distributed with the job.
# - SPARK_YARN_DIST_ARCHIVES, Comma separated list of archives to be distributed with the job.

# Site settings: 14 executors of 4 GB each on YARN, 2 GB driver.
HADOOP_CONF_DIR=/home/ozawa/hadoop/etc/hadoop/
SPARK_EXECUTOR_INSTANCES=14
SPARK_EXECUTOR_MEMORY=4G
SPARK_DRIVER_MEMORY=2G

# Options for the daemons used in the standalone deploy mode:
# - SPARK_MASTER_IP, to bind the master to a different IP address or hostname
# - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
# - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
# - SPARK_WORKER_CORES, to set the number of cores to use on this machine
# - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
# - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
# - SPARK_WORKER_INSTANCES, to set the number of worker processes per node
# - SPARK_WORKER_DIR, to set the working directory of worker processes
# - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
# - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
# - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
# - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
  <!-- HDFS locations of the Tez runtime jars; ${fs.defaultFS} expands from
       core-site.xml. -->
  <property>
    <name>tez.lib.uris</name>
    <value>${fs.defaultFS}/apps/tez,${fs.defaultFS}/apps/tez/lib</value>
  </property>
  <!-- One AM per DAG rather than a long-lived session AM. -->
  <property>
    <name>tez.am.mode.session</name>
    <value>false</value>
  </property>
  <property>
    <name>tez.am.acl.enabled</name>
    <value>false</value>
  </property>
  <!-- Quieter AM logs, default-level task logs. -->
  <property>
    <name>tez.am.log.level</name>
    <value>WARN</value>
  </property>
  <property>
    <name>tez.task.log.level</name>
    <value>INFO</value>
  </property>
  <!-- Let the shuffle vertex manager reduce task parallelism automatically. -->
  <property>
    <name>tez.shuffle-vertex-manager.enable.auto-parallel</name>
    <value>true</value>
  </property>
  <!-- Fail fast: no AM or task retries (benchmark setting). -->
  <property>
    <name>tez.am.max.app.attempts</name>
    <value>1</value>
  </property>
  <property>
    <name>tez.am.task.max.failed.attempts</name>
    <value>1</value>
  </property>
  <!-- Disabled shuffle-vertex-manager tuning.
       NOTE(review): desired-task-input-size expects a byte size, not a
       boolean — fix the value before enabling.
  <property>
    <name>tez.shuffle-vertex-manager.desired-task-input-size</name>
    <value>true</value>
  </property>
  <property>
    <name>tez.shuffle-vertex-manager.min-src-fraction</name>
    <value>0.10</value>
  </property>
  <property>
    <name>tez.shuffle-vertex-manager.max-src-fraction</name>
    <value>1.00</value>
  </property>
  -->
  <!-- AM and task JVMs: 3800 MB heap inside 4096 MB containers, leaving
       headroom for non-heap memory. -->
  <property>
    <name>tez.am.launch.cmd-opts</name>
    <value>-server -Xmx3800m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN</value>
  </property>
  <property>
    <name>tez.am.resource.memory.mb</name>
    <value>4096</value>
  </property>
  <property>
    <name>tez.task.launch.cmd-opts</name>
    <value>-server -Xmx3800m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC -Dhadoop.metrics.log.level=WARN</value>
  </property>
  <property>
    <name>tez.task.resource.memory.mb</name>
    <value>4096</value>
  </property>
  <property>
    <name>tez.task.resource.cpu.vcores</name>
    <value>1</value>
  </property>
  <!-- Disabled runtime shuffle tuning:
  <property>
    <name>tez.runtime.sort.spill.percent</name>
    <value>0.5</value>
  </property>
  <property>
    <name>tez.runtime.shuffle.merge.percent</name>
    <value>0.5</value>
  </property>
  <property>
    <name>tez.runtime.shuffle.buffersize</name>
    <value>8192</value>
  </property>
  -->
  <property>
    <name>tez.runtime.sort.threads</name>
    <value>1</value>
  </property>
  <!-- Disabled runtime I/O tuning:
  <property>
    <name>tez.runtime.io.sort.factor</name>
    <value>100</value>
  </property>
  <property>
    <name>tez.runtime.task.input.buffer.percent</name>
    <value>0.2</value>
  </property>
  <property>
    <name>shuffle.memory.limit.percent</name>
    <value>0.5</value>
  </property>
  <property>
    <name>tez.runtime.shuffle.memory-to-memory.enable</name>
    <value>true</value>
  </property>
  <property>
    <name>tez.runtime.optimize.local.fetch</name>
    <value>true</value>
  </property>
  -->
</configuration>
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<?xml version="1.0"?>
<configuration>
  <!-- Site specific YARN configuration properties -->
  <!-- Enable the MapReduce shuffle auxiliary service on every NodeManager. -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <!-- Container allocation bounds: 512 MB .. 32 GB. -->
  <property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>512</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>32768</value>
  </property>
  <!-- Per-node resources advertised to the RM: 16 vcores, 56 GB. -->
  <property>
    <name>yarn.nodemanager.resource.cpu-vcores</name>
    <value>16</value>
  </property>
  <property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>57344</value>
  </property>
  <property>
    <name>yarn.nodemanager.container-monitor.resource-calculator.class</name>
    <value>org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin</value>
  </property>
  <!-- RM endpoints; ${master.address} expands from core-site.xml. -->
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>${master.address}:8081</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>${master.address}:8082</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>${master.address}:8083</value>
  </property>
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>${master.address}:8084</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>${master.address}:8088</value>
  </property>
  <!-- Enforce physical memory limits but not virtual memory limits; the huge
       vmem-pmem ratio below makes the vmem check moot either way. -->
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.pmem-check-enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.acl.enable</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>1000</value>
  </property>
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  <!-- Retain aggregated logs for one week. -->
  <property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
  </property>
  <!-- FIX: the original carried a stray copy-pasted "one week" comment here. -->
  <property>
    <name>yarn.nodemanager.linux-container-executor.path</name>
    <value>/home/ubuntu/hadoop/bin/container-executor</value>
  </property>
  <!-- LinuxContainerExecutor is currently disabled; the two properties above
       and below are inert until this is uncommented.
  <property>
    <name>yarn.nodemanager.container-executor.class</name>
    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
  </property>
  -->
  <property>
    <name>yarn.nodemanager.linux-container-executor.group</name>
    <value><!-- TODO your user group here. should match container-executor.cfg --></value>
  </property>
  <!-- NodeManager local scratch directories, one per disk.
       NOTE: no trailing comma — a dangling comma would put an empty entry
       into the comma-split list. Disabled entries moved OUTSIDE <value>. -->
  <property>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/hadoop1/yarn,/hadoop2/yarn</value>
    <!-- Additional disks; append to the value above to enable:
         /hadoop3/yarn,
         /hadoop4/yarn,
         /hadoop5/yarn,
         /hadoop6/yarn,
         /hadoop7/yarn,
         /hadoop8/yarn
    -->
  </property>
</configuration>
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment