#cat perf_disks_write_data.sh
# write ~2.5 GB (20000 x 128 KB blocks) with direct I/O to each of the 12 data disks in parallel
datetime=`date +%Y-%m-%d:%H:%M:%S`
for i in $(seq 12)
do
dd if=/dev/zero of=/data${i}/testfile bs=128K count=20000 oflag=direct > /tmp/disk_write_`hostname`_disk_${i}_${datetime}.out 2>&1 &
done
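# a minimal follow-up sketch (not in the original script): once the background dd jobs
# finish, pull the throughput summary line that dd prints into each output file
grep "copied" /tmp/disk_write_`hostname`_disk_*.out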
#cat perf_disks_write_so.sh
# same direct-I/O write test against the OS (root) disk
datetime=`date +%Y-%m-%d:%H:%M:%S`
dd if=/dev/zero of=/testfile bs=128K count=20000 oflag=direct > /tmp/disk_write_`hostname`_disk_root_${datetime}.out 2>&1
# start an iperf3 server on every node, then run a 3-second, 3-stream test from each host in hosts.txt to each host in hosts2.txt
clush -a "nohup iperf3 -s > /tmp/iperf3.out 2>&1 &"
for host in `cat hosts.txt | paste -s -d' '`
do
for host2 in `cat hosts2.txt | paste -s -d' '`
do
ssh ${host} "iperf3 -c ${host2} -P 3 -t 3" > /tmp/iperf3_from_${host}_to_${host2}
done
done
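# a hedged follow-up (not in the original gist): summarize the per-pair results and stop
# the iperf3 servers started above
grep -H "sender" /tmp/iperf3_from_*
clush -a "pkill iperf3"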
# drop the RANGER service definition from the Ambari-managed cluster 'cluster3'
curl -u admin:admin -H 'X-Requested-By:ambari' -X DELETE 'http://localhost:8080/api/v1/clusters/cluster3/services/RANGER'
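# Ambari only deletes a service once it is stopped; a hedged sketch of the stop call
# that would normally precede the DELETE above (same cluster and service names assumed):
curl -u admin:admin -H 'X-Requested-By:ambari' -X PUT -d '{"RequestInfo":{"context":"Stop RANGER"},"Body":{"ServiceInfo":{"state":"INSTALLED"}}}' 'http://localhost:8080/api/v1/clusters/cluster3/services/RANGER'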
-- submit a long-running transform job to the 'user' Tez queue
set tez.queue.name=user;
set tez.grouping.split-count=1000;
select transform (1) using '/tmp/sleep.sh 1000000000' from wikipedia limit 10;

-- repeat the same transform on the 'production' Tez queue
set tez.queue.name=production;
set tez.grouping.split-count=1000;
select transform (1) using '/tmp/sleep.sh 1000000000' from wikipedia limit 10;

-- same test on MapReduce, pointing the job at the 'user' queue
set hive.execution.engine=mr;
set mapred.job.queue.name=user;
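-- the original gist stops after the queue settings; presumably the MR run repeats the
-- same transform, e.g. (a sketch mirroring the Tez queries above):
select transform (1) using '/tmp/sleep.sh 1000000000' from wikipedia limit 10;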
http://docs.oracle.com/javase/7/docs/technotes/tools/index.html#monitor
[terminal]
jps
[xterminal]
jconsole
[terminal]
# take a binary heap dump of the target JVM (2133 is the pid reported by jps)
jmap -dump:format=b,file=/tmp/dump.hprof 2133
[osx client]
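# a hedged sketch of the client-side step the gist leaves blank: pull the heap dump down
# and open it in a heap analyzer (hostname below is illustrative)
scp root@sandbox.hortonworks.com:/tmp/dump.hprof .
# then load dump.hprof in VisualVM or Eclipse MAT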
drop table wikipedia;
create external table wikipedia(
domain string,
page string,
count bigint,
size bigint
)
partitioned by (datewiki int)
row format delimited fields terminated by ' '
stored as textfile;
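-- a hedged example of loading and querying one partition of the table above;
-- the datewiki value and HDFS path are illustrative, not from the original gist
alter table wikipedia add partition (datewiki=20151123) location '/tmp/wikipedia/20151123';
select count(*) from wikipedia where datewiki=20151123;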
#mysql -u root
#CREATE USER 'testsqoop'@'localhost' IDENTIFIED BY 'pwd';
#CREATE USER 'testsqoop'@'%' IDENTIFIED BY 'pwd';
#GRANT ALL PRIVILEGES ON *.* TO 'testsqoop'@'localhost' WITH GRANT OPTION;
#GRANT ALL PRIVILEGES ON *.* TO 'testsqoop'@'%' WITH GRANT OPTION;
#FLUSH PRIVILEGES;
sqoop list-tables --connect jdbc:mysql://sandbox.hortonworks.com/hive --username=testsqoop --password=pwd
sqoop import --verbose --connect jdbc:mysql://sandbox.hortonworks.com/hive --username=testsqoop --password=pwd --table TBLS --hcatalog-table sqoop_test --hcatalog-storage-stanza "stored as orc" -m 1 --create-hcatalog-table
sqoop import --verbose --connect jdbc:mysql://sandbox.hortonworks.com/hive --username=testsqoop --password=pwd --table TBLS --hcatalog-table sqoop_test2 -m 1 --create-hcatalog-table
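# a quick sanity check (a sketch, not from the original gist): run the same count on the
# source table through the JDBC connection before comparing with the imported Hive tables
sqoop eval --connect jdbc:mysql://sandbox.hortonworks.com/hive --username=testsqoop --password=pwd --query "select count(*) from TBLS"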
// /usr/hdp/2.3.2.1-12/spark/bin/spark-shell --packages org.apache.spark:spark-streaming-kafka_2.10:1.5.2
import kafka.serializer.StringDecoder
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
import org.apache.spark.SparkConf
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.{StructType,StructField,StringType};
val ssc = new StreamingContext(sc, Seconds(2))
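// a hedged continuation (not in the original snippet): wire the context to a Kafka topic
// using the 0.8 direct-stream API pulled in via --packages above; broker address and
// topic name are illustrative
val kafkaParams = Map[String, String]("metadata.broker.list" -> "sandbox.hortonworks.com:6667")
val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, Set("test"))
messages.map(_._2).print()
ssc.start()
ssc.awaitTermination()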
#curl -sSL https://gist.githubusercontent.com/gbraccialli/ba2b8586c5a69dce3d97/raw/ | sudo -E sh
wget http://sourceforge.net/projects/nmon/files/nmon_x86_64_centos6
cp nmon_x86_64_centos6 /usr/bin/nmon
chmod +x /usr/bin/nmon
nmon -f -s 1 -t -T -c 100000000000000000000000