SIDUS helps you identify sources of performance variability among HPC cluster nodes.
#!/usr/bin/env python
# 2013-01-10 : Initial version
# 2013-01-17 : RDMA version removed due to instability, moved to the IPoIB version
#
# Copyleft Emmanuel QUEMENER <[email protected]>
from ClusterShell.Task import task_self
from ClusterShell.NodeSet import NodeSet
import time
import posix
import socket
import os
# If True, execute the commands; if False, only echo them so they can be checked (dry run)
SHELL=True
QUIET=False
# GFSIP : True to address the GlusterFS bricks over IPoIB, False to use hostnames
GFSIP=True
servers="sl230-[1-4]"
clients="sl230-[5-20]"
# Size of the ramdisk device in GB
RDSIZE=60
#RDSIZE=12
#RDSIZE=3
# Size of Block for test in KB
BSIZE=16384
# Percentage of RDK size for IOZONE
PERCENT=80
# Number of Launches
#LAUNCHES=20
LAUNCHES=5
# Ramdisk folder
RDDIR="/media/ramdisk"
# Hard disk folder
HDDIR="/media/harddisk"
# GlusterFS folder
GFSDIR="/media/glusterfs"
# IOZONE options and command are built inside each test, since the file size
# must be adapted to the run
#IOZONE="/usr/bin/iozone %s" % IOZONE_OPTIONS
LOGDIR="/root"
# SLEEP
SLEEP="sleep 5"
#DATE=datetime.date.today().strftime("%Y%m%d")
DATE=time.strftime("%Y%m%d%H%M")
HOSTNAME=socket.gethostname()
def Distribute(first,second,style):
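# Map each host of 'first' to the host of 'second' with the same index
# ('uniform' style requires both NodeSets to have the same length)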
output={}
LenFirst=len(NodeSet(first))
LenSecond=len(NodeSet(second))
if style=="uniform":
if LenFirst==LenSecond:
print "Same number of hosts between lists %i & %i " % (LenFirst,LenSecond)
for i in range(LenFirst):
output[NodeSet(first)[i]]=NodeSet(second)[i]
else:
print "Different number of hosts between lists %i & %i " % (LenFirst,LenSecond)
if not QUIET:
print output
return(output)
def AllOnOne(clients,server):
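# Map every client to the same server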
output={}
for client in NodeSet(clients):
output[client]=server
if not QUIET:
print output
return(output)
def CreatePools(servers,number,style):
# Return a dictionary mapping each pool name to the list of servers it contains
# servers: list of servers as a ClusterShell NodeSet expression
# number: number of servers per pool
output={}
LenServers=len(NodeSet(servers))
LenPools=LenServers/number
if style=="round-robin":
if LenServers==LenPools*number:
print "Each server has a pool, %i pools created" % (LenPools)
for i in range(LenServers):
pool='pool%i' % (i/number+1)
if not output.has_key(pool):
output[pool]=list()
output[pool].append(NodeSet(servers)[i])
else:
print "Different number of hosts between lists %i & %i " % (LenFirst,LenSecond)
if not QUIET:
print output
return(output)
def UnameA(hosts):
task = task_self()
task.shell("uname -a", nodes=hosts)
task.resume()
if not QUIET:
for buf,nodes in task.iter_buffers() :
print nodes,buf
return(task)
def Hostname(hosts):
task = task_self()
task.shell("echo $HOSTNAME", nodes=hosts)
task.resume()
if not QUIET:
for buf,nodes in task.iter_buffers() :
print nodes,buf
return(task)
def MultiLines(hosts):
task = task_self()
task.shell("""
echo This is the First Command
uname -a
date
""", nodes=hosts)
task.resume()
if not QUIET:
for buf,nodes in task.iter_buffers() :
print nodes,buf
return(task)
def Shell(command,hosts):
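# Run 'command' on 'hosts' through ClusterShell; when SHELL is False,
# only echo the command on the nodes so it can be checked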
task = task_self()
if SHELL:
task.shell(command, nodes=hosts)
else:
task.shell("echo -e \"%s\"" % command, nodes=hosts)
task.resume()
if not QUIET:
for buf,nodes in task.iter_buffers() :
print nodes,buf
return(task)
def Command(command):
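# Run 'command' locally on the master node (or echo it when SHELL is False)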
if SHELL:
os.system(command)
else:
os.system("echo -e \"%s\"" % command)
def StartOnServers(FS,hosts):
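# On each server: create a ramdisk (tmpfs, or brd formatted with FS), mount it
# on RDDIR, then create, tune and start a single-brick GlusterFS volume 'ramdisk'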
print "StartOnServer for filesystem %s" % FS
Shell("[ ! -d %s ] && mkdir -p %s" % (RDDIR,RDDIR),hosts)
if FS=='tmpfs':
#Shell("mount -o noatime,size=$((2**20*%s))k -t tmpfs none %s" % (RDSIZE,RDDIR),hosts)
Shell("mount -o noatime,size=$((2**20*%s))k,nr_inodes=$((10*2**10*%s))k -t tmpfs none %s" % (RDSIZE,RDSIZE,RDDIR),hosts)
else:
Shell("modprobe brd rd_nr=1 rd_size=$((2**20*%s))" % RDSIZE,hosts)
if FS=='ext2':
Shell("mkfs.ext2 -E stride=16 -F -m 0 /dev/ram0",hosts)
Shell("mount -o noatime -t %s /dev/ram0 %s" % (FS,RDDIR),hosts)
if GFSIP:
Shell("""
# Create volume
RDDIR=%s
IP=$(ip -4 -o addr show ib0 | awk '{ print $4 }' | awk -F'/' '{ print $1 }')
gluster volume create ramdisk transport tcp $IP:$RDDIR
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
gluster volume info
""" % RDDIR,hosts)
else:
Shell("""
# Create volume
RDDIR=%s
gluster volume create ramdisk transport tcp $HOSTNAME:$RDDIR
gluster volume set ramdisk performance.cache-size 2048MB
#gluster volume set ramdisk performance.io-thread-count 16
gluster volume set ramdisk nfs.disable on
gluster volume info
""" % RDDIR,hosts)
Shell("gluster volume start ramdisk",hosts)
def StartOnServersDisk(FS,hosts):
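# Same as StartOnServers, but the brick is a local hard disk (/dev/sda)
# mounted on HDDIR instead of a ramdisk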
print "StartOnServer for filesystem %s" % FS
Shell("[ ! -d %s ] && mkdir -p %s" % (HDDIR,HDDIR),hosts)
if FS=='ext2':
Shell("mkfs.ext2 -E stride=16 -F -m 0 /dev/sda",hosts)
Shell("mount -o noatime -t %s /dev/sda %s" % (FS,HDDIR),hosts)
if GFSIP:
Shell("""
IP=$(ip -4 -o addr show ib0 | awk '{ print $4 }' | awk -F'/' '{ print $1 }')
# Create volume
gluster volume create ramdisk transport tcp $IP:%s
# Tune Volume
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
""" % HDDIR,hosts)
else:
Shell("""
# Create volume
gluster volume create ramdisk transport tcp $HOSTNAME:%s
# Tune volume
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
""" % HDDIR,hosts)
Shell("gluster volume start ramdisk",hosts)
def StartOnPools(FS,pools):
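# For each pool: mount a ramdisk on every member, probe the peers from the
# first host (the pool master), then create and start one striped GlusterFS
# volume whose bricks are the members' ramdisks; return the list of masters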
# Sly old coyote trick to flatten the list of lists
# http://stackoverflow.com/questions/11331908/how-to-use-reduce-with-list-of-lists
hosts=NodeSet.fromlist(sum(pools.itervalues(),[]))
print "StartOnPools for filesystem %s" % FS
# Initialisation of local partitions
Shell("[ ! -d %s ] && mkdir -p %s" % (RDDIR,RDDIR),hosts)
if FS=='tmpfs':
Shell("mount -o noatime,size=$((2**20*%s))k -t tmpfs none %s" % (RDSIZE,RDDIR),hosts)
else:
Shell("modprobe brd rd_nr=1 rd_size=$((2**20*%s))" % RDSIZE,hosts)
if FS=='ext2':
Shell("mkfs.ext2 -E stride=16 -F -m 0 /dev/ram0",hosts)
Shell("mount -o noatime -t %s /dev/ram0 %s" % (FS,RDDIR),hosts)
masters=[]
# Create Pools and Volumes
for pool in pools.keys():
# Initialize OSD file
Shell("echo > /tmp/OSD.gfs",pools[pool][0])
stripe=len(pools[pool])
# Create pool
for host in pools[pool]:
# Add peer to master
# Add OSD element in OSD file
if GFSIP:
Shell("""
REMOTE=%s
RDDIR=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
gluster peer probe $IP
echo -ne "$IP:$RDDIR " >> /tmp/OSD.gfs
""" % (host,RDDIR),pools[pool][0])
else:
Shell("""
IP=%s
RDDIR=%s
gluster peer probe $IP
echo -ne "$IP:$RDDIR " >> /tmp/OSD.gfs
""" % (host,RDDIR),pools[pool][0])
masters.append(pools[pool][0])
# Create volume
Shell("""
OSD=$(cat /tmp/OSD.gfs)
# Create volume
gluster volume create ramdisk stripe %i transport tcp $OSD
# Tune volume
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
""" % stripe,pools[pool][0])
# Start volume
Shell("gluster volume start ramdisk",pools[pool][0])
# View volume
Shell("gluster volume info",pools[pool][0])
return(masters)
def StartDiskOnPools(FS,DEVICE,pools):
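# Same as StartOnPools, but the brick on each member is a single local disk
# (DEVICE) formatted with FS (ext2/3/4, xfs, jfs, btrfs or zfs)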
# Sly old coyote trick to flatten the list of lists
# http://stackoverflow.com/questions/11331908/how-to-use-reduce-with-list-of-lists
hosts=NodeSet.fromlist(sum(pools.itervalues(),[]))
print "StartDiskOnPools for filesystem %s and %s" % (FS,DEVICE)
# Initialisation of local partitions
Shell("[ ! -d %s ] && mkdir -p %s" % (RDDIR,RDDIR),hosts)
if FS=='ext2':
Shell("mkfs.ext2 -E stride=16 -F -m 0 %s" % DEVICE,hosts)
elif FS=='ext3':
Shell("mkfs.ext3 -E stride=16 -F -m 0 %s" % DEVICE,hosts)
elif FS=='ext4':
Shell("mkfs.ext4 -E stride=16 -F -m 0 %s" % DEVICE,hosts)
elif FS=='xfs':
Shell("mkfs.xfs -f %s" % DEVICE,hosts)
elif FS=='jfs':
Shell("mkfs.jfs -q %s" % DEVICE,hosts)
elif FS=='btrfs':
Shell("mkfs.btrfs %s" % DEVICE,hosts)
elif FS=='zfs':
Shell('modprobe zfs',hosts)
Shell('touch /etc/zfs/zpool.cache',hosts)
time.sleep(5)
Shell("zpool create -m %s -f -o cachefile=none harddisk %s" % (RDDIR,DEVICE),hosts)
time.sleep(5)
if FS!='zfs':
Shell("mount -o noatime -t %s %s %s" % (FS,DEVICE,RDDIR),hosts)
masters=[]
# Create Pools and Volumes
for pool in pools.keys():
# Initialize OSD file
Shell("echo > /tmp/OSD.gfs",pools[pool][0])
stripe=len(pools[pool])
# Create pool
for host in pools[pool]:
# Add peer to master
# Add OSD element in OSD file
if GFSIP:
Shell("""
REMOTE=%s
RDDIR=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
gluster peer probe $IP
echo -ne "$IP:$RDDIR " >> /tmp/OSD.gfs
""" % (host,RDDIR),pools[pool][0])
else:
Shell("""
IP=%s
RDDIR=%s
gluster peer probe $IP
echo -ne "$IP:$RDDIR " >> /tmp/OSD.gfs
""" % (host,RDDIR),pools[pool][0])
masters.append(pools[pool][0])
# Create volume
Shell("""
OSD=$(cat /tmp/OSD.gfs)
# Create volume
gluster volume create ramdisk stripe %i transport tcp $OSD
# Tune volume
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
""" % stripe,pools[pool][0])
# Start volume
Shell("gluster volume start ramdisk",pools[pool][0])
# View volume
Shell("gluster volume info",pools[pool][0])
return(masters)
def StartDisksOnPools(FS,DEVICES,pools):
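# Same as StartDiskOnPools, but each brick spans several local disks (DEVICES),
# so only multi-device filesystems (btrfs or zfs) are handled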
# Sly old coyote trick to flatten the list of lists
# http://stackoverflow.com/questions/11331908/how-to-use-reduce-with-list-of-lists
hosts=NodeSet.fromlist(sum(pools.itervalues(),[]))
print "StartDisksOnPools for filesystem %s and %s" % (FS,DEVICE)
# Initialisation of local partitions
Shell("[ ! -d %s ] && mkdir -p %s" % (HDDIR,HDDIR),hosts)
if FS=='btrfs':
Shell("mkfs.btrfs %s" % DEVICES,hosts)
elif FS=='zfs':
Shell("modprobe brd rd_nr=1 rd_size=$((2**20*%s))" % 1,hosts)
Shell("mkfs.ext2 -E stride=16 -F -m 0 /dev/ram0",hosts)
Shell("mount -o noatime -t ext2 /dev/ram0 /etc/zfs",hosts)
Shell('modprobe zfs',hosts)
Shell('touch /etc/zfs/zpool.cache',hosts)
time.sleep(5)
Shell("zpool create -m %s -f -o cachefile=none harddisk %s" % (HDDIR,DEVICES),hosts)
time.sleep(5)
if FS!='zfs':
Shell("mount -o noatime -t %s %s %s" % (FS,DEVICES,HDDIR),hosts)
masters=[]
# Create Pools and Volumes
for pool in pools.keys():
# Initialize OSD file
Shell("echo > /tmp/OSD.gfs",pools[pool][0])
stripe=len(pools[pool])
# Create pool
for host in pools[pool]:
# Add peer to master
# Add OSD element in OSD file
if GFSIP:
Shell("""
REMOTE=%s
HDDIR=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
gluster peer probe $IP
echo -ne "$IP:$HDDIR " >> /tmp/OSD.gfs
""" % (host,HDDIR),pools[pool][0])
else:
Shell("""
IP=%s
HDDIR=%s
gluster peer probe $IP
echo -ne "$IP:$HDDIR " >> /tmp/OSD.gfs
""" % (host,HDDIR),pools[pool][0])
masters.append(pools[pool][0])
# Create volume
Shell("""
OSD=$(cat /tmp/OSD.gfs)
# Create volume
gluster volume create ramdisk stripe %i transport tcp $OSD
# Tune volume
gluster volume set ramdisk performance.cache-size 2048MB
gluster volume set ramdisk performance.io-thread-count 32
gluster volume set ramdisk nfs.disable on
""" % stripe,pools[pool][0])
# Start volume
Shell("gluster volume start ramdisk",pools[pool][0])
# View volume
Shell("gluster volume info",pools[pool][0])
return(masters)
def StopOnServers(FS,hosts):
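# Stop and delete the GlusterFS volume, unmount the brick and unload the
# brd module (unless the ramdisk was a tmpfs)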
print "StopOnServers for filesystem %s" % FS
# Drop Caches
Shell("sync ; # echo 3 > /proc/sys/vm/drop_caches",hosts)
# Stop & Destroy
Shell("echo y | gluster volume stop ramdisk force",hosts)
Shell("echo y | gluster volume delete ramdisk",hosts)
# Unmount GlusterFS volume
Shell("umount -l %s" % RDDIR,hosts)
# Unload Ramdisk
Shell("[ %s != 'tmpfs' ] && rmmod brd" % FS,hosts)
def StopOnServersDisk(FS,hosts):
print "StopOnServers for filesystem %s" % FS
# Drop Caches
Shell("sync ; # echo 3 > /proc/sys/vm/drop_caches",hosts)
# Stop & Destroy
Shell("echo y | gluster volume stop ramdisk force",hosts)
Shell("echo y | gluster volume delete ramdisk",hosts)
# Unmount GlusterFS volume
Shell("umount -l %s" % HDDIR,hosts)
def StopOnPools(FS,pools):
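# For each pool: stop and delete the volume on the master, detach the peers,
# then unmount the ramdisk bricks on every member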
hosts=NodeSet.fromlist(sum(pools.itervalues(),[]))
print "StopOnPools for filesystem %s" % FS
# Create Pools and Volumes
for pool in pools.keys():
# Drop Caches
Shell("sync ; # echo 3 > /proc/sys/vm/drop_caches",pools[pool][0])
# Stop volume
Shell("echo y | gluster volume stop ramdisk force",pools[pool][0])
# Destroy volume
Shell("echo y | gluster volume delete ramdisk",pools[pool][0])
for host in pools[pool]:
if GFSIP:
Shell("""
REMOTE=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
gluster peer detach $IP
""" % host,pools[pool][0])
else:
Shell("gluster peer detach %s" % host,pools[pool][0])
# Unmount GlusterFS volume
Shell("umount -l %s" % RDDIR,hosts)
# Unload Ramdisk
Shell("[ %s != 'tmpfs' ] && rmmod brd" % FS,hosts)
def StopDiskOnPools(FS,DEVICE,pools):
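# Same as StopOnPools for disk-backed bricks: destroy the zpool for zfs,
# otherwise lazily unmount the brick directory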
hosts=NodeSet.fromlist(sum(pools.itervalues(),[]))
print "StopDiskOnPools for filesystem %s and %s" % (FS,DEVICE)
# Create Pools and Volumes
for pool in pools.keys():
# Drop Caches
Shell("sync ; # echo 3 > /proc/sys/vm/drop_caches",pools[pool][0])
# Stop volume
Shell("echo y | gluster volume stop ramdisk force",pools[pool][0])
# Destroy volume
Shell("echo y | gluster volume delete ramdisk",pools[pool][0])
for host in pools[pool]:
if GFSIP:
Shell("""
REMOTE=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
gluster peer detach $IP
""" % host,pools[pool][0])
else:
Shell("gluster peer detach %s" % host,pools[pool][0])
# Unmount GlusterFS volume
if FS=='zfs':
Shell("zpool destroy -f harddisk",hosts)
time.sleep(5)
Shell('rmmod zfs',hosts)
else:
Shell("umount -l %s" % RDDIR,hosts)
def IOZoneOnClients(FS,TSTSIZE,match,hosts):
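# On each client: mount the GlusterFS volume served by match[client],
# run IOZONE LAUNCHES times on a TSTSIZE GB file, log the timings,
# then unmount and gather the log files on the master with scp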
#DATE=time.strftime("%Y%m%d%H%M")
print "TestOnClients for filesystem %s" % FS
# Create Mount Point
# Client commands
Shell("[ ! -d %s ] && mkdir -p %s" % (GFSDIR,GFSDIR),hosts)
#
LogFile=dict()
# Mount GlusterFS volume
for node in NodeSet(hosts):
if GFSIP:
Shell("""
REMOTE=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
# Create a script so the mounts can be launched in parallel via ClusterShell
echo -e "#!/bin/bash\nmount -t glusterfs $REMOTE:ramdisk %s" > /tmp/MountGlusterFS.sh
chmod 755 /tmp/MountGlusterFS.sh
""" % (match[node],GFSDIR),node)
else:
Shell("""
echo -e "#!/bin/bash\necho Mount GlusterFS on client $HOSTNAME\nmount -t glusterfs %s:ramdisk %s" > /tmp/MountGlusterFS.sh
chmod 755 /tmp/MountGlusterFS.sh
""" % (match[node],GFSDIR),node)
LogFile[node]="%s/iozone.%s.%s.%s.%sG.%sG.%s.log" % (LOGDIR,node,match[node],FS,TSTSIZE,RDSIZE,DATE)
Shell("echo %s > /root/logfile" % LogFile[node],node)
# Skip the Record Rewrite operation to avoid spurious values in the reduction
IOZONE_OPTIONS="-M -i 0 -i 1 -i 2 -i 3 -i 5 -i 6 -i 7 -r %s -s %sg " % (BSIZE,TSTSIZE)
#IOZONE_OPTIONS="-M -i 0 -i 1 -r %s -s %sg " % (BSIZE,TSTSIZE)
IOZONE="/usr/bin/iozone %s" % IOZONE_OPTIONS
Shell("time sh /tmp/MountGlusterFS.sh",hosts)
Shell("""
GFSDIR="%s"
FS="%s"
LAUNCHES="%s"
IOZONE="%s -f $GFSDIR/iozone.$HOSTNAME"
cd $GFSDIR
echo $IOZONE
SLEEP="sleep 5"
$SLEEP
sync
# Check if FS already mounted
if [ $(grep fuse.glusterfs /etc/mtab | grep $GFSDIR | wc -l) -eq 0 ]
then
echo -e "GlusterFS volume not mounted : exit"
exit 0
else
echo -e "GlusterFS volume mounted on $GFSDIR : continue"
fi
$SLEEP
# Launch Test
LOG=$(cat /root/logfile)
echo -ne "Launch Test on $FS in $LOG: ..."
L=0
# Drop Caches
# echo 3 > /proc/sys/vm/drop_caches
sync
echo sysctl -a > $LOG
while [ $L -lt $LAUNCHES ]
do
echo -ne " $L"
(time $IOZONE) >>$LOG 2>&1
L=$(($L+1))
$SLEEP
done
echo -e " done"
$SLEEP
""" % (GFSDIR,FS,LAUNCHES,IOZONE),hosts)
Shell("umount -l %s" % GFSDIR,hosts)
# Create folder on master
LOCALLOGDIR="%s/GlusterFS.%s/%s" % (LOGDIR,DATE,len(NodeSet(hosts)))
try:
os.makedirs(LOCALLOGDIR)
except:
print "%s already exists" % LOCALLOGDIR
# Retrieve log files from nodes to local folder
print "Copy files to %s" % LOCALLOGDIR
for node in NodeSet(hosts):
Command("scp %s:%s %s" % (node,LogFile[node],LOCALLOGDIR))
def DDOnClients(FS,TSTSIZE,match,hosts):
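# Same protocol as IOZoneOnClients, but the test is a sequential dd write
# followed by a dd read of a TSTSIZE GB file in BSIZE KB blocks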
#DATE=time.strftime("%Y%m%d%H%M")
print "TestOnClients for filesystem %s" % FS
# Create Mount Point
# Client commands
Shell("[ ! -d %s ] && mkdir -p %s" % (GFSDIR,GFSDIR),hosts)
#
LogFile=dict()
# Mount GlusterFS volume
for node in NodeSet(hosts):
print "Mounting GlusterFS from %s on node %s" % (match[node],node)
if GFSIP:
Shell("""
REMOTE=%s
IP=$(host -4 $REMOTE | awk '{print $4}' | sed -e 's/172.16.20/10.11.12/g')
time mount -t glusterfs $IP:ramdisk %s
""" % (match[node],GFSDIR) ,node)
else:
Shell("""
time mount -t glusterfs %s:ramdisk %s
""" % (match[node],GFSDIR) ,node)
LogFile[node]="%s/dd.%s.%s.%s.%sG.%sG.%s.log" % (LOGDIR,node,match[node],FS,TSTSIZE,RDSIZE,DATE)
Shell("echo %s > /root/logfile" % LogFile[node],node)
DD="dd bs=%sk count=$((2**20*%s/%s))" % (BSIZE,TSTSIZE,BSIZE)
Shell("""
GFSDIR="%s"
FS="%s"
LAUNCHES="%s"
cd $GFSDIR
DD_CMD="%s"
DDW="$DD_CMD if=/dev/zero of=$GFSDIR/dd.$HOSTNAME"
DDR="$DD_CMD of=/dev/null if=$GFSDIR/dd.$HOSTNAME"
echo $DD_CMD
SLEEP="sleep 5"
$SLEEP
sync
# Check if FS already mounted
if [ $(grep fuse.glusterfs /etc/mtab | grep $GFSDIR | wc -l) -eq 0 ]
then
echo -e "GlusterFS volume not mounted : exit"
exit 0
else
echo -e "GlusterFS volume mounted on $GFSDIR : continue"
fi
$SLEEP
# Launch Test
LOG=$(cat /root/logfile)
echo -ne "Launch Test on $FS in $LOG: ..."
L=0
# Drop Caches
# echo 3 > /proc/sys/vm/drop_caches
sync
echo sysctl -a > $LOG
while [ $L -lt $LAUNCHES ]
do
echo -ne " $L"
[ -f $GFSDIR/dd.$HOSTNAME ] && rm $GFSDIR/dd.$HOSTNAME
sync
$SLEEP
(time $DDW) >>$LOG 2>&1
$SLEEP
(time $DDR) >>$LOG 2>&1
L=$(($L+1))
$SLEEP
done
echo -e " done"
$SLEEP
""" % (GFSDIR,FS,LAUNCHES,DD),hosts)
Shell("umount -l %s" % GFSDIR,hosts)
# Create folder on master
LOCALLOGDIR="%s/GlusterFS.%s/%s" % (LOGDIR,DATE,len(NodeSet(hosts)))
try:
os.makedirs(LOCALLOGDIR)
except:
print "%s already exists" % LOCALLOGDIR
# Retrieve log files from nodes to local folder
print "Copy files to %s" % LOCALLOGDIR
for node in NodeSet(hosts):
Command("scp %s:%s %s" % (node,LogFile[node],LOCALLOGDIR))
if __name__=='__main__':
# clients='172.16.20.[171-180]'
# servers='172.16.20.[181-190]'
clients='sl230-[1-10]'
servers='sl230-[11-20]'
# clients='x41z[1-8]'
# servers='x41z[9-16]'
# association=Distribute(clients,servers,'uniform')
# MaxClients=1
# TSTSIZE=PERCENT*RDSIZE/100/MaxClients
# for FS in FS2CHECK:
# StartOnServers(FS,servers)
# DDOnClients(FS,TSTSIZE,association,clients)
# StopOnServers(FS,servers)
# pools=CreatePools(servers,2,'round-robin')
# for FS in FS2CHECK:
# StartOnPools(FS,pools)
# clients='sl230-[5-20]'
# association=AllOnOne(clients,'sl230-1')
# TestOnClients(FS,TSTSIZE,association,clients)
# StopOnPools(FS,pools)
# servers='172.16.20.69'
# Test all in one (from 1 to 16 clients on 1 server)
# servers="sl230-2"
# StartClient=5
# MaxClients=16
# for EndClient in range(StartClient,MaxClients+StartClient):
# TSTSIZE=PERCENT*RDSIZE/100/MaxClients
# for FS in FS2CHECK:
# StartOnServers(FS,servers)
# clients='sl230-[%s-%s]' % (StartClient,EndClient)
# association=AllOnOne(clients,servers)
# IOZoneOnClients(FS,TSTSIZE,association,clients)
# StopOnServers(FS,servers)
# Test all on 2 (from 1 to 16 clients on 2 servers)
# Test on RamDisks
# stride=4
# servers="sl230-[1-%s]" % stride
# pools=CreatePools(servers,stride,'round-robin')
# StartClient=stride+1
# MaxClients=20-stride
# for EndClient in range(StartClient,MaxClients+StartClient):
# TSTSIZE=PERCENT*RDSIZE/100/MaxClients*stride
# print TSTSIZE
# for FS in FS2CHECK:
# masters=StartOnPools(FS,pools)
# clients='sl230-[%s-%s]' % (StartClient,EndClient)
# association=AllOnOne(clients,masters[0])
# IOZoneOnClients(FS,TSTSIZE,association,clients)
# StopOnPools(FS,pools)
# Test on FileSystems
stride=4
servers="v40z[1-%i]" % stride
pools=CreatePools(servers,stride,'round-robin')
Devices='/dev/sda /dev/sdb /dev/sdc /dev/sdd /dev/sde'
FS='btrfs' # multi-device bricks: btrfs (or zfs) are the filesystems handled above
masters=StartDisksOnPools(FS,Devices,pools)
StopDiskOnPools(FS,Devices,pools)