@andrewgross
Created September 11, 2012 22:39
Data Volume LWRP
#
# Cookbook Name:: yipit_data_volume
# Recipe:: default
#
# Copyright 2012, Yipit.com
#
# All rights reserved - Do Not Redistribute
#
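# Example usage from a recipe (a sketch; it assumes the matching resource
# definition exposes the :size, :level, and :location attributes that this
# provider reads from @new_resource):
#
#   yipit_data_volume "/data" do
#     size     800      # requested usable size, in GB
#     level    10       # RAID level; 0, 1, and 10 are supported below
#     location "/data"  # mount point; the resource name is also checked against /etc/fstab
#     action   :create
#   end
#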
require 'rubygems'
require 'chef/log'
require 'chef/mixin/shell_out'
require 'chef/provider'

action :create do
  if ! Chef::Config[:solo]
    unless `cat /etc/fstab`.include?(@new_resource.name)
      aws = data_bag_item("IAM", "EBS")
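      # The "EBS" item in the "IAM" data bag is assumed to carry the AWS credentials
      # used below, e.g. { "id": "EBS", "AWS_ACCESS_KEY": "...", "AWS_SECRET_KEY": "..." }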
      raid_members = 8
      raid_member_size = get_raid_member_size(@new_resource.size, @new_resource.level, raid_members)
      raid_location = get_free_raid_point
      mount_location = @new_resource.location
      attach_location = get_free_attach_point
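      # EBS volumes are requested from AWS under /dev/sd* names, but newer kernels
      # (3.0+) expose them as /dev/xvd*, so attach_location carries the xvd prefix
      # and is rewritten to sd only for the aws_ebs_volume call below.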
      # Get our disks
      (1..raid_members).each do |disk|
        aws_ebs_volume "yipit_data_volume#{disk} at #{attach_location}" do
          aws_access_key aws['AWS_ACCESS_KEY']
          aws_secret_access_key aws['AWS_SECRET_KEY']
          size raid_member_size
          device "#{attach_location.gsub('xvd', 'sd')}#{disk}"
          action [:create, :attach]
          timeout 300
          availability_zone 'us-east-1c'
        end
      end
      # Build our Raid
      yipit_mdadm raid_location do
        devices((1..raid_members).to_a.map { |i| "#{attach_location}#{i}" })
        action [ :create, :assemble ]
      end
      # Set our Read Ahead
      execute "Set readahead #{raid_location}" do
        command "blockdev --setra 65536 #{raid_location}"
        user "root"
        not_if "blockdev --getra #{raid_location} | grep 65536"
      end
      # Format our disks
      execute "format disk #{raid_location}" do
        command "mkfs.xfs #{raid_location}"
        user "root"
        not_if "mount | grep #{raid_location} | grep xfs"
      end
      # Make our mount point
      directory mount_location do
        owner "root"
        group "root"
        mode "0755"
      end
      # Mount Up (and set up /etc/fstab)
      mount mount_location do
        device raid_location
        options "defaults,noatime,nobootwait"
        fstype "xfs"
        action [ :mount, :enable ]
        pass 0
      end
      # Flag the resource as updated now that the volume has been built and mounted
      new_resource.updated_by_last_action(true)
    else
      Chef::Log.debug("#{@new_resource} data volume already exists, skipping create (#{@new_resource.name})")
    end
  else
    Chef::Log.debug("Using Chef-Solo, skipping RAID creation calls")
  end
end

action :destroy do
  # This will unmount the drive, remove the entry from fstab, stop the raid, delete the AWS volumes.
end

action :check do
  # Someday
end

action :repair do
  # Maybe
end

def get_open_point(existing_device_prefix, new_device_range_start, new_device_range_stop)
  # existing_device_prefix should be something like 'xvd' or 'md'
  # new_device_range should be similar to '0' and '9' or 'f' and 'p'
  possible_attach_points = (new_device_range_start..new_device_range_stop).map do |drive_letter|
    "/dev/#{existing_device_prefix}#{drive_letter}"
  end
  Chef::Log.debug("#{@new_resource} Possible Attach Points: #{possible_attach_points}")
  # Get a list of in use devices (fold numbered partitions together if looking at disk drives)
  existing_devices = `ls /dev/#{existing_device_prefix}*`.split().each do |existing_device|
    if existing_device_prefix.include? "xvd"
      existing_device.gsub!(/[0-9]/, "")
    end
  end
  Chef::Log.debug("#{@new_resource} Existing Devices: #{existing_devices}")
  existing_devices.uniq!
  # Return an array of all open attach points
  free_points = possible_attach_points - existing_devices
  Chef::Log.debug("#{@new_resource} Free Points: #{free_points}")
  if not free_points.any?
    # Fail over to operator control
    raise "No open mount points in the range /dev/#{existing_device_prefix}[#{new_device_range_start}-#{new_device_range_stop}]"
  end
  return free_points
end
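
# Example (hypothetical device layout): with /dev/xvda, /dev/xvda1 and /dev/xvdf
# already present, get_open_point("xvd", "f", "p") folds the numbered partition
# into /dev/xvda, drops the in-use /dev/xvdf, and returns
# ["/dev/xvdg", "/dev/xvdh", ..., "/dev/xvdp"].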

def get_free_attach_point
  # Linux kernel 3.0+ uses /dev/xvd* instead of /dev/sd*
  # AWS recommends using mount points f - p, although others are available
  free_point = get_open_point("xvd", "f", "p").first
  Chef::Log.debug("#{@new_resource} Free Attach Point: #{free_point}")
  return free_point
end

def get_free_raid_point
  # If we have over 10 raids per machine, god help us
  free_point = get_open_point("md", "0", "9").first
  Chef::Log.debug("#{@new_resource} Free Raid Point: #{free_point}")
  return free_point
end

def get_raid_member_size(requested_volume_size, level, raid_members)
  # Depending on the RAID level (0, 1, 5, 10) we need to adjust the size of our member disks
  # so that we end up with a logical volume that is the requested size (or very close)
  # For RAID0 we lose none of the sum of the disk sizes
  # For RAID1 we lose 50% of the sum of the disk sizes
  # For RAID10 we lose 50% of the sum of the disk sizes
  # For RAID5 we lose 100/#{num_disks}% of the sum of the disk sizes.
  # Let's not support RAID5, it sucks on AWS anyways
  scaling_factors = { 0 => 1, 1 => 2, 10 => 2 }
  if not scaling_factors.has_key?(level)
    # Fail over to operator control
    raise "Invalid RAID level specified. Supported levels are #{scaling_factors.keys}"
  end
  # Use float division so the .ceil below rounds partial gigabytes up instead of truncating
  raid_member_size = (requested_volume_size.to_f / raid_members) * scaling_factors[level]
Chef::Log.debug("#{@new_resource} Calculated individual drive size of #{raid_member_size} for #{raid_members} drives in RAID#{level} for a total size of #{requested_volume_size}")
# We max out at 1TB for individual EBS drives
return (raid_member_size.ceil < 1000 ) ? raid_member_size.ceil : 1000
end
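
# Worked example (hypothetical numbers): get_raid_member_size(800, 10, 8)
# returns ((800.0 / 8) * 2).ceil = 200, i.e. eight 200 GB EBS volumes whose
# RAID10 array exposes roughly 800 GB of usable space.
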
def check_mount
end