Created
December 17, 2011 20:54
-
-
Save cixelsyd/1491351 to your computer and use it in GitHub Desktop.
"rsyncdb.rb" zips and moves backups from settings inside data bags
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#
# Cookbook Name:: smashrun
# Recipe:: rsyncdb
#
# Copyright 2010, Smashrun, Inc.
# Author:: Steven Craig <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MSSQL backup references:
# http://msdn.microsoft.com/en-us/library/ms175477(v=SQL.90).aspx
# http://msdn.microsoft.com/en-us/library/ms190217(v=SQL.90).aspx
# http://msdn.microsoft.com/en-us/library/ms190440(v=SQL.90).aspx
# translog truncation:
# http://msdn.microsoft.com/en-US/library/ms189085(v=SQL.90).aspx
# organize backups
# Find the databases running on this host via the :running_database data bag,
# look up each one's file/location settings in the :database_info data bag,
# then for every (backup type, database) pair: stamp, zip (7-Zip) and rsync
# the backup offsite. The rsync --log-file doubles as a "done" marker that the
# not_if guards check to make re-runs idempotent.
log("begin search data bags for rsyncdb config settings") { level :debug }
# running_database holds which databases run on which database hosts
# database_info holds location and file information
running_database = []
database_info = []
# gather necessary information
search(:running_database, "id:#{node[:hostname]}") do |host|
  # "database" is a comma-separated list of database names for this host
  running_database.concat(host["database"].split(","))
  ["full", "trans"].each do |backuptype|
    running_database.each do |dbname|
      # skip lookup if this backup type was already rsynced (log file exists)
      unless File.exist?("#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{dbname}.log")
        search(:database_info, "id:#{dbname}") do |db|
          # "dbname" is intentionally the data bag id (the search key above)
          dbinfo = { "id" => db["id"], "dbname" => db["id"] }
          %w[backupdrive backupdir collation comment
             datadrive datadir datafile datainitsize datamaxsize
             owner restoredrive restoredir
             transdrive transdir transfile transinitsize transmaxsize].each do |key|
            dbinfo[key] = db[key]
          end
          database_info << dbinfo
        end
      end
    end
    # log(database_info.inspect) {level :debug}
    # NOTE(review): database_info accumulates across backuptype iterations, so
    # entries gathered during the "full" pass are revisited in the "trans"
    # pass; the not_if log-file guards below turn the repeats into no-ops.
    # begin to process the backups: zip, rsync offsite
    database_info.each do |db|
      # stamp trans files with the hour, stamp fulls with "0"
      if backuptype =~ /trans/
        hour = Time.now.hour
        # copy backups so that once unzipped they will retain their "hour" designator
        # copy, not move, so that backup files are available where MSSQL expects to see them
        execute "stamp_backup" do
          cwd "#{node[:backupdb][:backupdir]}"
          timeout 300
          command %Q(copy #{node[:backupdb][:backupdir]}\\#{backuptype}-#{db["dbname"]}.bak #{node[:backupdb][:backupdir]}\\#{backuptype}-#{db["dbname"]}-#{hour}.bak)
          not_if { File.exist?("#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{db["dbname"]}-#{hour}.log") }
        end
      else
        hour = 0
      end
      # zip backup files
      execute "zipbackup_#{node[:hostname]}-#{backuptype}-#{db["dbname"]}-#{hour}" do
        cwd "#{node[:backupdb][:sqlscripts]}"
        timeout 900
        command %Q(#{node[:sevenzip][:installdir]}\\7z.exe a -tzip -r #{node[:backupdb][:backupdir]}\\#{node[:hostname]}-#{backuptype}-#{db["dbname"]}-#{hour}.zip #{node[:backupdb][:backupdir]}\\#{backuptype}-#{db["dbname"]}-#{hour}.bak)
        not_if { File.exist?("#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{db["dbname"]}-#{hour}.log") }
        ignore_failure true
      end
      # rsync the zip file; the --log-file it writes is the "already done"
      # marker consulted by every not_if guard in this recipe
      execute "rsync_#{backuptype}-#{db["dbname"]}-#{hour}" do
        cwd "#{node[:backupdb][:tempdir]}"
        timeout 3600
        action :run
        command %Q(c:\\deltacopy\\rsync.exe -v -rlt -z --chmod=a=rw,Da+x --log-file="#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{db["dbname"]}-#{hour}.log" --modify-window=2 --password-file="#{node[:deltacopy][:installdir]}\\#{node[:deltacopy][:passwdfile]}" -rltODvzao "/cygdrive/D/MSSQL/Backup/#{node[:hostname]}-#{backuptype}-#{db["dbname"]}-#{hour}.zip" "#{node[:deltacopy][:user]}@qdb001.smashrun.com::restore/Restore/")
        not_if { File.exist?("#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{db["dbname"]}-#{hour}.log") }
      end
      # tweet (full backups only)
      log("tweet #{db["dbname"]} #{node[:backupdb][:tag]} #{backuptype} end rsync") { level :debug }
      if backuptype =~ /full/
        ruby_block "tweet #{db["dbname"]} #{node[:backupdb][:tag]} #{backuptype} end rsync" do
          block {
            # NOTE(review): this tweets only when the rsync log is ABSENT at
            # converge time, i.e. when the rsync above did not write its log —
            # looks inverted for a "success" message; confirm intent
            unless File.exist?("#{node[:backupdb][:sqlscripts]}\\rsync_#{backuptype}-#{db["dbname"]}-#{hour}.log")
              node[:tweeter][:admin].each { |a|
                Tweeter.tweet(a, "rsync #{db["dbname"]} #{node[:backupdb][:tag]} #{backuptype} success from #{node[:hostname]}") }
            end
          }
          ignore_failure true
        end
      end
    end
  end
end
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment