|
#!/usr/bin/env perl |
|
# Backup-script for UNIX servers.
|
# https://gist.github.com/mugifly/4966246/ |
|
# Required: perl, openssl-devel, squashfs-tools |
|
|
|
use Amazon::S3; |
|
use Data::Dumper; |
|
use File::Path; |
|
use Digest::SHA::PurePerl; |
|
use Config::Pit; |
|
|
|
# Config ####### |
|
|
|
our $PATH_STORE_DIR = '/backup/Store/'; # アーカイブの保管先 |
|
our @PATH_BACKUP_TARGETS = ('/var','/home','/etc'); # バックアップ対称のディレクトリ |
|
our $UPLOAD_S3_PREFIX = 'AMAZON_S3_BUCKET_PREFIX'; # 作成されるBucket名の接頭辞 |
|
|
|
our $KEEP_GEN_LOCAL = 2; # ローカルで2世代前まで保持 |
|
our $KEEP_GEN_S3 = 1; # S3で1世代前まで保持 |
|
|
|
################## |
|
|
|
our $errFlag = 0; |
|
our $config = {}; |
|
our $stime = time(); |
|
our $state = {}; |
|
our $newBackupDirPath = ""; |
|
|
|
print "Server-Backup Script.\n"; |
|
print "(C) Masanori. - ". get_time_str() . "\n"; |
|
init(); |
|
|
|
if(defined($config->{mysql_username})){ |
|
backup_db_mysql(); |
|
} |
|
|
|
backup_filesystem(); |
|
post_s3(); |
|
|
|
print "* Complete! - " . get_time_str() . "\n"; |
|
exit($errFlag); |
|
|
|
# 初期化 |
|
sub init{ |
|
print "* init...\n"; |
|
|
|
# Config::Pitによる設定の読み込み |
|
my $c = pit_get("backup-mysql_localhost"); |
|
if(defined($c->{username})){ |
|
$config->{mysql_username} = $c->{username}; |
|
$config->{mysql_password} = $c->{password}; |
|
} |
|
$c = pit_get("backup-aws_localhost"); |
|
$config->{awsAccessKeyId} = $c->{accesskey}; |
|
$config->{awsSecretAccessKey} = $c->{secretaccess}; |
|
|
|
# バックアップ先ディレクトリのチェック |
|
storedir_path_check(); |
|
|
|
#古いバックアップディレクトリの列挙 |
|
print " * find old-backup-directories...\n"; |
|
opendir(DIR, $PATH_STORE_DIR); |
|
my @old_dir_numbers = (); |
|
foreach(readdir(DIR)){ |
|
next if /^\.{1,2}$/;#カレントをスキップ |
|
my $name = $_; |
|
if($name =~ /backup-(\d+)/){ |
|
print " * directory: $name\n"; |
|
push(@old_dir_numbers, $1); |
|
} |
|
} |
|
closedir(DIR); |
|
|
|
#保持世代数よりも古いバックアップディレクトリの削除 |
|
@old_dir_numbers = sort { $b <=> $a } @old_dir_numbers;#番号を最新順にソート |
|
for(my $i=$KEEP_GEN_LOCAL;$i<=$#old_dir_numbers;$i++){ |
|
my $num = $old_dir_numbers[$i]; |
|
print " * delete old-backup: ".$bu->{bucket}."\n"; |
|
rmtree($PATH_STORE_DIR."backup-".$num); |
|
} |
|
|
|
#新しいバックアップディレクトリを作成 |
|
$newBackupDirPath = $PATH_STORE_DIR."backup-${stime}/"; |
|
print " * create new backup-directory: backup-${stime}\n"; |
|
mkdir($newBackupDirPath); |
|
|
|
#バックアップ情報ファイルの作成 |
|
open(FH, "> ${newBackupDirPath}backup-info.txt"); |
|
print FH "backup-$stime - ".get_time_str($stime)."\n"; |
|
close(FH); |
|
} |
|
|
|
# ファイルシステムのバックアップ |
|
sub backup_filesystem{ |
|
print "* Backup-FileSystem...\n"; |
|
foreach my $dir (@PATH_BACKUP_TARGETS){ |
|
my $arc_path = $newBackupDirPath . get_archive_name_str($dir) . ".squashfs"; |
|
print " * mksquashfs - $dir -> $arc_path\n"; |
|
my $ret = system "mksquashfs $dir $arc_path -noappend --decompresslookups --lzmafastbytes=150"; |
|
write_log_file("SQUASHFS",$arc_path,$dir); |
|
} |
|
} |
|
|
|
# MySQLのバックアップ |
|
sub backup_db_mysql{ |
|
print "* Backup-MySQL...\n"; |
|
my @dblist = (/\r\n/,`ls -p /var/lib/mysql | grep / | tr -d /`); |
|
my $mysql_user = $config->{mysql_username}; |
|
my $mysql_pass = $config->{mysql_password}; |
|
|
|
foreach my $dbname(@dblist){ |
|
chomp($dbname); |
|
my $arc_path = $newBackupDirPath . get_archive_name_str("mysql_$dbname") . "/"; |
|
my $tar_archive_path = $newBackupDirPath.get_archive_name_str("mysql_$dbname").".tar.gz"; |
|
mkdir($arc_path); |
|
print " * table : $dbname > $arc_path\n"; |
|
|
|
#MySQLのホットコピー |
|
my $table_count = ""; |
|
$table_count = `mysql -u $mysql_user -p$mysql_pass -B -e "show tables" $dbname|wc -l`; |
|
chomp($table_count); |
|
if($table_count eq 0){ |
|
print " * no table, skip.\n"; |
|
next; |
|
} |
|
|
|
print " * mysqlhotcopy... $table_count\n"; |
|
my $ret = system "mysqlhotcopy $dbname -u $mysql_user -p $mysql_pass $arc_path"; |
|
if($ret ne 0){ |
|
print " * mysqlhotcopy error! [ERROR]\n";$errFlag = 1; |
|
print " * remove temporary directory...\n"; |
|
rmtree($arc_path); |
|
next; |
|
} |
|
|
|
#tar.gz圧縮 |
|
print " * tar.gz compress...\n"; |
|
$ret = system("/bin/tar -cvzf $tar_archive_path $arc_path"); |
|
if($ret ne 0){ |
|
print " * tar error! [ERROR]\n";$errFlag = 1; |
|
print " * remove temporary directory...\n"; |
|
rmtree($arc_path); |
|
next; |
|
} |
|
write_log_file("MYSQL",$tar_archive_path,$dbname); |
|
|
|
#ホットコピーディレクトリの削除 |
|
print " * remove temporary directory...\n"; |
|
rmtree($arc_path); |
|
} |
|
} |
|
|
|
# AmazonS3へのアップロード |
|
sub post_s3{ |
|
my ($fpath) = @_; |
|
print "* Backup to AmazonS3...\n"; |
|
my $s3_filename = get_archive_name_str($fpath); |
|
my $s3 = Amazon::S3->new({ |
|
aws_access_key_id => $config->{awsAccessKeyId}, |
|
aws_secret_access_key => $config->{awsSecretAccessKey}, |
|
retry => 4, |
|
secure => 1, |
|
}); |
|
print " * get buckets list...\n"; |
|
my $response = $s3->buckets; |
|
my $resp = $response->{buckets}; |
|
my @resps = @$resp; |
|
my $bucket = undef; |
|
|
|
print " * find old-backups...\n"; |
|
my $old_buckets = {}; |
|
my @old_bucket_numbers = (); |
|
# Bucketを反復してバックアップバケットのみを$old_bucketsへ抽出 |
|
foreach my $bu(@resps) { |
|
if($bu->{bucket} =~ /${UPLOAD_S3_PREFIX}(\d+)/){ |
|
my $name = $bu->{bucket}; |
|
print " * bucket: ${name}\n"; |
|
$old_buckets->{$name} = $bu; |
|
push(@old_bucket_numbers, $1); |
|
} |
|
} |
|
#保持世代数よりも古いバックアップバケットの削除 |
|
@old_bucket_numbers = sort { $b <=> $a } @old_bucket_numbers;#番号を最新順にソート |
|
for(my $i=$KEEP_GEN_S3;$i<=$#old_bucket_numbers;$i++){ |
|
my $num = $old_bucket_numbers[$i]; |
|
my $bu = $old_buckets->{${UPLOAD_S3_PREFIX}.${num}}; |
|
print " * delete old bucket keys: ".$bu->{bucket}."\n"; |
|
my $response = $bu->list; |
|
for my $key (@{ $response->{keys} }) { |
|
print " * delete bucket-key: ".$key->{key}."\n"; |
|
$bu->delete_key($key->{key}); |
|
} |
|
print " * delete bucket.\n"; |
|
$bu->delete_bucket; |
|
} |
|
|
|
#新しいバックアップバケットを作成 |
|
my $new_bucket_name = $UPLOAD_S3_PREFIX.$stime; |
|
print " * create new bucket: ".$new_bucket_name . "\n"; |
|
my $new_bucket = $s3->add_bucket( { bucket => $new_bucket_name } ) or die $s3->err . ": " . $s3->errstr; |
|
|
|
#現在の新しいバックアップディレクトリ内のファイルを列挙 |
|
print " * list-up backup file...\n"; |
|
opendir(DIR, $newBackupDirPath); |
|
my @backup_files = (); |
|
foreach(readdir(DIR)){ |
|
next if /^\.{1,2}$/;#カレントをスキップ |
|
my $file = $_; |
|
push(@backup_files, $file); |
|
my $file_size = -s ($newBackupDirPath.$file); |
|
print(" * $file - (${file_size} byte) \n"); |
|
} |
|
closedir(DIR); |
|
|
|
unless(defined($backup_files[0])){ |
|
print " * not a backup file!\n"; |
|
return 1; |
|
} |
|
#アップロードを開始 |
|
print " * start upload...\n"; |
|
foreach my $fname(@backup_files){ |
|
print " * $fname\n"; |
|
$new_bucket->add_key_filename( |
|
get_archive_name_str($fname), $newBackupDirPath.$fname,{content_type=> 'application/octet-stream'} |
|
); |
|
} |
|
print " * upload done.\n"; |
|
} |
|
|
|
# バックアップ先ディレクトリのチェック |
|
sub storedir_path_check{ |
|
if(-d $PATH_STORE_DIR) { |
|
print "* BackupDirectory: $PATH_STORE_DIR\n [OK]\n"; |
|
}else{ |
|
print "* BackupDirectory: $PATH_STORE_DIR\n [NG] not found.\n";$errFlag = 1; |
|
exit; |
|
} |
|
} |
|
|
|
# パスなどからアーカイブファイル名として利用できる文字列を生成 |
|
sub get_archive_name_str{ |
|
my ($arcname) = @_; |
|
$arcname =~ s|\/|_|g; |
|
return $arcname; |
|
} |
|
|
|
# 日付文字列の生成 |
|
sub get_time_str{ |
|
my ($ttime) = shift || time(); |
|
my @wdays = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'); |
|
my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime($ttime); |
|
$year += 1900;$mon += 1; |
|
return "$year/$mon/$mday ($wdays[$wday]) $hour:$min:$sec\n"; |
|
} |
|
|
|
# ログの出力 |
|
sub write_log{ |
|
my ($str) = @_; |
|
open(FH, ">> ${newBackupDirPath}backup-info.txt"); |
|
print FH $str."\n"; |
|
close(FH); |
|
} |
|
|
|
# ログの出力 (単一ファイル用) |
|
sub write_log_file{ |
|
my ($type, $backup_name, $source_name) = @_; |
|
my $sha = Digest::SHA::PurePerl->new("sha256"); |
|
$sha->addfile($backup_name); |
|
my $hashdg = $sha->hexdigest; |
|
write_log("${type} ${backup_name} ${source_name} ${hashdg}"); |
|
} |