Location: PHPKode > scripts > woodyWebBacker > woodyWebBacker_0_1/woodyWebBacker.php
<?php

// woodyWebBacker
/********************************************
 * EMAIL & FTP & AMAZON S3 SERVER BACKUP SCRIPT (website & databases)
 * By georgfly
 * based on a script by Jordi Romkema (http://www.jor-on.com) and Eric Nagel http://www.ericnagel.com/2009/05/ftp-or-amazon-s3-server-backup-php-script.html 
 * definitely check out their websites for more info on e.g. s3 cloud backup
 * Version 0.1: October 19th 2011
 *******************************************/

// 1. shell_exec needs to be allowed on your server
// 2. this script runs on windows and linux machines; windows machines need an installation of winrar; linux machines compress with 'tar' and also use 'split' for splitting large archives
// 3. it uses your servers mysqldump (mysqldump.exe for windows) function to dump all databases it can find and then compresses them with winrar / tar
// 4. it compresses a specified root folder of your website including all subfolders (either all root-level subfolders and files into separate archives, or everything bundled into one large archive)
//	in case the backup-folder (the folder where we temporarily store the backup archives) is inside the website that is being backed up, it will be automatically excluded from the compression
// 5. it splits all produced archives into chunks of specified size (winrar can do this while it is compressing, for 'tar' the script uses 'split' subsequently)
//  to extract the split linux archives (e.g. html_20111020022526.tar.gz.000, etc.) on windows, you have to first fuse the partial archives using windows command line: copy /b html_20111020022526.tar.gz.* html_20111020022526.tar.gz
//  the resulting tar.gz can be opened in winrar
// 6. it sends these archives to a specified email address, ftp server or amazon s3 server (i have never used the amazon function, so no guarantee that it works)
// 7. you can set how long the backups will be kept on the ftp / s3 server before this script will again delete them (e.g. for 3 months); emails will NOT be automatically deleted
// 8. you can set a time interval how often this script shall be executed (e.g. once a week);
// 9. however, you need to set up a cron job for every e.g. 10 minutes since the script processes the data in chunks
// 	(e.g. it does not send all emails at once, but needs to be called repeatedly); when the entire backup- and sending process is completed, a timestamp will be stored in a small file to ensure that the script is not started again before the specified backup-interval
// 10. the script also makes sure that it cannot be executed before the previous execution is completed (by saving a 'locked' tag in a small file); so - no worries about setting up the cron-job too frequently
// 11. the script can also be started from a browser (e.g. if you cannot run a cron job on your server) by calling woodyWebBacker_execute.php
// 12. we can override the specified time interval (8) by calling woodyWebBacker_execute.php?getvar1=ignoreinterval
// 13. we can override the lock (10) (in the unlikely case that the script ever got stuck in a locked state) by calling woodyWebBacker_execute.php?getvar2=ignorelock
// 	(=> woodyWebBacker_execute.php?getvar1=ignoreinterval&getvar2=ignorelock should therefore force the start of the script under any circumstance)
// 14. deleting $process_file (see below) from the server will remove lock and interval timestamp, so the script can also be started immediately; a new $process_file will be generated automatically
// 15. backup archives will be deleted from your server immediately after sending them away, the backup working folder remains and contains the small file that contains interval timestamp and lock tag




set_time_limit(3600);
error_reporting(E_ALL);

/********************************************
 * MySQL variables
 *******************************************/
$backup_mysql = true; // Do you want to backup MySQL databases?
$mysqldump = "/usr/bin/mysqldump"; // required for linux
$mysqldumpexe = "C:/xampp/mysql/bin/mysqldump.exe"; // required for windows
$dumpordumpexe = "dump"; // // this is just a tag to tell the script what to use: enter 'dumpexe' for windows and 'dump' for linux; we could of course try to autodetect operating system...

$mysql_server = "localhost";
$mysql_username = "root";
$mysql_password = "";

/********************************************
 * Httpdocs variables
 *******************************************/
$backup_httpd = true; // Do you want to backup httpdocs directories?
//$vhosts_dir = "C:/xampp/htdocs/testsite1.mytestsites/";
$vhosts_dir = "/home/www/web289/html/";
$vhosts_bundlesubdirs = true; // if true, we will tar all subdirs into one large tar file ($maxfilesize will be applied subsquently)


/********************************************
 * Compression utility variables
 *******************************************/
$tar = "/bin/tar"; // required for linux
$split = "/usr/bin/split"; // required for linux
$rar = "C:/Program Files/WinRAR/Rar.exe"; // required for windows
$tarorrar = "tar"; // this is just a tag to tell the script what to use: enter 'rar' for windows and 'tar' for linux

/********************************************
 * Settings
 *******************************************/
$days = 35; // How many days of backups to keep?
$maxfilesize = 5; // What's the maximum file size (in MB)
$interval = 7*24; // Every x hours this script will backup the server.
//$working_dir = "C:/xampp/htdocs/testsite1.mytestsites/sec/bak/"; // This is where temporary files are written to, please make sure this script has permissions to write, read and delete in this directory.
$working_dir = "/home/www/web289/files/backup/";
$process_file = $working_dir . "woodyWebBacker.tmp"; // This file is used to read/write the state of the script to. If a process for some reason doesn't continue, it can help to delete this file. You don't have to create it yourself, the script will do this for you
$suffix = date("YmdHis"); // What suffix to add to filenames, if daily then make it the date. If you going to run it hourly, include the time aswell or make it unique.
$debug = true; // Turn on debug, you'll see debug messages in your console
$debug_time_format = "d-M-Y H:i:s"; // Date/time format for debug messages

/********************************************
 * Amazon S3 setup
 *******************************************/
$backup_s3 = false; // Do you want to send your backups to your Amazon S3 account?

if ($backup_s3)
{
    $s3_access_key_id = "XXXX";
    $s3_secret_access_key = "XXXX";
    $s3_bucket_name = "XXXX";
    
    /*
     * Amazon S3 PHP class
     * 
     * @link http://undesigned.org.za/2007/10/22/amazon-s3-php-class
     */
    
    include ("/path/to/AmazonS3/Class.php");
}

/********************************************
 * FTP setup
 *******************************************/
$backup_ftp = true; // Do you want to send your backups to an FTP account?

if ($backup_ftp)
{
    $ftp_server = "myserver.com";
    $ftp_username = "myusername";
    $ftp_password = "mypassword";
    $ftp_directory = "backupfolder/subfolder";
    $ftp_passive = true;
}


/********************************************
 * EMAIL setup
 *******************************************/
$backup_email = true;
$to_email = "hide@address.com";
$from_name = "woodyWebBacker";
$from_email = "hide@address.com";
$mailsubject = "[woodyWebBacker] from myserver";
$mailbody = "";
$protectextension_httpdocs = "trimme"; // some email servers make problems accepting archives (especially when they contain exe or other 'suspicious' files)
$protectextension_mysql = "trimme";



/********************************************
 * You are all set, no need to
 * change anything below this line!
 *******************************************/


 // just some path cleaning up: we convert everything into linux path with trailing slash
$vhosts_dir = rtrim(str_replace('\\', '/',$vhosts_dir),'/').'/';
$working_dir = rtrim(str_replace('\\', '/',$working_dir),'/').'/';


function d($message)
{
    global $debug, $debug_time_format;
    
    if ($debug)
    {
        echo(date($debug_time_format, time()) . ": " . $message . "<br>\n");
    }
}

function get_process_data($file, $default)
{
    $data = "";
    
    if (file_exists($file))
    {
        $handle = fopen($file, "r");
        
        while (!feof($handle))
        {
            $data .= fread($handle, 8192);
        }
        
        fclose($handle);
    }
    
    if (empty($data))
    {
        $data = $default;
    }
    else
    {
        $data = unserialize($data);
    }
    
    return $data;
}

function save_process_data($file, $data)
{
    $handle = fopen($file, "w");
    
    fwrite($handle, serialize($data));
    fclose($handle);
}

function shutdown()
{
    global $process_file, $process_data;
    
    $process_data["locked"] = false;
    
    save_process_data($process_file, $process_data);
}

register_shutdown_function("shutdown");

chdir($working_dir);

// get content of the process data file
$process_data = get_process_data($process_file, array(
    "state"        =>    "mysql",
    "locked"    =>    false,
    "start"        =>    time(),
    "suffix" => $suffix
));

$ignore_process_lock = false;

if (isset($argv[1]))
{
    if ($argv[1] == "ignore-lock")
    {
        d("Ignoring process lock");
        $ignore_process_lock = true;
    }
}


// if getvar1=ignoreinterval has been passed, then we will override $interval
if(!empty($_GET["getvar1"])) {
    if ($_GET["getvar1"] == "ignoreinterval") {
    	$interval = 0;
    }
}

// if getvar2=ignorelock has been passed, then we will ignore locked status
if(!empty($_GET["getvar2"])) {
    if ($_GET["getvar2"] == "ignorelock") {
    	$ignore_process_lock = true;
    }
}


// is the process locked?
// if it's locked it means that this script was called again too soon, the same script should already be running... this is okay though, don't worry! it should continue normally once the other script has finished doing its job and the process will be unlocked.
if (($process_data["locked"] == true) && ($ignore_process_lock == false))
{
    // process is locked, the script is already running, no need to continue...
    d("Process is locked, exiting now");
    exit();
}
else
{
    // lock current process for now until we're finished
    $process_data["locked"] = true;
    d("Process is unlocked, lock current process until we're finished");
    
    save_process_data($process_file, $process_data);
}

// what's the current state of the script?
$current_state = $process_data["state"];
if ($current_state == "mysql"){ // mysql is the first state => we need to create a new suffix
	$current_suffix = $suffix;
} else {
	$current_suffix = $process_data["suffix"];
}
$mysql_backups_dir = $working_dir . "serverbak_mysql_" . $current_suffix . "/";
$httpdocs_backups_dir = $working_dir . "serverbak_httpdocs_" . $current_suffix . "/";


// put in some more logic here: there are three milestones: mysql,httpdocs,cleanup;
// we can skip milstones if backup set false
if ($current_state == "mysql" && !$backup_mysql) {
	$current_state = "httpdocs";
	d("Skipping mysql backup, change the config if you would like to execute mysql backups as well...");
}
if ($current_state == "httpdocs" && !$backup_httpd) {
	$current_state = "cleanup";
	d("Skipping httpdocs backup, change the config if you would like to execute httpdocs backups as well...");
}
if ($current_state == "cleanup" &&  !$backup_s3 && !$backup_ftp){
	$current_state = "finished";
	d("Skipping backup cleanup because no backups were uploaded anywhere, change the config if you would like to upload backups...");
}

$new_state = $current_state;

d("Current state of process is: $current_state");

switch ($current_state)
{        
    case "mysql":
        // first we do mysql backups
        
        if ($backup_mysql)
        {
        	
            d("Trying to get a list of all databases...");
            
            $mysql_connection = mysql_connect($mysql_server, $mysql_username, $mysql_password);
            $db_list = mysql_list_dbs($mysql_connection);

            if (!is_dir($mysql_backups_dir))
            {
                d("Creating temporary mysql backup directory: " . $mysql_backups_dir);
                mkdir($mysql_backups_dir);
            }
            
            if ($dumpordumpexe == "dumpexe") { // windows, rar
            
	            while ($row = mysql_fetch_object($db_list)) {
                $database = $row->Database;
                $sql_filename = $database . "_" . $suffix . ".sql";
                $compr_filename = $sql_filename . ".rar";
                $compr_filepath = $mysql_backups_dir . $compr_filename;
                
                d("Database $database found");
                d("Dumping database $database to $sql_filename...");
                
                $shellcmd = "\"" . str_replace('/','\\',$mysqldumpexe) . "\" -u" . $mysql_username . " -p" . $mysql_password . " " . $database . " > " . $sql_filename;
                $cli_result = shell_exec($shellcmd);
                
                // and now compress
                $compr_filepath = str_replace('/', '\\', $compr_filepath);
		            $shellcmd = "\"" . str_replace('/','\\',$rar) . "\" a -m5 -v" . floor($maxfilesize*1024*1024) . "b -vn -r \"" . $compr_filepath . "\"";
		            $shellcmd .= " ".$sql_filename;
		            d("Compressing mysql dump to $compr_filename...");
								$cli_result = shell_exec($shellcmd);
								
								$resultdirhandle = opendir($mysql_backups_dir);
		            $resultfiles = array();
		            $info = pathinfo($compr_filename);
	            	$compr_filename_noext = basename($compr_filename,'.'.$info['extension']);
		            while (false !== ($result_filename = readdir($resultdirhandle))) {
		            	if (substr($result_filename,0,strlen($compr_filename_noext)) == $compr_filename_noext) {
			            	if (!in_array($result_filename, array(".", "..", "chroot", "default", ".skel"))) {
		                    	$resultfiles[] = $result_filename;
		                    	d($result_filename." generated (size: " . filesize($mysql_backups_dir.$result_filename) . "bytes)");
		                }
	              	}
		            }
		            closedir($resultdirhandle);
		            if (empty($resultfiles)) {
		            	d("NO ARCHIVE GENERATED!");
		            }
								
                d("Cleaning up mysql dump...");
                unlink($sql_filename);
							} // while
	            
	          } elseif ($dumpordumpexe == "dump") { // linux, tar
	            	
	            	while ($row = mysql_fetch_object($db_list)) {
	                $database = $row->Database;
	                $sql_filename = $database . "_" . $suffix . ".sql";
	                $compr_filename = $sql_filename . ".tar.gz";
	                $compr_filepath = $mysql_backups_dir . $compr_filename;
	                
	                d("Database $database found");
	                d("Dumping database $database to $sql_filename...");
	                                
	                $cli_result = shell_exec($mysqldump . " -u" . $mysql_username . " -p" . $mysql_password . " " . $database . " > " . $sql_filename);
	                
	                d("Compressing mysql dump to $compr_filename...");
	                
	                $cli_result = shell_exec($tar . " -cpzf " . $compr_filepath . " " . $sql_filename);
	                
	                d("Cleaning up mysql dump...");
	                
	                unlink($sql_filename);
	                
	                $size = explode(" ", shell_exec("du -b " . $compr_filepath));
	                $size = (int) $size[0];
	            
	                d("File size of $compr_filepath is $size bytes");
	                
	                // check if tar.gz file is larger than $maxfilesize
	                if ($size > ($maxfilesize * 1024 * 1024))
	                {
	                    d("File $compr_filepath is larger than maximum allowed size, we need to split this file...");
	                    
	                    shell_exec("split --bytes=" . floor($maxfilesize*1024*1024) . " -d " . $compr_filepath . " " . $compr_filepath . ".p");
	                    
	                    d("File has been split... deleting original file");
	                    
	                    unlink($compr_filepath);
	                }
            		} // while
	            } // linux
	             
              // we're done here... next we need to upload the single archive file or the multiple split files
              $new_state = "mysql_upload";
        }       
        break;
        
    case "mysql_upload":
        // get a list of all the files in the mysql backups directory, we need to upload
        $files = array();

        d("Looking for mysql backup files to upload...");
                    
        if ($handle = opendir($mysql_backups_dir))
        {
            while (false !== ($file = readdir($handle)))
            {
                if (!in_array($file, array(".", "..", "chroot", "default", ".skel")))
                {
                    $files[] = $file;
                }
            }
        }
        
        closedir($handle);
                
        if (!empty($files))
        {
            $backup_file = $mysql_backups_dir . $files[0];
            unset($files[0]);
            
            d("Found $backup_file, now uploading...");
            
            if ($backup_s3)
            {
                $s3 = new S3($s3_access_key_id, $s3_secret_access_key);
                $s3->useSSL = false;
                $s3->putObjectFile($backup_file, $s3_bucket_name, basename($backup_file), S3::ACL_PRIVATE);
            }
            
            if ($backup_ftp)
            {
                $ftp_handle = ftp_connect($ftp_server);
                $ftpsuccess = $ftp_handle;
                if ($ftpsuccess) {
	                $ftpsuccess = ftp_login($ftp_handle, $ftp_username, $ftp_password);
	                ftp_pasv($ftp_handle, $ftp_passive);
	                if ($ftpsuccess) {
	                	if (!empty($ftp_directory)) {
	                		$ftpsuccess = ftp_chdir($ftp_handle,$ftp_directory);
	                	}
	                	if ($ftpsuccess) {
	                		$ftpsuccess = ftp_put($ftp_handle, basename($backup_file), $backup_file, FTP_BINARY);
	                	}
	                }
                }
                ftp_close($ftp_handle);
                if ($ftpsuccess) {
                	d("Ftp transfer successful: $backup_file");
                } else {
                	d("FTP FAILURE: $backup_file");
                }
            }
            
            
            if ($backup_email) {
            	$mailsuccess = email_attachment($to_email, $mailbody, $mailsubject,$from_name, $from_email, $backup_file, $default_filetype='application/zip',$protectextension_mysql);
            	if ($mailsuccess) {
            		d("Email successful: $backup_file");
            	} else {
            		d("EMAIL FAILURE: $backup_file");
            	}
            }
            
            d("Uploaded $backup_file, now deleting the original backup file");
            
            unlink($backup_file);
            
            if (count($files) > 0)
            {
                // there are still more files to upload, stay in the same state...
                $new_state = "mysql_upload";
            }
            else
            {
                $new_state = "httpdocs";
            }
        }
        else
        {
            // no more files to upload, go to the next state
            $new_state = "httpdocs";
        }
        
        if ($new_state == "httpdocs")
        {
            rmdir($mysql_backups_dir);
        }
        
        break;
        
    case "httpdocs":
        // then we do httpdocs backups
        
        if ($backup_httpd)
        {
            chdir($vhosts_dir);
            // check if working dir is inside vhosts dir
            // if yes => we must exclude it
            if (substr($working_dir,0,strlen($vhosts_dir)) == $vhosts_dir && strlen($working_dir) > strlen($vhosts_dir)){
            	$excludestr = " --no-wildcards --exclude " . substr($working_dir,strlen($vhosts_dir),-1) . "*";
            	$excludestrrar = " -x".str_replace('/', '\\',substr($working_dir,strlen($vhosts_dir),-1))."";
            } else {
            	$excludestr = "";
            	$excludestrrar = "";
            }
            
            
            if (!is_dir($httpdocs_backups_dir))
            {
                mkdir($httpdocs_backups_dir);
            }
            
            d("Trying to get a list of all domains...");
            
            if ($dir_handle = opendir($vhosts_dir)) {
                while (false !== ($httpd_filename = readdir($dir_handle))) {
                    if (!in_array($httpd_filename, array(".", "..", "chroot", "default", ".skel"))) {
                    	$filestotar[] = $httpd_filename;
                    }
                }
            }
            
						if ($tarorrar == "rar") {
            
	            if ($vhosts_bundlesubdirs && !empty($filestotar)){
		            $savefilename = basename($vhosts_dir);
		            // windows stuff
		            $compr_filename = $savefilename . "_" . $suffix . ".rar";
		            $compr_filepath = $httpdocs_backups_dir . $compr_filename ;
		            $compr_filepath = str_replace('/', '\\', $compr_filepath);
		            $shellcmd = "\"" . str_replace('/','\\',$rar) . "\" a -m5 -v" . floor($maxfilesize*1024*1024) . "b -vn -r \"" . $compr_filepath . "\"";
		            foreach ($filestotar as $filetotar) {
		            	$shellcmd .= " \"" . $filetotar ."\"";
		            	d("Domain $filetotar found");
		            }
		            d("Rar compressing all files and directories in $vhosts_dir to $compr_filepath...");
		            $shellcmd .= $excludestrrar;
		            $cli_result = shell_exec($shellcmd);
	              //d($cli_result); //debug
		            $resultdirhandle = opendir($httpdocs_backups_dir);
		            $resultfiles = array();
		            $info = pathinfo($compr_filename);
		            $compr_filename_noext = basename($compr_filename,'.'.$info['extension']);
		            while (false !== ($result_filename = readdir($resultdirhandle))) {
		            	if (substr($result_filename,0,strlen($compr_filename_noext)) == $compr_filename_noext) {
			            	if (!in_array($result_filename, array(".", "..", "chroot", "default", ".skel"))) {
		                    	$resultfiles[] = $result_filename;
		                    	d($result_filename." generated (size: " . filesize($httpdocs_backups_dir.$result_filename) . "bytes)");
		                }
	              	}
		            }
		            closedir($resultdirhandle);
		            if (empty($resultfiles)) {
		            	d("NO BACKUP FILES GENERATED!");
		            }
	          	
	            } else { // no bundle
	            	foreach ($filestotar as $filetotar) {
	            		$compr_filename = $filetotar . "_" . $suffix . ".rar";
	                $compr_filepath = $httpdocs_backups_dir . $compr_filename;
	                d("Rar compressing $filetotar to $compr_filepath...");
	                $shellcmd = "\"" . str_replace('/','\\',$rar) . "\" a -m5 -v" . floor($maxfilesize*1024*1024) . "b -vn -r \"" . $compr_filepath . "\"";
	                $shellcmd .= " \"" . $filetotar . "\"" . $excludestrrar;
	                $cli_result = shell_exec($shellcmd);
	                //d($cli_result); //debug
	               	$resultdirhandle = opendir($httpdocs_backups_dir);
			            $resultfiles = array();
			            $info = pathinfo($compr_filename);
		            	$compr_filename_noext = basename($compr_filename,'.'.$info['extension']);
			            while (false !== ($result_filename = readdir($resultdirhandle))) {
			            	if (substr($result_filename,0,strlen($compr_filename_noext)) == $compr_filename_noext) {
				            	if (!in_array($result_filename, array(".", "..", "chroot", "default", ".skel"))) {
			                    	$resultfiles[] = $result_filename;
			                    	d($result_filename." generated (size: " . filesize($httpdocs_backups_dir.$result_filename) . "bytes)");
			                }
		              	}
			            }
			            closedir($resultdirhandle);
			            if (empty($resultfiles)) {
			            	d("NO ARCHIVE GENERATED!");
			            }
			            
              	} // foreach
	            } // else
	            
            } elseif ($tarorrar == "tar") { //tar
            
	            // tar everything into one large tar file
	            if ($vhosts_bundlesubdirs && !empty($filestotar)){
	            	$savefilename = basename($vhosts_dir);
	            	$compr_filename = $savefilename . "_" . $suffix . ".tar.gz";
	              $compr_filepath = $httpdocs_backups_dir . $compr_filename;
	              $shellcmd = $tar . $excludestr . " -cpzf " . $compr_filepath;
	              foreach ($filestotar as $filetotar) {
	              	$shellcmd .= " " . $filetotar;
	              	d("Domain $filetotar found");
	              }
								d("Tar compressing all files and directories in $vhosts_dir to $compr_filepath...");
	              $cli_result = shell_exec($shellcmd);
	              
	              $size = explode(" ", shell_exec("du -b " . $compr_filepath));
	              $size = (int) $size[0];
	                          
	              d("File size of $compr_filepath is $size bytes");
	              
	              // check if tar.gz file is larger than $maxfilesize
	              if ($size > ($maxfilesize * 1024 * 1024))
	              {
	                  d("File $compr_filepath is larger than maximum allowed size, we need to split this file...");
	                  
	                  shell_exec("split -a 3 --bytes=" . floor($maxfilesize*1024*1024) . " -d " . $compr_filepath . " " . $compr_filepath . ".");
	                  
	                  d("File has been split... deleting original file");
	                  
	                  unlink($compr_filepath);
	              }
	            }
	            
	            else { // no bundlesubdirs
	            	foreach ($filestotar as $filetotar) {
	            		$compr_filename = $filetotar . "_" . $suffix . ".tar.gz";
	                $compr_filepath = $httpdocs_backups_dir . $compr_filename;
	              	d("Rar compressing $filetotar to $compr_filepath...");
	                $cli_result = shell_exec($tar . " -cpzf " . $compr_filepath . " " . $filetotar);
	                $size = explode(" ", shell_exec("du -b " . $compr_filepath));
	                $size = (int) $size[0];
	                            
	                d("File size of $compr_filepath is $size bytes");
	        
	                // check if tar.gz file is larger than $maxfilesize
	                if ($size > ($maxfilesize * 1024 * 1024))
	                {
	                    d("File $compr_filepath is larger than maximum allowed size, we need to split this file...");
	                    
	                    shell_exec("split -a 3 --bytes=" . floor($maxfilesize*1024*1024) . " -d " . $compr_filepath . " " . $compr_filepath . ".");
	                    
	                    d("File has been split... deleting original file");
	                    
	                    unlink($compr_filepath);
	                }
	              }
	            }
            
          	}
            
            closedir($dir_handle);
            
            // we're done here... next we need to upload the single archive file or the multiple split files
            $new_state = "httpdocs_upload";
        }
        break;
        
    case "httpdocs_upload":        
        // get a list of all the files in the httpdocs backups directory, we need to upload
        $files = array();

        d("Looking for httpdocs backup files to upload...");
                    
        if ($handle = opendir($httpdocs_backups_dir))
        {
            while (false !== ($file = readdir($handle)))
            {
                if (!in_array($file, array(".", "..", "chroot", "default", ".skel")))
                {
                    $files[] = $file;
                }
            }
        }
				
        closedir($handle);
                
        if (!empty($files))
        {
            $backup_file = $httpdocs_backups_dir . $files[0];
            unset($files[0]);
            
            d("Found $backup_file, now uploading...");
            
            if ($backup_s3)
            {
                $s3 = new S3($s3_access_key_id, $s3_secret_access_key);
                $s3->useSSL = false;
                $s3->putObjectFile($backup_file, $s3_bucket_name, basename($backup_file), S3::ACL_PRIVATE);
            }
            
            if ($backup_ftp)
            {
                $ftp_handle = ftp_connect($ftp_server);
                $ftpsuccess = $ftp_handle;
                if ($ftpsuccess) {
	                $ftpsuccess = ftp_login($ftp_handle, $ftp_username, $ftp_password);
	                ftp_pasv($ftp_handle, $ftp_passive);
	                if ($ftpsuccess) {
	                	if (!empty($ftp_directory)) {
	                		$ftpsuccess = ftp_chdir($ftp_handle,$ftp_directory);
	                	}
	                	if ($ftpsuccess) {
	                		$ftpsuccess = ftp_put($ftp_handle, basename($backup_file), $backup_file, FTP_BINARY);
	                	}
	                }
                }
                ftp_close($ftp_handle);
                if ($ftpsuccess) {
                	d("Ftp transfer successful: $backup_file");
                } else {
                	d("FTP FAILURE: $backup_file");
                }
            }
                 
            if ($backup_email) {
            	$mailsuccess = email_attachment($to_email, $mailbody, $mailsubject,$from_name, $from_email, $backup_file, $default_filetype='application/zip',$protectextension_httpdocs);
            	if ($mailsuccess) {
            		d("Email successful: $backup_file");
            	} else {
            		d("EMAIL FAILURE: $backup_file");
            	}
            }
            
            
            d("Uploaded / Emailed $backup_file, now deleting the original backup file");
            
            unlink($backup_file);
            
            if (count($files) > 0)
            {
                // there are still more files to upload, stay in the same state...
                $new_state = "httpdocs_upload";
            }
            else
            {
                $new_state = "cleanup";
            }
        }
        else
        {
            // no more files to upload, go to the next state
            $new_state = "cleanup";
        }
        
        if ($new_state == "cleanup")
        {
            rmdir($httpdocs_backups_dir);
        }
        
        break;
        
    case "cleanup":
        // clean up old files from the server
        
        d("Cleaning up old files from the server...");
        
        if ($backup_s3)
        {
            $s3 = new S3($s3_access_key_id, $s3_secret_access_key);
            $s3->useSSL = false;
            
            $s3_files = $s3->getBucket($s3_bucket_name);
            
            while (list($s3_file, $s3_file_data) = each($s3_files))
            {
                if ($s3_file_data["time"] + ($days * 24 * 60 * 60) < time())
                {
                    d("Deleting outdated file from S3: " . $s3_file);
                    $s3->deleteObject($s3_bucket_name, $s3_file);
                }
            }            
        }
        
        if ($backup_ftp)
        {
            $ftp_handle = ftp_connect($ftp_server);
            ftp_login($ftp_handle, $ftp_username, $ftp_password);
            ftp_pasv($ftp_handle, $ftp_passive);
            ftp_chdir($ftp_handle,$ftp_directory);
            
            $list = @ftp_rawlist($ftp_handle, ".");
            
            $ftp_files = array();
            
            foreach ($list as $_)
            {
                preg_replace(
                    '`^(.{10}+)(\s*)(\d{1})(\s*)(\d*|\w*)'.
                    '(\s*)(\d*|\w*)(\s*)(\d*)\s'.
                    '([a-zA-Z]{3}+)(\s*)([0-9]{1,2}+)'.
                    '(\s*)([0-9]{2}+):([0-9]{2}+)(\s*)(.*)$`Ue',
                
                    '$ftp_files[]=array(
                    "rights"=>"$1",
                    "number"=>"$3",
                    "owner"=>"$5", "group"=>"$7",
                    "file_size"=>"$9",
                    "mod_time"=>"$10 $12 $14:$15",
                    "file"=>"$17",
                    "type"=>print_r((preg_match("/^d/","$1"))?"dir":"file",1));',
                $_);
            }

            foreach ($ftp_files as $ftp_file)
            {
                $ftp_filename = trim($ftp_file["file"]);
                $mod_time = strtotime($ftp_file["mod_time"]);
                $ftp_type = trim($ftp_file["type"]);
                
                if ($ftp_type == "file" && ($mod_time + ($days * 24 * 60 * 60)) < time())
                {
                    d("Deleting outdated file from FTP: " . $ftp_filename);
                    ftp_delete($ftp_handle, $ftp_filename);
                }
            }
            
            ftp_close($ftp_handle);
        }
        
        d("Finished cleaning up, we're done!");
        
        $new_state = "finished";
        
        break;
        
    case "finished":
        
        d("Previous process has finished, do we need to start a new one already?");
        
        if ($process_data["start"] + ($interval * 60 * 60) < time())
        {
            d("Yes, new process should be started on the next run!");
            
            $new_state = "mysql";
            // and we also need to update $process_data["start"]
            $process_data["start"] = time();
        }
        else
        {
            $minutes_to_go = round(($process_data["start"] + ($interval * 60 * 60) - time()) / 60, 0);
            d("No, only $minutes_to_go minute" . (($minutes_to_go == 1) ? "" : "s") . " to go before starting a new process..");
        }        
        
        break;    
}

// write new process data to file and unlock it, we're finished... for now at least...
// Record the state the next cron invocation should resume from, remember the
// filename suffix used for this run's archives (presumably a timestamp set
// earlier in the file -- confirm against the "mysql" case), and clear the
// lock flag so the next run is allowed to proceed.
$process_data["state"] = $new_state;
$process_data["suffix"] = $current_suffix;
$process_data["locked"] = false;

d("New state of process is: " . $new_state);
d("We're finished, for now... unlock current process");

// Persist the state array to $process_file so it survives between cron runs
// (save_process_data() is defined elsewhere in this file).
save_process_data($process_file, $process_data);

d("Exiting...");






// $email: email body text
// $our_email: our email address
// $our_email_name: sender name
// $file_location: path of attachment
// $filename_addextension: we sometimes need to camouflage the file's real extension because some email servers are picky; so we append a short pseudo-extension (just pass "" if this is not required)
// Sends an email with a single file attachment via a hand-built MIME multipart body.
//
// $to_email: recipient address
// $email: email body text (wrapped in a <font> tag and sent as text/html)
// $subject: subject line
// $our_email_name: sender display name
// $our_email: sender address (also used for Reply-To, Return-Path and the Message-ID domain)
// $file_location: path of the file to attach
// $default_filetype: MIME type used when mime_content_type() is unavailable
// $filename_addextension: pseudo-extension appended to the attachment name to get past
//   picky mail servers; pass "" (or omit -- it now defaults to "") to keep the real name
//
// Returns the boolean result of mail(), or false when the attachment file is unreadable.
function email_attachment($to_email, $email, $subject, $our_email_name, $our_email, $file_location, $default_filetype = 'application/zip', $filename_addextension = ''){
    // normalise the pseudo-extension: strip surrounding dots so we control the separator
    $filename_addextension = trim($filename_addextension, '.');
    $email = '<font face="arial">' . $email . '</font>';

    // detect the real MIME type when possible; the function name must be a quoted
    // string here -- the previous bare-word constant is a fatal error on PHP 8
    if (function_exists('mime_content_type')) {
        $fileatttype = mime_content_type($file_location);
    } else {
        $fileatttype = $default_filetype;
    }

    // attachment filename: only append the pseudo-extension when one was supplied,
    // so passing "" no longer leaves a dangling trailing dot on the name
    $fileattname = basename($file_location);
    if ($filename_addextension !== '') {
        $fileattname .= '.' . $filename_addextension;
    }

    // read and base64-encode the attachment; bail out instead of emitting warnings
    // and mailing an empty attachment when the file cannot be read
    if (!is_readable($file_location)) {
        return false;
    }
    $data = file_get_contents($file_location);
    if ($data === false) {
        return false;
    }
    $data = chunk_split(base64_encode($data));

    // create a (sufficiently) unique MIME boundary for this message
    $semi_rand = md5(time());
    $mime_boundary = "==Multipart_Boundary_x{$semi_rand}x";

    // create email body section (HTML part)
    $message = "This is a multi-part message in MIME format.\n\n" .
    "--{$mime_boundary}\n" .
    "Content-type: text/html; charset=us-ascii\n" .
    "Content-Transfer-Encoding: 7bit\n\n" .
    $email . "\n\n";

    // create attachment section (base64 part)
    $message .= "--{$mime_boundary}\n" .
     "Content-Type: {$fileatttype};\n" .
     " name=\"{$fileattname}\"\n" .
     "Content-Disposition: attachment;\n" .
     " filename=\"{$fileattname}\"\n" .
     "Content-Transfer-Encoding: base64\n\n" .
     $data . "\n\n" .
     "--{$mime_boundary}--\n";

    // headers: guard against a sender address without an "@" instead of
    // raising an undefined-offset notice when building the Message-ID domain
    $exp = explode('@', $our_email);
    $domain = isset($exp[1]) ? $exp[1] : '';
    $headers = "From: $our_email_name<$our_email>" . "\n";
    $headers .= "Reply-To: $our_email" . "\n";
    $headers .= "Return-Path: $our_email" . "\n";    // these two to set reply address
    $headers .= "Message-ID: <" . time() . "@" . $domain . ">" . "\n";
    $headers .= "X-Mailer: Edmonds Commerce Email Attachment Function" . "\n";          // These two to help avoid spam-filters
    $headers .= "Date: " . date("r") . "\n";
    $headers .= "MIME-Version: 1.0\n" .
                    "Content-Type: multipart/mixed;\n" .
                    " boundary=\"{$mime_boundary}\"";

    // fifth argument: "-f" sets the envelope sender for sendmail-compatible MTAs
    return mail($to_email, $subject, $message, $headers, '-f ' . $our_email);
}

?> 
Return current item: woodyWebBacker