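"""Shared cPanel backups agent.

Coordinates WHM backup runs on a shared server: cleans up outdated logs and
archives, runs per-user ActiveBackup jobs against Ceph S3, and disables cPanel
backups for accounts no longer flagged for the service.
"""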
import os
import shutil
import subprocess
import sys
import time
from datetime import datetime, timedelta
from hw_cpbackup.active_backups import ActiveBackup
from hw_cpbackup.util.alert import send_alert
from hw_cpbackup.util.log import logger
from hw_cpbackup.util.s3 import S3AuthorizationError, S3ConnectionError
from hw_cpbackup.util.whm.filesystem import tail_latest_log, latest_log
from hw_cpbackup.util.whm.whmapi import WhmApi
from hw_cpbackup.util.windy import Windy
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
__version__ = '2.1.30'
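# BACKUP_ATTEMPTS_MAX caps both the consecutive runs blocked by an existing process
# before alerting and the S3 retry passes; BACKUP_ATTEMPTS_INTERVAL is the wait in
# seconds between S3 retries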
BACKUP_ATTEMPTS_MAX = 3
BACKUP_ATTEMPTS_INTERVAL = 60
PID_OF_SELF = os.getpid()
def get_current_backup_processes():
logger.debug('checking for current processes...')
try:
logger.debug('this process has ID: %s', PID_OF_SELF)
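        # with shell=True the single-element args list is handed to /bin/bash -c,
        # so this runs `pgrep hw_cpbackup` and captures the matching PIDs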
        find_backup_processes = subprocess.Popen(executable='/bin/bash',
                                                 args=['pgrep hw_cpbackup'],
                                                 stdout=subprocess.PIPE,
                                                 shell=True)
        found_process_ids, _ = find_backup_processes.communicate()
found_process_ids = list(filter(lambda p: int(p) != PID_OF_SELF, found_process_ids.decode('utf-8').splitlines()))
return found_process_ids
except Exception:
raise LookupError("Failed to lookup backups PID(s)")
def is_backup_process_outdated(pid):
logger.debug("checking backup process (%s) start time: ", pid)
try:
find_process_start_time = subprocess.Popen(executable='/bin/bash',
args=['ps -q %s -o lstart --no-headers' % pid],
stdout=subprocess.PIPE,
shell=True)
process_start_time_output = find_process_start_time.communicate()[0].decode('utf-8')
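        # lstart output looks like 'Mon Jan  1 03:00:00 2024'; splitting on spaces and
        # dropping empty fields yields [weekday, month, day, time, year]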
process_start_time = [x.strip() for x in process_start_time_output.split(' ') if x != '']
        process_datetime = datetime(year=int(process_start_time[-1]),
                                    month=datetime.strptime(process_start_time[1], '%b').month,
                                    day=int(process_start_time[2]),
                                    hour=int(process_start_time[3].split(':')[0]),
                                    minute=int(process_start_time[3].split(':')[1]),
                                    second=int(process_start_time[3].split(':')[2]))
        if datetime.now() - process_datetime > timedelta(days=1):
return True
except Exception as e:
logger.debug(e)
return False
return False
def kill_backup_process(pid):
logger.debug('killing outdated backup process (%s)', pid)
try:
_kill_backup_process = subprocess.Popen(executable='/bin/bash',
args=['kill -s SIGKILL %s' % pid],
shell=True)
_kill_backup_process.wait()
assert _kill_backup_process.returncode == 0
except Exception:
raise Exception('Failed to kill backup process (%s)' % pid)
class SharedBackupsAgent:
def __init__(self, clean=False, force=False, alert=True, dry_run=False):
self.alert = alert
self.dry_run = dry_run
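        # the attempts file counts consecutive runs that found an existing backup
        # process; it is reset to 0 whenever a run starts cleanly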
self.attempts_file = '/tmp/hw_cpbackup_attempts'
self.assert_no_current_processes()
if clean:
self.clear_logs(state='all' if force else 'outdated')
self.clear_archives(state='current')
if force:
self.force_whm_backup()
self.windy = Windy()
self.whm = WhmApi()
def assert_no_current_processes(self):
"""Assert there are no current processes of the backup agent
"""
backup_pids = []
try:
backup_pids = get_current_backup_processes()
            for pid in list(backup_pids):  # iterate over a copy so entries can be removed safely below
logger.debug("checking state of backup process (%s)", pid)
# check if process is outdated (older than 24 hours)
if is_backup_process_outdated(pid):
logger.debug("process is outdated (%s)", pid)
# try to kill the process
try:
kill_backup_process(pid)
logger.debug('process killed (%s)', pid)
                        backup_pids.remove(pid)
except Exception as e:
logger.debug(e)
logger.error("Failed to kill outdated backups process (%s)", pid)
# send an alert we failed to kill an outdated process
if self.alert:
send_alert(message='Failed to kill outdated backups process (%s)' % pid)
assert not backup_pids
except AssertionError:
logger.warning('existing backup processes were found with ID(s): %s', ' '.join(backup_pids))
# alert when multiple processes were enumerated
if len(backup_pids) > 1 and self.alert:
send_alert(message='Multiple backup agent processes detected')
# increment the killed process count and update attempts file
attempted_process_count = self.get_attempted_process_count()
attempted_process_count += 1
self.update_attempted_process_count(attempted_process_count)
# send alert when max attempts exceeded
if attempted_process_count >= BACKUP_ATTEMPTS_MAX and self.alert:
send_alert(message='Existing backups process has not completed after %s attempts' % attempted_process_count)
# exit due to existing processes
sys.exit(1)
except LookupError as e:
logger.error(e)
# send alert indicating process lookup error
if self.alert:
send_alert(message=e.__str__())
            # exit due to failed PID lookup - not safe to proceed
sys.exit(1)
else:
self.update_attempted_process_count(0)
def update_attempted_process_count(self, count):
with open(self.attempts_file, 'w') as f:
f.write(str(count))
def get_attempted_process_count(self):
if not Path(self.attempts_file).exists():
return 0
with open(self.attempts_file, 'r') as f:
return int(f.read().strip())
def clear_logs(self, state='outdated'):
"""
Remove previous log artifacts from WHM backup
:return: None
"""
logger.debug('attempting %s log removal...', state)
try:
# will break backups if latest log is removed without 'force' mode
logs_to_purge = list(filter(lambda l: l.stem != Path(latest_log()).stem, Path.glob(Path('/usr/local/cpanel/logs/cpbackup'), '*.log')))
if state == 'all':
logs_to_purge = Path.glob(Path('/usr/local/cpanel/logs/cpbackup'), '*.log')
assert logs_to_purge
if not self.dry_run:
for backup_log in logs_to_purge:
Path.unlink(backup_log)
logger.debug('artifact removed: %s', backup_log.name)
logger.info('Completed %s log removal', state)
except AssertionError:
logger.debug('no logs were found')
except Exception as e:
logger.error('Failed to clear old logs: %s', e)
sys.exit(1)
def clear_archives(self, state='outdated'):
"""
Remove outdated user archives from the backup root
:return: None
"""
logger.debug('attempting %s archive removal...', state)
try:
timestamp = datetime.strftime(datetime.today() - timedelta(days=1), "%Y-%m-%d")
if state == 'current':
timestamp = datetime.strftime(datetime.today(), "%Y-%m-%d")
backup_root = Path('/backup/{day}/accounts/'.format(day=timestamp))
assert backup_root.exists()
if not self.dry_run:
items_to_remove = list(Path.rglob(backup_root, '*'))
items_to_remove.extend(list(Path.rglob(backup_root, '.*')))
                for item in items_to_remove:
                    if not item.exists():
                        # the parent directory may already have been removed by rmtree earlier in this loop
                        continue
                    if Path.is_dir(item):
                        shutil.rmtree(str(item))
                    elif Path.is_file(item):
                        Path.unlink(item)
logger.debug('artifact removed: %s', item.name)
Path.rmdir(backup_root)
shutil.rmtree(str(backup_root.parent))
logger.info('Completed %s archive removal', state)
except AssertionError:
logger.debug('no %s archives were found', state)
except Exception as e:
logger.error('Failed to clear old archives: %s', e)
sys.exit(1)
def force_whm_backup(self):
"""
Execute subprocess to forcefully initiate WHM backup
:return: None
"""
try:
logger.debug('forcing WHM backup...')
if not self.dry_run:
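                # cPanel's backup binary with --force starts a backup run immediately,
                # regardless of the configured schedule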
subprocess.call(['/usr/local/cpanel/bin/backup', '--force'])
time.sleep(10)
logger.debug('forced WHM backup')
except Exception as e:
logger.exception(e)
logger.error('Failed to force WHM backup')
sys.exit(1)
@staticmethod
def is_backup_completed():
"""
Determine if backups completed
:return: bool
"""
        if latest_log():
latest_log_lines = tail_latest_log(lines=60)
return any(['Completed at' in x for x in latest_log_lines])
else:
return False
@staticmethod
def is_backup_failed():
"""
Determine if backups failed
        :return: bool
"""
latest_log_lines = tail_latest_log(lines=50)
# We check for the string 'Try/Tiny.pm' because this is cPanel's try/catch Perl module that gets logged when exceptions occur during backup
# For more info see: https://github.com/p5sagit/Try-Tiny
return any([('Backups will not run' in x or 'Try/Tiny.pm' in x) for x in latest_log_lines])
def _backup(self, users, attempt):
"""Create ActiveBackup class and run backup"""
for user in users:
try:
active_backup = ActiveBackup(user, dry_run=self.dry_run, alert=self.alert)
active_backup.run()
except S3ConnectionError as e:
                # if any connection-related error occurs for any user, re-raise
                # so we can catch it and retry backups for all users
                # once the designated retry interval has lapsed
#
# this helps to prevent taxing the S3 endpoint when we know
# the error was related to the connection
if attempt == BACKUP_ATTEMPTS_MAX and self.alert:
send_alert(username=user['user'], message="Connection error occurred during Ceph S3 request")
raise e
except S3AuthorizationError:
logger.warning('S3 disabled - skipping')
continue
except Exception as e:
logger.debug(e, exc_info=True)
logger.warning('Backups failed for user [%s]', user['user'])
if self.alert:
send_alert(username=user['user'], message="Backups encountered an unexpected error")
continue
def run(self, user=None):
"""
Run the backup agent
:param user:
:return: None
"""
if not self.dry_run:
self.clear_archives() # always attempt to remove outdated archives first
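        # users reported by Windy form the active backup set; any cPanel account with
        # backups enabled that is not in that set has its backups disabled further below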
active_backup_users = self.windy.get_shared_backups_user_data()['users']
active_backup_user_names = [x['user'] for x in active_backup_users]
inactive_backup_users = [x for x in self.whm.get_users_with_backups_enabled() if x['user'] not in active_backup_user_names]
logger.debug('enumerated %s backup users on this server', len(active_backup_users))
in_progress = False
if not self.is_backup_completed():
logger.warning('Backup still in progress')
in_progress = True
if not in_progress:
if self.is_backup_failed():
check_failure_reason = tail_latest_log(lines=50)
if any(['error: Permission denied' in x for x in check_failure_reason]):
# skip sending alerts for non-fatal permissions errors that raised an exception
# 'package_account' function in 'util/whm/filesystem.py' will handle adjustment of permissions
logger.warning('Detected permissions related error in WHM backup')
elif any(['Available disk space' in x for x in check_failure_reason]):
# if this condition is met, a disk upgrade is likely required as outdated cache has already been purged
logger.error('Detected disk space related error in WHM backup')
# we create a lockfile the first time a disk space error is detected to prevent duplicate alerts
# the backup and sanity routines will throw their own alerts when needed
lockfile = Path('/backup/{day}/.disk_space_error'.format(day=datetime.strftime(datetime.today(), "%Y-%m-%d")))
if not Path.exists(lockfile):
logger.debug('creating disk space failure lockfile: %s', lockfile)
Path.touch(lockfile)
if self.alert:
send_alert(message='WHM backup failed due to disk space!')
if user:
active_backup_users = [x for x in active_backup_users if x['user'] == user]
inactive_backup_users = [x for x in inactive_backup_users if x['user'] == user]
if active_backup_users:
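            # only S3 connection errors bubble up from _backup(); those trigger a full
            # retry pass after waiting BACKUP_ATTEMPTS_INTERVAL seconds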
for attempt in range(BACKUP_ATTEMPTS_MAX):
attempt = attempt + 1
logger.debug('Backup attempt (%s/%s)', attempt, BACKUP_ATTEMPTS_MAX)
try:
self._backup(active_backup_users, attempt)
break
except S3ConnectionError:
logger.critical('Failed connection to Ceph S3')
if attempt != BACKUP_ATTEMPTS_MAX:
logger.info('Waiting %s seconds before re-attempt...', BACKUP_ATTEMPTS_INTERVAL)
time.sleep(BACKUP_ATTEMPTS_INTERVAL)
else:
if self.alert:
send_alert(message='All Ceph S3 connection attempts exhausted')
else:
logger.error("All backup attempts exhausted")
else:
logger.info('No backup users found')
for user in inactive_backup_users:
try:
self.whm.disable_user_backups(user['user'])
            except Exception:
logger.warning('Failed to disable cPanel backups for user [%s]', user['user'])
if self.alert:
send_alert(username=user['user'], message="Failed to disable backups in cPanel for user!")