Mirror of https://github.com/usmannasir/cyberpanel.git (synced 2025-11-18 03:01:01 +01:00)
restart backup if killed prematurely
@@ -361,17 +361,41 @@ class IncScheduler():
         for backupjob in NormalBackupJobs.objects.all():

             jobConfig = json.loads(backupjob.config)
             destinationConfig = json.loads(backupjob.owner.config)

             currentTime = time.strftime("%m.%d.%Y_%H-%M-%S")

             if destinationConfig['type'] == 'local':

                 finalPath = '%s/%s' % (destinationConfig['path'].rstrip('/'), currentTime)
                 command = 'mkdir -p %s' % (finalPath)
                 ProcessUtilities.executioner(command)

                 if jobConfig[IncScheduler.frequency] == type:

+                    ### Check if an old job was prematurely killed, then resume from there.
+                    try:
+                        oldJobContinue = 1
+                        pid = jobConfig['pid']
+                        stuckDomain = jobConfig['website']
+                        finalPath = jobConfig['finalPath']
+                        jobConfig['pid'] = str(os.getpid())
+
+                        command = 'ps aux'
+                        result = ProcessUtilities.outputExecutioner(command)
+
+                        if result.find(pid) > -1 and result.find('IncScheduler.py') > -1:
+                            quit(1)
+
+                    except:
+                        ### Save some important info in the backup config.
+                        oldJobContinue = 0
+                        jobConfig['pid'] = str(os.getpid())
+                        jobConfig['finalPath'] = finalPath
+
                     NormalBackupJobLogs.objects.filter(owner=backupjob).delete()
                     NormalBackupJobLogs(owner=backupjob, status=backupSchedule.INFO,
                                         message='Starting %s backup on %s..' % (type, time.strftime("%m.%d.%Y_%H-%M-%S"))).save()
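The resume check added above rests on a single question: does the pid recorded in the job config still belong to a live IncScheduler.py process? If it does, another run owns this job and the current one exits; if reading the config fails, this is a fresh run that records its own pid. A minimal sketch of that liveness test using only the standard library (the helper name is_other_run_alive and the exact-token pid match are illustrative, not CyberPanel code):

    import os
    import subprocess

    def is_other_run_alive(job_config, marker='IncScheduler.py'):
        """Return True if the pid recorded in job_config still maps to a live scheduler process."""
        pid = str(job_config.get('pid', ''))
        if not pid:
            return False  # no pid recorded: nothing to resume, treat as a fresh run
        # 'ps aux' lists every process with its command line; scan it in Python.
        output = subprocess.run(['ps', 'aux'], capture_output=True, text=True).stdout
        for line in output.splitlines():
            if pid in line.split() and marker in line:
                return True
        return False

    # Usage mirroring the diff: exit if another run holds the job, otherwise claim it.
    # job_config = {'pid': '12345', 'website': 'example.com'}
    # if is_other_run_alive(job_config):
    #     raise SystemExit(1)
    # job_config['pid'] = str(os.getpid())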
@@ -379,21 +403,35 @@ class IncScheduler():
                     actualDomain = 0
                     try:
                         if jobConfig[IncScheduler.allSites] == 'all':
-                            websites = Websites.objects.all()
+                            websites = Websites.objects.all().order_by('domain')
                             actualDomain = 1
                         else:
-                            websites = backupjob.normalbackupsites_set.all()
+                            websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')
                     except:
-                        websites = backupjob.normalbackupsites_set.all()
+                        websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')

+                    doit = 0

                     for site in websites:

                         if actualDomain:
                             domain = site.domain
                         else:
                             domain = site.domain.domain

+                        ## Save the domain currently being backed up in the db, so the job can restart from here when prematurely killed.
+                        jobConfig['website'] = domain
+                        backupjob.config = json.dumps(jobConfig)
+                        backupjob.save()
+
+                        if oldJobContinue and not doit:
+                            if domain == stuckDomain:
+                                doit = 1
+                                pass
+                            else:
+                                continue
+
                         retValues = backupSchedule.createLocalBackup(domain, '/dev/null')

                         if retValues[0] == 0:
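The order_by change and the doit flag work together: because the site list is now iterated in a stable, sorted order, a resumed run can discard every domain that precedes the one recorded as stuck and pick up from there. A stripped-down sketch of that skip-ahead loop, independent of the Django models (domains is any iterable of names, backup_one is a placeholder callable):

    def run_backups(domains, backup_one, stuck_domain=None):
        """Back up each domain in sorted order, skipping ahead to stuck_domain
        if a previous run died there. Returns the domains actually processed."""
        resumed = stuck_domain is None  # no stuck domain means a fresh run
        processed = []
        for domain in sorted(domains):
            if not resumed:
                if domain == stuck_domain:
                    resumed = True   # reached the point where the old run died
                else:
                    continue         # already handled by the previous run
            backup_one(domain)
            processed.append(domain)
        return processed

    # Example: the previous run was killed while handling 'b.example'.
    # run_backups(['c.example', 'a.example', 'b.example'], print, stuck_domain='b.example')
    # -> backs up 'b.example' and 'c.example', skipping 'a.example'.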
@@ -407,12 +445,12 @@ class IncScheduler():
                                 sender = 'root@%s' % (socket.gethostname())
                                 TO = [adminEmail]
                                 message = """\
-From: %s
-To: %s
-Subject: %s
+From: %s
+To: %s
+Subject: %s

-Automatic backup failed for %s on %s.
-""" % (sender, ", ".join(TO), SUBJECT, domain, currentTime)
+Automatic backup failed for %s on %s.
+""" % (sender, ", ".join(TO), SUBJECT, domain, currentTime)

                                 logging.SendEmail(sender, TO, message)
                             else:
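On failure the job composes a plain email message string and hands it to CyberPanel's own logging.SendEmail helper. As a rough stand-in showing the same notification with only the standard library, assuming a local MTA listening on localhost (the function name and all addresses below are illustrative):

    import smtplib
    import socket
    from email.message import EmailMessage

    def send_failure_mail(admin_email, domain, when, subject='Backup failed'):
        """Notify the admin that the automatic backup for `domain` failed at `when`."""
        sender = 'root@%s' % socket.gethostname()
        msg = EmailMessage()
        msg['From'] = sender
        msg['To'] = admin_email
        msg['Subject'] = subject
        msg.set_content('Automatic backup failed for %s on %s.' % (domain, when))
        # Assumes a local MTA on port 25, as on a typical CyberPanel server.
        with smtplib.SMTP('localhost') as smtp:
            smtp.send_message(msg)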
@@ -424,6 +462,12 @@ class IncScheduler():
                                 NormalBackupJobLogs(owner=backupjob, status=backupSchedule.INFO,
                                                     message='Backup completed for %s on %s.' % (
                                                         domain, time.strftime("%m.%d.%Y_%H-%M-%S"))).save()

+                    jobConfig = json.loads(backupjob.config)
+                    if jobConfig['pid']:
+                        del jobConfig['pid']
+                        backupjob.config = json.dumps(jobConfig)
+                        backupjob.save()
+
             else:

                 import subprocess
                 import shlex
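The new lines at the end of the local branch reload the job config and drop the pid key, so the next scheduled run does not mistake a finished job for a crashed one. The same claim-and-release round trip over the JSON config, sketched without the Django model (mark_running and mark_finished are hypothetical names):

    import json
    import os

    def mark_running(config_json, final_path):
        """Record this process and its backup directory in the job config."""
        cfg = json.loads(config_json)
        cfg['pid'] = str(os.getpid())
        cfg['finalPath'] = final_path
        return json.dumps(cfg)

    def mark_finished(config_json):
        """Remove the pid marker once the job has completed normally."""
        cfg = json.loads(config_json)
        cfg.pop('pid', None)   # tolerate a config that never recorded a pid
        return json.dumps(cfg)

    # cfg = mark_running('{}', '/home/backup/01.02.2025_03-04-05')
    # cfg = mark_finished(cfg)   # pid is gone, finalPath is kept for reference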
@@ -440,12 +484,12 @@ class IncScheduler():
                     actualDomain = 0
                     try:
                         if jobConfig[IncScheduler.allSites] == 'all':
-                            websites = Websites.objects.all()
+                            websites = Websites.objects.all().order_by('domain')
                             actualDomain = 1
                         else:
-                            websites = backupjob.normalbackupsites_set.all()
+                            websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')
                     except:
-                        websites = backupjob.normalbackupsites_set.all()
+                        websites = backupjob.normalbackupsites_set.all().order_by('domain__domain')

                     for site in websites: