diff --git a/api/views.py b/api/views.py
index 291faa8b9..be6badb08 100644
--- a/api/views.py
+++ b/api/views.py
@@ -570,7 +570,7 @@ def changeAdminPassword(request):
firstName="Cyber", lastName="Panel", acl=acl, token=token)
admin.save()
- vers = version(currentVersion="1.7", build=5)
+ vers = version(currentVersion="1.7", build=6)
vers.save()
package = Package(admin=admin, packageName="Default", diskSpace=1000,
diff --git a/cloudAPI/cloudManager.py b/cloudAPI/cloudManager.py
index 00ed367e9..9023fc5a0 100644
--- a/cloudAPI/cloudManager.py
+++ b/cloudAPI/cloudManager.py
@@ -23,6 +23,7 @@ from plogical.httpProc import httpProc
from s3Backups.s3Backups import S3Backups
import os
from serverStatus.views import topProcessesStatus, killProcess
+from plogical.mysqlUtilities import mysqlUtilities
class CloudManager:
def __init__(self, data=None, admin = None):
@@ -925,5 +926,145 @@ class CloudManager:
try:
request.session['userID'] = self.admin.pk
return killProcess(request)
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+
+ def connectAccountDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'connectAccountDO')
+ return s3.connectAccountDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def fetchBucketsDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'fetchBucketsDO')
+ return s3.fetchBucketsDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+
+ def createPlanDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'createPlanDO')
+ return s3.createPlanDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def fetchBackupPlansDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'fetchBackupPlansDO')
+ return s3.fetchBackupPlansDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def deletePlanDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'deletePlanDO')
+ return s3.deletePlanDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def fetchWebsitesInPlanDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'fetchWebsitesInPlanDO')
+ return s3.fetchWebsitesInPlanDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def fetchBackupLogsDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'fetchBackupLogsDO')
+ return s3.fetchBackupLogsDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def deleteDomainFromPlanDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'deleteDomainFromPlanDO')
+ return s3.deleteDomainFromPlanDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def savePlanChangesDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'savePlanChangesDO')
+ return s3.savePlanChangesDO()
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def forceRunAWSBackupDO(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ s3 = S3Backups(request, self.data, 'forceRunAWSBackupDO')
+ s3.start()
+ return self.ajaxPre(1, None)
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def showStatus(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ currentACL = ACLManager.loadedACL( self.admin.pk)
+
+ if currentACL['admin'] == 0:
+ return self.ajaxPre(0, 'Only administrators can see MySQL status.')
+
+ finalData = mysqlUtilities.showStatus()
+
+ finalData = json.dumps(finalData)
+ return HttpResponse(finalData)
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+
+ def fetchRam(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ currentACL = ACLManager.loadedACL( self.admin.pk)
+
+ if currentACL['admin'] == 0:
+                return self.ajaxPre(0, 'Only administrators can view RAM information.')
+
+ from psutil import virtual_memory
+ import math
+
+ finalData = {}
+ mem = virtual_memory()
+ inGB = math.ceil(float(mem.total)/float(1024 * 1024 * 1024))
+ finalData['ramInGB'] = inGB
+ finalData['conf'] = subprocess.check_output(shlex.split('sudo cat /etc/my.cnf'))
+ finalData['status'] = 1
+
+ finalData = json.dumps(finalData)
+ return HttpResponse(finalData)
+ except BaseException, msg:
+ return self.ajaxPre(0, str(msg))
+
+ def applyMySQLChanges(self, request):
+ try:
+ request.session['userID'] = self.admin.pk
+ currentACL = ACLManager.loadedACL( self.admin.pk)
+
+ if currentACL['admin'] == 0:
+                return self.ajaxPre(0, 'Only administrators can apply MySQL changes.')
+
+ result = mysqlUtilities.applyMySQLChanges(self.data)
+
+ if result[0] == 0:
+ return self.ajaxPre(0, result[1])
+ else:
+ return self.ajaxPre(1, None)
+
except BaseException, msg:
return self.ajaxPre(0, str(msg))
\ No newline at end of file
diff --git a/cloudAPI/views.py b/cloudAPI/views.py
index f43c2faee..de6beeec6 100644
--- a/cloudAPI/views.py
+++ b/cloudAPI/views.py
@@ -201,6 +201,32 @@ def router(request):
return cm.systemStatus(request)
elif controller == 'killProcess':
return cm.killProcess(request)
+ elif controller == 'connectAccountDO':
+ return cm.connectAccountDO(request)
+ elif controller == 'fetchBucketsDO':
+ return cm.fetchBucketsDO(request)
+ elif controller == 'createPlanDO':
+ return cm.createPlanDO(request)
+ elif controller == 'fetchBackupPlansDO':
+ return cm.fetchBackupPlansDO(request)
+ elif controller == 'deletePlanDO':
+ return cm.deletePlanDO(request)
+ elif controller == 'fetchWebsitesInPlanDO':
+ return cm.fetchWebsitesInPlanDO(request)
+ elif controller == 'fetchBackupLogsDO':
+ return cm.fetchBackupLogsDO(request)
+ elif controller == 'deleteDomainFromPlanDO':
+ return cm.deleteDomainFromPlanDO(request)
+ elif controller == 'savePlanChangesDO':
+ return cm.savePlanChangesDO(request)
+ elif controller == 'forceRunAWSBackupDO':
+ return cm.forceRunAWSBackupDO(request)
+ elif controller == 'showStatus':
+ return cm.showStatus(request)
+ elif controller == 'fetchRam':
+ return cm.fetchRam(request)
+ elif controller == 'applyMySQLChanges':
+ return cm.applyMySQLChanges(request)
else:
return cm.ajaxPre(0, 'This function is not available in your version of CyberPanel.')
diff --git a/install/install.py b/install/install.py
index ad18593b7..43433f44c 100644
--- a/install/install.py
+++ b/install/install.py
@@ -780,7 +780,7 @@ class preFlightsChecks:
os.chdir(self.path)
- command = "wget http://cyberpanel.sh/CyberPanel.1.7.5.tar.gz"
+ command = "wget http://cyberpanel.sh/CyberPanel.1.7.6.tar.gz"
#command = "wget http://cyberpanel.sh/CyberPanelTemp.tar.gz"
preFlightsChecks.call(command, self.distro, '[download_install_CyberPanel]',
'CyberPanel Download',
@@ -789,7 +789,7 @@ class preFlightsChecks:
##
count = 0
- command = "tar zxf CyberPanel.1.7.5.tar.gz"
+ command = "tar zxf CyberPanel.1.7.6.tar.gz"
#command = "tar zxf CyberPanelTemp.tar.gz"
preFlightsChecks.call(command, self.distro, '[download_install_CyberPanel]',
'Extract CyberPanel',1, 1, os.EX_OSERR)
diff --git a/loginSystem/views.py b/loginSystem/views.py
index 3dbec47f7..150137107 100644
--- a/loginSystem/views.py
+++ b/loginSystem/views.py
@@ -156,7 +156,7 @@ def loadLoginPage(request):
firstName="Cyber",lastName="Panel", acl=acl, token=token)
admin.save()
- vers = version(currentVersion="1.7", build=5)
+ vers = version(currentVersion="1.7", build=6)
vers.save()
package = Package(admin=admin, packageName="Default", diskSpace=1000,
diff --git a/managePHP/php70.xml b/managePHP/php70.xml
index 90755d862..7432b178f 100644
--- a/managePHP/php70.xml
+++ b/managePHP/php70.xml
@@ -260,4 +260,10 @@
0
+
+ lsphp70-ioncube
+ ioncube loaders
+ 0
+
+
\ No newline at end of file
diff --git a/managePHP/php71.xml b/managePHP/php71.xml
index 1437f3ec2..4fb5292f8 100644
--- a/managePHP/php71.xml
+++ b/managePHP/php71.xml
@@ -260,4 +260,10 @@
0
+
+ lsphp71-ioncube
+ ioncube loaders
+ 0
+
+
\ No newline at end of file
diff --git a/managePHP/php72.xml b/managePHP/php72.xml
index 4735feae8..d9f151853 100644
--- a/managePHP/php72.xml
+++ b/managePHP/php72.xml
@@ -260,4 +260,10 @@
0
+
+ lsphp72-ioncube
+ ioncube loaders
+ 0
+
+
\ No newline at end of file
diff --git a/managePHP/php73.xml b/managePHP/php73.xml
index e90b00f1b..b24f99ec8 100644
--- a/managePHP/php73.xml
+++ b/managePHP/php73.xml
@@ -260,4 +260,10 @@
0
+
+ lsphp73-ioncube
+ ioncube loaders
+ 0
+
+
\ No newline at end of file
diff --git a/managePHP/ubuntuphp70.xml b/managePHP/ubuntuphp70.xml
index 81a425e72..125028e6e 100644
--- a/managePHP/ubuntuphp70.xml
+++ b/managePHP/ubuntuphp70.xml
@@ -110,4 +110,10 @@
1
+
+ lsphp70-ioncube
+ ioncube loaders
+ 0
+
+
\ No newline at end of file
diff --git a/managePHP/ubuntuphp71.xml b/managePHP/ubuntuphp71.xml
index c36b53b02..e968c31fd 100644
--- a/managePHP/ubuntuphp71.xml
+++ b/managePHP/ubuntuphp71.xml
@@ -110,4 +110,10 @@
1
+
+ lsphp71-ioncube
+ ioncube loaders
+ 0
+
+
diff --git a/managePHP/ubuntuphp72.xml b/managePHP/ubuntuphp72.xml
index 54e3e5b5c..4725cedd8 100644
--- a/managePHP/ubuntuphp72.xml
+++ b/managePHP/ubuntuphp72.xml
@@ -110,4 +110,10 @@
1
+
+ lsphp72-ioncube
+ ioncube loaders
+ 0
+
+
diff --git a/managePHP/ubuntuphp73.xml b/managePHP/ubuntuphp73.xml
index fe18432c4..33c90dcf9 100644
--- a/managePHP/ubuntuphp73.xml
+++ b/managePHP/ubuntuphp73.xml
@@ -110,4 +110,10 @@
1
+
+ lsphp73-ioncube
+ ioncube loaders
+ 0
+
+
diff --git a/plogical/mysqlUtilities.py b/plogical/mysqlUtilities.py
index 23565a3fc..778a26ee8 100644
--- a/plogical/mysqlUtilities.py
+++ b/plogical/mysqlUtilities.py
@@ -9,7 +9,9 @@ import shlex
from websiteFunctions.models import Websites
from databases.models import Databases
import MySQLdb as mysql
-
+import json
+from random import randint
+from plogical.processUtilities import ProcessUtilities
class mysqlUtilities:
@@ -192,3 +194,143 @@ class mysqlUtilities:
return website.databases_set.all()
except:
0
+
+ @staticmethod
+ def showStatus():
+ try:
+
+ connection, cursor = mysqlUtilities.setupConnection()
+
+ if connection == 0:
+ return 0
+
+ cursor.execute("SHOW GLOBAL STATUS")
+ result = cursor.fetchall()
+
+ data = {}
+ data['status'] = 1
+
+ for items in result:
+ if items[0] == 'Uptime':
+ data['uptime'] = mysqlUtilities.GetTime(items[1])
+ elif items[0] == 'Connections':
+ data['connections'] = items[1]
+ elif items[0] == 'Slow_queries':
+ data['Slow_queries'] = items[1]
+
+ ## Process List
+
+ cursor.execute("show processlist")
+ result = cursor.fetchall()
+
+ json_data = "["
+ checker = 0
+
+ for items in result:
+            if len(str(items[3])) == 0:
+                database = 'NULL'
+            else:
+                database = items[3]
+
+ if len(str(items[6])) == 0:
+ state = 'NULL'
+ else:
+ state = items[6]
+
+            if len(str(items[7])) == 0:
+ info = 'NULL'
+ else:
+ info = items[7]
+
+ dic = {
+ 'id': items[0],
+ 'user': items[1],
+ 'database': database,
+ 'command': items[4],
+ 'time': items[5],
+ 'state': state,
+ 'info': info,
+ 'progress': items[8],
+ }
+
+ if checker == 0:
+ json_data = json_data + json.dumps(dic)
+ checker = 1
+ else:
+ json_data = json_data + ',' + json.dumps(dic)
+
+ json_data = json_data + ']'
+
+ data['processes'] = json_data
+
+ ##
+
+ return data
+
+ except BaseException, msg:
+ logging.CyberCPLogFileWriter.writeToFile(str(msg) + "[showStatus]")
+ return 0
+
+ @staticmethod
+ def GetTime(seconds):
+ time = float(seconds)
+ day = time // (24 * 3600)
+ time = time % (24 * 3600)
+ hour = time // 3600
+ time %= 3600
+ minutes = time // 60
+ time %= 60
+ seconds = time
+
+ return ("%d:%d:%d:%d" % (day, hour, minutes, seconds))
+
+ @staticmethod
+ def applyMySQLChanges(data):
+ try:
+ command = 'sudo mv /etc/my.cnf /etc/my.cnf.bak'
+ ProcessUtilities.executioner(command)
+
+ ## Temp
+
+ tempPath = "/home/cyberpanel/" + str(randint(1000, 9999))
+ writeToFile = open(tempPath, 'w')
+ writeToFile.write(data['suggestedContent'])
+ writeToFile.close()
+
+ ##
+
+ command = 'sudo mv ' + tempPath + ' /etc/my.cnf'
+ ProcessUtilities.executioner(command)
+
+ command = 'sudo systemctl restart mysql'
+ ProcessUtilities.executioner(command)
+
+ return 1, None
+
+ except BaseException, msg:
+ command = 'sudo mv /etc/my.cnf.bak /etc/my.cnf'
+ subprocess.call(shlex.split(command))
+ logging.CyberCPLogFileWriter.writeToFile(str(msg))
+ return 0, str(msg)
+
+ @staticmethod
+ def fetchVariables():
+ try:
+
+ connection, cursor = mysqlUtilities.setupConnection()
+
+ if connection == 0:
+ return 0
+
+ cursor.execute("SHOW VARIABLES")
+ result = cursor.fetchall()
+
+ for items in result:
+ logging.CyberCPLogFileWriter.writeToFile(str(items))
+
+
+ ##
+
+ except BaseException, msg:
+ logging.CyberCPLogFileWriter.writeToFile(str(msg) + "[showStatus]")
+ return 0
\ No newline at end of file
diff --git a/plogical/upgrade.py b/plogical/upgrade.py
index 01d4192f1..2894dfedc 100644
--- a/plogical/upgrade.py
+++ b/plogical/upgrade.py
@@ -359,6 +359,57 @@ WantedBy=multi-user.target"""
CONSTRAINT `s3Backups_backuplogs_owner_id_7b4653af_fk_s3Backups` FOREIGN KEY (`owner_id`) REFERENCES `s3Backups_backupplan` (`id`)
)"""
+ try:
+ cursor.execute(query)
+ except:
+ pass
+
+ query = """CREATE TABLE `s3Backups_backupplando` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `name` varchar(50) NOT NULL,
+ `bucket` varchar(50) NOT NULL,
+ `freq` varchar(50) NOT NULL,
+ `retention` int(11) NOT NULL,
+ `type` varchar(5) NOT NULL,
+ `region` varchar(5) NOT NULL,
+ `lastRun` varchar(50) NOT NULL,
+ `owner_id` int(11) NOT NULL,
+ PRIMARY KEY (`id`),
+ UNIQUE KEY `name` (`name`),
+ KEY `s3Backups_backupplan_owner_id_1a3ec86d_fk_loginSyst` (`owner_id`),
+ CONSTRAINT `s3Backups_backupplan_owner_id_1a3ec86d_fk_loginSyst` FOREIGN KEY (`owner_id`) REFERENCES `loginSystem_administrator` (`id`)
+)"""
+
+ try:
+ cursor.execute(query)
+ except:
+ pass
+
+ query = """CREATE TABLE `s3Backups_websitesinplando` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `domain` varchar(100) NOT NULL,
+ `owner_id` int(11) NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `s3Backups_websitesin_owner_id_cef3ea04_fk_s3Backups` (`owner_id`),
+ CONSTRAINT `s3Backups_websitesin_owner_id_cef3ea04_fk_s3Backups` FOREIGN KEY (`owner_id`) REFERENCES `s3Backups_backupplando` (`id`)
+)"""
+
+ try:
+ cursor.execute(query)
+ except:
+ pass
+
+ query = """CREATE TABLE `s3Backups_backuplogsdo` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `timeStamp` varchar(200) NOT NULL,
+ `level` varchar(5) NOT NULL,
+ `msg` varchar(500) NOT NULL,
+ `owner_id` int(11) NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `s3Backups_backuplogs_owner_id_c7cb5872_fk_s3Backups` (`owner_id`),
+ CONSTRAINT `s3Backups_backuplogs_owner_id_c7cb5872_fk_s3Backups` FOREIGN KEY (`owner_id`) REFERENCES `s3Backups_backupplando` (`id`)
+)"""
+
try:
cursor.execute(query)
except:
diff --git a/s3Backups/models.py b/s3Backups/models.py
index 60325e2d9..81a12de36 100644
--- a/s3Backups/models.py
+++ b/s3Backups/models.py
@@ -23,4 +23,25 @@ class BackupLogs(models.Model):
owner = models.ForeignKey(BackupPlan,on_delete=models.CASCADE)
timeStamp = models.CharField(max_length=200)
level = models.CharField(max_length=5)
+ msg = models.CharField(max_length=500)
+
+class BackupPlanDO(models.Model):
+ owner = models.ForeignKey(Administrator, on_delete=models.CASCADE)
+ name = models.CharField(max_length=50, unique=True)
+ bucket = models.CharField(max_length=50, default='NONE')
+ freq = models.CharField(max_length=50)
+ retention = models.IntegerField()
+ type = models.CharField(max_length=5, default='DO')
+ region = models.CharField(max_length=5)
+ lastRun = models.CharField(max_length=50, default='0:0:0')
+
+class WebsitesInPlanDO(models.Model):
+ owner = models.ForeignKey(BackupPlanDO, on_delete=models.CASCADE)
+ domain = models.CharField(max_length=100)
+
+
+class BackupLogsDO(models.Model):
+ owner = models.ForeignKey(BackupPlanDO, on_delete=models.CASCADE)
+ timeStamp = models.CharField(max_length=200)
+ level = models.CharField(max_length=5)
msg = models.CharField(max_length=500)
\ No newline at end of file
diff --git a/s3Backups/s3Backups.py b/s3Backups/s3Backups.py
index 560696e04..ffb121001 100644
--- a/s3Backups/s3Backups.py
+++ b/s3Backups/s3Backups.py
@@ -39,6 +39,8 @@ class S3Backups(multi.Thread):
self.connectAccount()
elif self.function == 'forceRunAWSBackup':
self.forceRunAWSBackup()
+ elif self.function == 'forceRunAWSBackupDO':
+ self.forceRunAWSBackupDO()
elif self.function == 'runAWSBackups':
self.runAWSBackups()
except BaseException, msg:
@@ -468,6 +470,401 @@ class S3Backups(multi.Thread):
plan = BackupPlan.objects.get(name=self.data['planName'])
BackupLogs(owner=plan, timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), level='ERROR', msg=str(msg)).save()
+ def connectAccountDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ mailUtilities.checkHome()
+
+ path = '/home/cyberpanel/.do'
+
+ if not os.path.exists(path):
+ os.mkdir(path)
+
+ credentials = path + '/credentials'
+
+ credFile = open(credentials, 'w')
+ credFile.write(self.data['credData'])
+ credFile.close()
+
+ ##
+
+ cronPath = '/etc/crontab'
+
+ command = 'sudo cat ' + cronPath
+ output = subprocess.check_output(shlex.split(command)).split('\n')
+
+ insertCron = 1
+
+ for items in output:
+ if items.find('s3backups.py') > -1:
+ insertCron = 0
+ break
+
+ if insertCron:
+ pathToFile = "/home/cyberpanel/" + str(randint(1000, 9999))
+ writeToFile = open(pathToFile, 'w')
+ for items in output:
+ writeToFile.writelines(items + '\n')
+ writeToFile.writelines('0 0 * * * cyberpanel /usr/local/CyberCP/bin/python2 /usr/local/CyberCP/s3Backups/s3Backups.py\n')
+ writeToFile.close()
+ command = 'sudo mv ' + pathToFile + ' /etc/crontab'
+ ProcessUtilities.executioner(command)
+
+ return proc.ajax(1, None)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def fetchBucketsDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ filePath = '/home/cyberpanel/.do/credentials'
+
+ data = open(filePath, 'r').readlines()
+
+ accessID = data[1].split('=')[1].strip(' ').strip('\n')
+ secret = data[2].split('=')[1].strip(' ').strip('\n')
+
+ session = boto3.session.Session()
+ client = session.client(
+ 's3',
+ region_name=self.data['doRegion'],
+ endpoint_url='https://' + self.data['doRegion'] + '.digitaloceanspaces.com',
+ aws_access_key_id=accessID,
+ aws_secret_access_key=secret
+ )
+ response = client.list_buckets()
+ spaces = [space['Name'] for space in response['Buckets']]
+ json_data = "["
+ checker = 0
+
+ for space in spaces:
+ dic = {'name': space}
+
+ if checker == 0:
+ json_data = json_data + json.dumps(dic)
+ checker = 1
+ else:
+ json_data = json_data + ',' + json.dumps(dic)
+
+ json_data = json_data + ']'
+ final_json = json.dumps({'status': 1, 'error_message': "None", "data": json_data})
+ return HttpResponse(final_json)
+
+ except BaseException, msg:
+ logging.writeToFile(str(msg))
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def createPlanDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ admin = Administrator.objects.get(pk=userID)
+
+ newPlan = BackupPlanDO(owner=admin, name=self.data['planName'], freq = self.data['frequency'],
+ retention= self.data['retenion'], bucket= self.data['bucketName'], type= self.data['type'],
+ region= self.data['region'])
+ newPlan.save()
+
+ for items in self.data['websitesInPlan']:
+ wp = WebsitesInPlanDO(owner=newPlan, domain=items)
+ wp.save()
+
+ return proc.ajax(1, None)
+
+ except BaseException, msg:
+ logging.writeToFile(str(msg) + ' [createPlanDO]')
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def fetchBackupPlansDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+
+ admin = Administrator.objects.get(pk=userID)
+ json_data = "["
+ checker = 0
+
+ for plan in admin.backupplando_set.filter(type= self.data['type']):
+ dic = {
+ 'name': plan.name,
+ 'bucket': plan.bucket,
+ 'freq': plan.freq,
+ 'retention': plan.retention,
+ 'lastRun': plan.lastRun,
+ }
+
+ if checker == 0:
+ json_data = json_data + json.dumps(dic)
+ checker = 1
+ else:
+ json_data = json_data + ',' + json.dumps(dic)
+
+ json_data = json_data + ']'
+ final_json = json.dumps({'status': 1, 'error_message': "None", "data": json_data})
+ return HttpResponse(final_json)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def deletePlanDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ delPlan = BackupPlanDO.objects.get(name=self.data['planName'])
+ delPlan.delete()
+
+ return proc.ajax(1, None)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def fetchWebsitesInPlanDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+
+ plan = BackupPlanDO.objects.get(name=self.data['planName'])
+ json_data = "["
+ checker = 0
+
+ for website in plan.websitesinplando_set.all():
+ dic = {
+ 'id': website.id,
+ 'domain': website.domain,
+ }
+
+ if checker == 0:
+ json_data = json_data + json.dumps(dic)
+ checker = 1
+ else:
+ json_data = json_data + ',' + json.dumps(dic)
+
+ json_data = json_data + ']'
+ final_json = json.dumps({'status': 1, 'error_message': "None", "data": json_data})
+ return HttpResponse(final_json)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def fetchBackupLogsDO(self):
+ try:
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ recordsToShow = int(self.data['recordsToShow'])
+ page = int(self.data['page'])
+
+ backupPlan = BackupPlanDO.objects.get(name=self.data['planName'])
+ logs = backupPlan.backuplogsdo_set.all().order_by('-id')
+
+ pagination = S3Backups.getPagination(len(logs), recordsToShow)
+ endPageNumber, finalPageNumber = S3Backups.recordsPointer(page, recordsToShow)
+ jsonData = S3Backups.getLogsInJson(logs[finalPageNumber:endPageNumber])
+
+ data = {}
+ data['data'] = jsonData
+ data['pagination'] = pagination
+
+ return proc.ajax(1, None, data)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajaxPre(0, str(msg))
+
+ def deleteDomainFromPlanDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ plan = BackupPlanDO.objects.get(name=self.data['planName'])
+ web = WebsitesInPlanDO.objects.get(owner=plan, domain=self.data['domainName'])
+ web.delete()
+
+ return proc.ajax(1, None)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def savePlanChangesDO(self):
+ try:
+
+ proc = httpProc(self.request, None, None)
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ return proc.ajax(0, 'Only administrators can use AWS S3 Backups.')
+
+ logging.writeToFile('hello world')
+
+ changePlan = BackupPlanDO.objects.get(name=self.data['planName'])
+
+ changePlan.bucket = self.data['bucketName']
+ changePlan.freq = self.data['frequency']
+ changePlan.retention = self.data['retention']
+ changePlan.region = self.data['region']
+
+ changePlan.save()
+
+ return proc.ajax(1, None)
+
+ except BaseException, msg:
+ proc = httpProc(self.request, None, None)
+ return proc.ajax(0, str(msg))
+
+ def forceRunAWSBackupDO(self):
+ try:
+
+ plan = BackupPlanDO.objects.get(name=self.data['planName'])
+ bucketName = plan.bucket.strip('\n').strip(' ')
+ runTime = time.strftime("%d:%m:%Y")
+
+ ## Setup DO Client
+
+ filePath = '/home/cyberpanel/.do/credentials'
+
+ data = open(filePath, 'r').readlines()
+
+ accessID = data[1].split('=')[1].strip(' ').strip('\n')
+ secret = data[2].split('=')[1].strip(' ').strip('\n')
+
+ session = boto3.session.Session()
+ client = session.client(
+ 's3',
+ region_name=plan.region,
+ endpoint_url='https://' + plan.region + '.digitaloceanspaces.com',
+ aws_access_key_id=accessID,
+ aws_secret_access_key=secret
+ )
+
+ config = TransferConfig(multipart_threshold=1024 * 25, max_concurrency=10,
+ multipart_chunksize=1024 * 25, use_threads=True)
+
+ ## Set Expiration for objects
+ try:
+
+ client.put_bucket_lifecycle_configuration(
+                    Bucket=bucketName,
+ LifecycleConfiguration={
+ 'Rules': [
+ {
+ 'Expiration': {
+ 'Days': plan.retention,
+ 'ExpiredObjectDeleteMarker': True
+ },
+ 'ID': plan.name,
+ 'Prefix': '',
+ 'Filter': {
+ 'Prefix': plan.name + '/',
+ },
+ 'Status': 'Enabled',
+
+ },
+ ]
+ }
+ )
+ except BaseException, msg:
+ BackupLogsDO(owner=plan, timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), level='ERROR', msg=str(msg)).save()
+
+ ##
+
+ userID = self.request.session['userID']
+ currentACL = ACLManager.loadedACL(userID)
+
+ if currentACL['admin'] == 0:
+ BackupLogsDO(owner=plan, timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), level='INFO', msg='Unauthorised user tried to run AWS Backups.').save()
+ return 0
+
+ BackupLogsDO(owner=plan,level='INFO', timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), msg='Starting backup process..').save()
+
+ for items in plan.websitesinplando_set.all():
+ result = self.createBackup(items.domain)
+ if result[0]:
+ key = plan.name + '/' + runTime + '/' + result[1].split('/')[-1] + ".tar.gz"
+ client.upload_file(
+ result[1] + ".tar.gz",
+ bucketName,
+ key,
+ Config=config,
+ )
+ BackupLogsDO(owner=plan, level='INFO', timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), msg='Backup successful for ' + items.domain + '.').save()
+ else:
+ BackupLogsDO(owner=plan, level='ERROR', timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), msg='Backup failed for ' + items.domain + '. Error: ' + result[1]).save()
+
+
+ plan.lastRun = runTime
+ plan.save()
+
+ BackupLogsDO(owner=plan, level='INFO', timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), msg='Backup Process Finished.').save()
+ except BaseException, msg:
+ logging.writeToFile(str(msg) + ' [S3Backups.forceRunAWSBackupDO]')
+ plan = BackupPlanDO.objects.get(name=self.data['planName'])
+ BackupLogsDO(owner=plan, timeStamp=time.strftime("%b %d %Y, %H:%M:%S"), level='ERROR', msg=str(msg)).save()
+
def runAWSBackups(self):
try:
admin = Administrator.objects.get(pk=1)
@@ -496,6 +893,29 @@ class S3Backups(multi.Thread):
self.data['planName'] = plan.name
self.forceRunAWSBackup()
+ for plan in BackupPlanDO.objects.all():
+ lastRunDay = plan.lastRun.split(':')[0]
+ lastRunMonth = plan.lastRun.split(':')[1]
+
+ if plan.freq == 'Daily' and lastRunDay != time.strftime("%d"):
+ self.data = {}
+ self.data['planName'] = plan.name
+ self.forceRunAWSBackupDO()
+ else:
+ if lastRunMonth == time.strftime("%m"):
+ days = int(time.strftime("%d")) - int(lastRunDay)
+ if days >=6:
+ self.data = {}
+ self.data['planName'] = plan.name
+ self.forceRunAWSBackupDO()
+ else:
+ days = 30 - int(lastRunDay)
+ days = days + int(time.strftime("%d"))
+ if days >=6:
+ self.data = {}
+ self.data['planName'] = plan.name
+ self.forceRunAWSBackupDO()
+
except BaseException, msg:
logging.writeToFile(str(msg) + ' [S3Backups.runAWSBackups]')