Merge pull request #1714 from master3395/v2.5.5-dev

V2.5.5 dev
This commit is contained in:
Master3395
2026-02-26 21:10:27 +01:00
committed by GitHub
54 changed files with 2210 additions and 1184 deletions

4
.gitignore vendored
View File

@@ -123,4 +123,6 @@ mysql_password.txt
test.php
test.sh
*.test.php
*.test.sh
# Patreon secrets
patreon_config.py
patreon_secrets.env

View File

@@ -0,0 +1,58 @@
#!/bin/bash
# Change phpMyAdmin version: download chosen version, preserve config.inc.php and phpmyadminsignin.php.
# Run as root: bash /usr/local/CyberCP/CPScripts/phpmyadmin_version_changer.sh [VERSION]
#
# Fixes vs previous revision:
#  - underscore-form versions with multi-digit parts (e.g. 5_2_26) are normalized too
#  - the old phpmyadmin tree is only removed AFTER the new tarball extracts
#    successfully, so a failed extract no longer leaves the panel without phpMyAdmin
set -e
PMA_DIR="/usr/local/CyberCP/public/phpmyadmin"
TMP_CONFIG="/tmp/cyberpanel_pma_config.inc.php.bak"
TMP_SIGNON="/tmp/cyberpanel_pma_phpmyadminsignin.php.bak"
LOG="/var/log/cyberpanel_upgrade_debug.log"
# log MESSAGE... : timestamped line to stdout and the upgrade debug log.
log() { echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] $*" | tee -a "$LOG"; }
if [[ $(id -u) -ne 0 ]]; then
    echo "Run as root: sudo bash $0 [VERSION]"
    exit 1
fi
# Version: argument, else latest release tag from GitHub API, else pinned fallback.
PMA_VER="${1:-}"
if [[ -z "$PMA_VER" ]]; then
    PMA_VER=$(curl -sS "https://api.github.com/repos/phpmyadmin/phpmyadmin/releases/latest" 2>/dev/null | grep -o '"tag_name": "[^"]*' | sed 's/"tag_name": "//;s/^RELEASE_//;s/_/./g' | head -1)
    [[ -z "$PMA_VER" ]] && PMA_VER="5.2.3"
fi
PMA_VER="${PMA_VER// /}"
# Accept underscore tag form (RELEASE_5_2_3 style) with any number of digits per part.
[[ "$PMA_VER" =~ ^[0-9]+(_[0-9]+)+$ ]] && PMA_VER="${PMA_VER//_/.}"
log "Using phpMyAdmin version: $PMA_VER"
[[ -d "/usr/local/CyberCP/public" ]] || mkdir -p /usr/local/CyberCP/public
# Preserve existing config and the CyberPanel single-sign-on shim, if present.
SAVED_CONFIG=false
SAVED_SIGNON=false
[[ -f "$PMA_DIR/config.inc.php" ]] && cp -a "$PMA_DIR/config.inc.php" "$TMP_CONFIG" && SAVED_CONFIG=true
[[ -f "$PMA_DIR/phpmyadminsignin.php" ]] && cp -a "$PMA_DIR/phpmyadminsignin.php" "$TMP_SIGNON" && SAVED_SIGNON=true
TARBALL="/usr/local/CyberCP/public/phpmyadmin.tar.gz"
URL="https://files.phpmyadmin.net/phpMyAdmin/${PMA_VER}/phpMyAdmin-${PMA_VER}-all-languages.tar.gz"
wget -q -O "$TARBALL" "$URL" || { log "ERROR: Download failed"; exit 1; }
# Sanity check: a real release tarball is several MB; tiny files are HTML error pages.
[[ $(stat -c%s "$TARBALL" 2>/dev/null) -gt 1000000 ]] || { log "ERROR: Tarball too small"; exit 1; }
# Extract FIRST, then swap: the old install survives if extraction fails.
tar -xzf "$TARBALL" -C /usr/local/CyberCP/public/ || { log "ERROR: Extract failed"; rm -f "$TARBALL"; exit 1; }
rm -f "$TARBALL"
EXTRACTED=$(ls -d /usr/local/CyberCP/public/phpMyAdmin-*-all-languages 2>/dev/null | head -1)
if [[ -z "$EXTRACTED" ]] || [[ ! -d "$EXTRACTED" ]]; then
    log "ERROR: Extract failed"
    exit 1
fi
rm -rf "$PMA_DIR"
mv "$EXTRACTED" "$PMA_DIR"
# Restore saved config, or seed from the shipped sample when nothing was saved.
if [[ "$SAVED_CONFIG" = true ]] && [[ -f "$TMP_CONFIG" ]]; then
    cp -a "$TMP_CONFIG" "$PMA_DIR/config.inc.php"
    rm -f "$TMP_CONFIG"
fi
if [[ ! -f "$PMA_DIR/config.inc.php" ]] && [[ -f "$PMA_DIR/config.sample.inc.php" ]]; then
    cp -a "$PMA_DIR/config.sample.inc.php" "$PMA_DIR/config.inc.php"
fi
# Ensure TempDir is set so phpMyAdmin can cache compiled templates.
[[ -f "$PMA_DIR/config.inc.php" ]] && (grep -q "TempDir" "$PMA_DIR/config.inc.php" 2>/dev/null || echo -e "\n\$cfg['TempDir'] = '/usr/local/CyberCP/public/phpmyadmin/tmp';" >> "$PMA_DIR/config.inc.php")
[[ "$SAVED_SIGNON" = true ]] && [[ -f "$TMP_SIGNON" ]] && cp -a "$TMP_SIGNON" "$PMA_DIR/phpmyadminsignin.php" && rm -f "$TMP_SIGNON"
[[ "$SAVED_SIGNON" != true ]] && [[ -f /usr/local/CyberCP/plogical/phpmyadminsignin.php ]] && cp -a /usr/local/CyberCP/plogical/phpmyadminsignin.php "$PMA_DIR/phpmyadminsignin.php"
# Force TCP so sign-on works when socket auth for 'localhost' differs.
sed -i "s/'localhost'/'127.0.0.1'/g" "$PMA_DIR/phpmyadminsignin.php" 2>/dev/null || true
mkdir -p "$PMA_DIR/tmp"
id lscpd &>/dev/null && chown -R lscpd:lscpd "$PMA_DIR"
chmod -R 755 "$PMA_DIR"
log "phpMyAdmin changed to version $PMA_VER"
echo "phpMyAdmin version $PMA_VER installed."

View File

@@ -0,0 +1,85 @@
#!/bin/bash
# Change SnappyMail version: download chosen version, preserve data dirs, replace app files, fix data path and perms.
# Run as root: bash /usr/local/CyberCP/CPScripts/snappymail_version_changer.sh [VERSION]
# Example: bash snappymail_version_changer.sh 2.38.2
# Data under /usr/local/lscp/cyberpanel/snappymail/data is never removed.
#
# Fix vs previous revision: the zip is now extracted into a staging dir and the
# existing app tree is removed only after extraction succeeds. Previously a
# missing/failed unzip after 'rm -rf' destroyed the webmail install.
set -e
PUBLIC_SNAPPY="/usr/local/CyberCP/public/snappymail"
DATA_PATH="/usr/local/lscp/cyberpanel/snappymail/data"
LOG="/var/log/cyberpanel_upgrade_debug.log"
# log MESSAGE... : timestamped line to stdout and the upgrade debug log.
log() { echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] $*" | tee -a "$LOG"; }
if [[ $(id -u) -ne 0 ]]; then
    echo "Run as root: sudo bash $0 [VERSION]"
    exit 1
fi
# unzip is required; verify BEFORE touching the existing install.
command -v unzip >/dev/null 2>&1 || { log "ERROR: unzip is not installed"; exit 1; }
# Version: argument or latest from API or default
SNAPPY_VER="${1:-}"
if [[ -z "$SNAPPY_VER" ]]; then
    SNAPPY_VER=$(curl -sS "https://api.github.com/repos/the-djmaze/snappymail/releases/latest" 2>/dev/null | grep -o '"tag_name": "v[^"]*' | sed 's/"tag_name": "v//' | head -1)
    [[ -z "$SNAPPY_VER" ]] && SNAPPY_VER="2.38.2"
    log "Using SnappyMail version: $SNAPPY_VER (from API or default)"
else
    SNAPPY_VER="${SNAPPY_VER// /}"
    log "Using SnappyMail version: $SNAPPY_VER (from argument)"
fi
[[ -d "/usr/local/CyberCP/public" ]] || mkdir -p /usr/local/CyberCP/public
cd /usr/local/CyberCP/public || exit 1
# Download zip (data dirs are NOT under public/snappymail; we only replace app tree)
ZIP="snappymail-${SNAPPY_VER}.zip"
URL="https://github.com/the-djmaze/snappymail/releases/download/v${SNAPPY_VER}/${ZIP}"
log "Downloading $URL ..."
if ! wget -q -O "$ZIP" "$URL"; then
    log "ERROR: Download failed. Check version at https://github.com/the-djmaze/snappymail/releases"
    exit 1
fi
# Extract into a staging dir first so a bad zip cannot destroy the live tree.
STAGING=$(mktemp -d /tmp/snappymail_new.XXXXXX)
if ! unzip -q "$ZIP" -d "$STAGING"; then
    rm -rf "$STAGING"
    rm -f "$ZIP"
    log "ERROR: unzip failed; existing install left untouched"
    exit 1
fi
rm -f "$ZIP"
# Replace only app tree; do not remove DATA_PATH or public/snappymail/data if it exists
if [[ -d "$PUBLIC_SNAPPY" ]]; then
    rm -rf "$PUBLIC_SNAPPY"
    log "Removed existing public/snappymail app tree (data preserved under $DATA_PATH)"
fi
mv "$STAGING" "$PUBLIC_SNAPPY"
# Fix data path in include.php
INCLUDE_PHP=""
for inc in "$PUBLIC_SNAPPY"/snappymail/v/*/include.php; do
    [[ -f "$inc" ]] && INCLUDE_PHP="$inc" && break
done
if [[ -n "$INCLUDE_PHP" ]] && [[ -f "$INCLUDE_PHP" ]]; then
    if grep -q "\$sCustomDataPath = ''" "$INCLUDE_PHP" 2>/dev/null; then
        sed -i "s|\$sCustomDataPath = '';|\$sCustomDataPath = '/usr/local/lscp/cyberpanel/snappymail/data';|" "$INCLUDE_PHP"
        log "Set data path in include.php"
    fi
fi
# Ensure data dirs exist
mkdir -p "$DATA_PATH/_data_/_default_/configs"
mkdir -p "$DATA_PATH/_data_/_default_/domains"
mkdir -p "$DATA_PATH/_data_/_default_/storage"
mkdir -p "$DATA_PATH/_data_/_default_/temp"
mkdir -p "$DATA_PATH/_data_/_default_/cache"
# Permissions (also normalizes the 700 mode mktemp gave the staging dir)
find "$PUBLIC_SNAPPY" -type d -exec chmod 755 {} \;
find "$PUBLIC_SNAPPY" -type f -exec chmod 644 {} \;
if id lscpd &>/dev/null; then
    chown -R lscpd:lscpd "$PUBLIC_SNAPPY"
    chown -R lscpd:lscpd "$DATA_PATH"
    log "Set ownership lscpd:lscpd"
fi
chmod -R 775 "$DATA_PATH" 2>/dev/null || true
# Optional: run CyberPanel SnappyMail integration if present
if [[ -f /usr/local/CyberCP/snappymail_cyberpanel.php ]]; then
    for php in /usr/local/lsws/lsphp83/bin/php /usr/local/lsws/lsphp82/bin/php /usr/local/lsws/lsphp81/bin/php /usr/local/lsws/lsphp80/bin/php; do
        [[ -x "$php" ]] && "$php" /usr/local/CyberCP/snappymail_cyberpanel.php 2>/dev/null && break
    done
fi
log "SnappyMail changed to version $SNAPPY_VER"
echo "SnappyMail version changed to $SNAPPY_VER. Data preserved under $DATA_PATH"

17
CyberCP/csrfMiddleware.py Normal file
View File

@@ -0,0 +1,17 @@
# -*- coding: utf-8 -*-
"""
Custom CSRF middleware that exempts /phpmyadmin/ and /snappymail/ so their
PHP sign-in forms (POST) do not get 403 CSRF verification failed.
"""
from django.middleware.csrf import CsrfViewMiddleware
class CsrfExemptPhpMyAdminMiddleware(CsrfViewMiddleware):
    """CSRF middleware that skips verification for phpMyAdmin and SnappyMail paths."""

    # Requests whose path begins with one of these prefixes bypass the CSRF
    # check: their PHP sign-in forms POST without a Django CSRF token and
    # would otherwise be rejected with 403.
    EXEMPT_PREFIXES = ('/phpmyadmin/', '/snappymail/')

    def process_view(self, request, callback, callback_args, callback_kwargs):
        exempt = any(request.path.startswith(prefix) for prefix in self.EXEMPT_PREFIXES)
        if exempt:
            # Returning None lets the view run without CSRF verification.
            return None
        return super().process_view(request, callback, callback_args, callback_kwargs)

View File

@@ -151,22 +151,30 @@ WSGI_APPLICATION = 'CyberCP.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Prefer password from /etc/cyberpanel/mysqlPassword so panel stays in sync with CLI/install scripts.
_def_mysql_pass = '1XTy1XOV0BZPnM'
try:
_mysql_pass_file = '/etc/cyberpanel/mysqlPassword'
if os.path.exists(_mysql_pass_file):
with open(_mysql_pass_file, 'r') as _f:
_def_mysql_pass = (_f.read() or '').strip() or _def_mysql_pass
except Exception:
pass
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cyberpanel',
'USER': 'cyberpanel',
'PASSWORD': '1XTy1XOV0BZPnM',
'PASSWORD': _def_mysql_pass,
'HOST': 'localhost',
'PORT':''
'PORT': ''
},
'rootdb': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysql',
'USER': 'root',
'PASSWORD': '1XTy1XOV0BZPnM',
'PASSWORD': _def_mysql_pass,
'HOST': 'localhost',
'PORT': '',
},

View File

@@ -20,20 +20,27 @@ from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
from django.views.generic import RedirectView
from django.views.decorators.csrf import csrf_exempt
from firewall import views as firewall_views
@csrf_exempt
def serve_phpmyadmin(request, path):
    """Serve phpMyAdmin files; CSRF exempt so sign-in form POST does not get 403."""
    document_root = os.path.join(settings.PUBLIC_ROOT, 'phpmyadmin')
    return serve(request, path, document_root=document_root)
# Plugin routes are no longer hardcoded here; pluginHolder.urls dynamically
# includes each installed plugin (under /plugins/<name>/) so settings and
# other plugin pages work for any installed plugin.
# Optional app: may be missing after clean clone or git clean -fd (not in all repo trees).
# When missing or broken, register a placeholder so {% url 'emailMarketing' %} in templates never raises Reverse not found.
# When missing or broken, register a placeholder so {% url 'emailMarketing' %} in templates never raises Reverse not found. Redirect to Plugin Store.
_optional_email_marketing = []
try:
_optional_email_marketing.append(path('emailMarketing/', include('emailMarketing.urls')))
except (ModuleNotFoundError, ImportError, AttributeError):
_optional_email_marketing.append(
path('emailMarketing/', RedirectView.as_view(url='/base/', permanent=False), name='emailMarketing')
path('emailMarketing/', RedirectView.as_view(url='/plugins/installed?view=store', permanent=False), name='emailMarketing')
)
urlpatterns = [
@@ -43,7 +50,7 @@ urlpatterns = [
re_path(r'^snappymail/?$', RedirectView.as_view(url='/snappymail/index.php', permanent=False)),
re_path(r'^snappymail/(?P<path>.*)$', serve, {'document_root': os.path.join(settings.PUBLIC_ROOT, 'snappymail')}),
re_path(r'^phpmyadmin/?$', RedirectView.as_view(url='/phpmyadmin/index.php', permanent=False)),
re_path(r'^phpmyadmin/(?P<path>.*)$', serve, {'document_root': os.path.join(settings.PUBLIC_ROOT, 'phpmyadmin')}),
re_path(r'^phpmyadmin/(?P<path>.*)$', serve_phpmyadmin),
path('base/', include('baseTemplate.urls')),
path('imunifyav/', firewall_views.imunifyAV, name='imunifyav_root'),
path('ImunifyAV/', firewall_views.imunifyAV, name='imunifyav_root_legacy'),

View File

@@ -98,46 +98,60 @@ Third-party repositories may provide older or niche versions; verify compatibili
sh <(curl -s https://cyberpanel.net/install.sh || wget -O - https://cyberpanel.net/install.sh)
```
➡️ See `guides/INSTALLATION.md` for platform-specific options and non-interactive installs.
➡️ See `guides/INSTALLATION.md` (or `docs/` on this repo) for platform-specific options and non-interactive installs.
---
## Upgrade (recommended)
## Upgrade
The upgrade uses a **modular loader** (`cyberpanel_upgrade.sh`) that works on both **stable** and **v2.5.5-dev**. When run via the one-liner (no repo on disk), the loader fetches `upgrade_modules/` from the chosen branch. Use **preUpgrade.sh** (recommended) or the direct loader URL below.
### Upgrade to stable (recommended)
```bash
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/stable/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/stable/preUpgrade.sh)
sh <(curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/stable/preUpgrade.sh || wget -qO - https://raw.githubusercontent.com/master3395/cyberpanel/stable/preUpgrade.sh)
```
**Post-upgrade checklist:** verify email, DNS, SSL, and run a smoke test on key sites.
PreUpgrade downloads the loader from `stable` and runs it with `-b stable`, so modules are taken from the stable branch. No `-b` flag needed.
---
**Post-upgrade:** verify email, DNS, SSL, and run a smoke test on key sites.
## Upgrade to v2.5.5-dev (non-interactive)
### Upgrade to v2.5.5-dev
Upgrade to v2.5.5-dev without branch or MariaDB prompts.
**MariaDB version options:** `10.11`, `11.8` (LTS default), `12.1` (latest). Use `--mariadb` for 10.11, or `--mariadb-version X` to choose explicitly. If you want to **default to 11.8** and skip the prompt, use `--mariadb-version 11.8`.
Use `-b v2.5.5-dev` so the loader fetches modules from the dev branch.
```bash
# Upgrade to v2.5.5-dev without prompts (script will prompt for MariaDB unless you pass a flag)
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev
# Interactive (branch + MariaDB prompts)
sh <(curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -qO - https://raw.githubusercontent.com/master3395/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev
# Default to MariaDB 11.8 (LTS) — recommended, non-interactive
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev --mariadb-version 11.8
# MariaDB 10.11 (non-interactive)
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev --mariadb
# MariaDB 12.1 (latest)
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev --mariadb-version 12.1
# Non-interactive: v2.5.5-dev + MariaDB 11.8 (LTS) — recommended
sh <(curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -qO - https://raw.githubusercontent.com/master3395/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev --mariadb-version 11.8
```
**Full non-interactive (v2.5.5-dev + MariaDB 11.8):**
**MariaDB options:** `10.11`, `11.8` (LTS default), `12.x` (e.g. 12.1, 12.2). Use `--mariadb` for 10.11, or `--mariadb-version X.Y` to set explicitly.
```bash
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh || wget -O - https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh) -b v2.5.5-dev --mariadb-version 11.8
# MariaDB 10.11
sh <(curl -sL .../preUpgrade.sh) -b v2.5.5-dev --mariadb
# MariaDB 12.1
sh <(curl -sL .../preUpgrade.sh) -b v2.5.5-dev --mariadb-version 12.1
```
### Direct loader (advanced)
If you prefer to run the upgrade script without preUpgrade (e.g. already have the branch in mind):
```bash
# Stable (default; modules fetched from stable)
sudo bash <(curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/stable/cyberpanel_upgrade.sh)
# Dev (pass -b so modules are fetched from v2.5.5-dev)
sudo bash <(curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/stable/cyberpanel_upgrade.sh) -b v2.5.5-dev
```
Optional flags (same as with preUpgrade): `--mariadb-version 11.8`, `--debug`, `--mirror`, etc.
---
## Troubleshooting (common)

View File

@@ -625,11 +625,14 @@ install_cyberpanel_direct() {
# Ask MariaDB version (after web server choice) if not set via --mariadb-version
if [ -z "$MARIADB_VER" ]; then
echo ""
echo " MariaDB version: 10.11, 11.8 (LTS, default) or 12.1?"
read -r -t 60 -p " Enter 10.11, 11.8 or 12.1 [11.8]: " MARIADB_VER || true
echo " MariaDB version: 10.11, 11.8 (LTS, default), 12.1, 12.2, 12.3 or other X.Y?"
read -r -t 60 -p " Enter version [11.8]: " MARIADB_VER || true
MARIADB_VER="${MARIADB_VER:-11.8}"
MARIADB_VER="${MARIADB_VER// /}"
if [ "$MARIADB_VER" != "10.11" ] && [ "$MARIADB_VER" != "11.8" ] && [ "$MARIADB_VER" != "12.1" ]; then
# Normalize to major.minor (e.g. 12.3.1 -> 12.3)
if [[ "$MARIADB_VER" =~ ^([0-9]+)\.([0-9]+) ]]; then
MARIADB_VER="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
else
MARIADB_VER="11.8"
fi
echo " Using MariaDB $MARIADB_VER"

View File

@@ -792,7 +792,8 @@ EOF
sed -i 's|https://yum.mariadb.org/RPM-GPG-KEY-MariaDB|https://cyberpanel.sh/yum.mariadb.org/RPM-GPG-KEY-MariaDB|g' /etc/yum.repos.d/MariaDB.repo
fi
dnf clean metadata --disablerepo='*' --enablerepo=mariadb 2>/dev/null || true
# MariaDB 10 -> 11 or 11 -> 12: RPM scriptlet blocks in-place upgrade; do manual stop, remove old server, install target, start, mariadb-upgrade
# MariaDB 10 -> 11 or 11 -> 12: RPM scriptlet blocks in-place upgrade; do manual stop, remove old server, install target, start, mariadb-upgrade.
# Data in /var/lib/mysql is preserved; no databases are dropped.
MARIADB_OLD_10=$(rpm -qa 'MariaDB-server-10*' 2>/dev/null | head -1)
[[ -z "$MARIADB_OLD_10" ]] && MARIADB_OLD_10=$(rpm -qa 2>/dev/null | grep -E '^MariaDB-server-10\.' | head -1)
MARIADB_OLD_11=$(rpm -qa 'MariaDB-server-11*' 2>/dev/null | head -1)
@@ -811,7 +812,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed." | tee -a /var/log/cyberpanel_upgrade_debug.log
elif [[ -n "$MARIADB_OLD_11" ]] && [[ "$MARIADB_VER_REPO" =~ ^12\. ]]; then
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB 11.x detected; performing manual upgrade to $MARIADB_VER_REPO (stop, remove, install, start, mariadb-upgrade)..." | tee -a /var/log/cyberpanel_upgrade_debug.log
@@ -825,7 +826,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (11->12)." | tee -a /var/log/cyberpanel_upgrade_debug.log
else
# Normal install/upgrade (same version or 10.11)
@@ -848,7 +849,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual 11->12 fallback completed." | tee -a /var/log/cyberpanel_upgrade_debug.log
fi
fi
@@ -899,7 +900,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (AlmaLinux 9)." | tee -a /var/log/cyberpanel_upgrade_debug.log
elif [[ -n "$MARIADB_OLD_11_AL9" ]] && [[ "$MARIADB_VER_REPO" =~ ^12\. ]]; then
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB 11.x detected (AlmaLinux 9); manual upgrade to $MARIADB_VER_REPO..." | tee -a /var/log/cyberpanel_upgrade_debug.log
@@ -913,7 +914,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (AlmaLinux 9, 11->12)." | tee -a /var/log/cyberpanel_upgrade_debug.log
else
dnf install -y --enablerepo=mariadb MariaDB-server MariaDB-devel 2>/dev/null || dnf install -y mariadb-server mariadb-devel

52
deploy-docker-containers-fix.sh Executable file
View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Deploy Docker containers fix to live panel (/usr/local/CyberCP).
# Run this script ON the server (e.g. 84.247.184.182) as root or with sudo.
# Fix: HTTP 500 on /docker/containers - error handling + auto-migrate.
#
# Fix vs previous revision: the /tmp working clone is now removed via an EXIT
# trap, so a failure in any step under 'set -e' no longer leaks the directory.
set -e
CYBERCP_ROOT="${CYBERCP_ROOT:-/usr/local/CyberCP}"
REPO_URL="${REPO_URL:-https://github.com/master3395/cyberpanel.git}"
BRANCH="${BRANCH:-v2.5.5-dev}"
WORK_DIR="/tmp/cyberpanel-deploy-docker-$$"
# Cleanup runs on every exit path (success, error, or signal).
cleanup() { rm -rf -- "$WORK_DIR"; }
trap cleanup EXIT
echo "[$(date -Iseconds)] Deploying Docker containers fix to ${CYBERCP_ROOT}"
# Clone repo (shallow, branch only)
mkdir -p "$WORK_DIR"
git clone --depth 1 --branch "$BRANCH" "$REPO_URL" "$WORK_DIR"
# Backup and copy fixed files
for f in dockerManager/container.py dockerManager/views.py; do
    src="$WORK_DIR/$f"
    dest="$CYBERCP_ROOT/$f"
    if [ ! -f "$src" ]; then
        echo "ERROR: $src not found in repo"
        exit 1
    fi
    # Keep a timestamped backup of the live file before overwriting it.
    if [ -f "$dest" ]; then
        cp -a "$dest" "${dest}.bak.$(date +%Y%m%d%H%M%S)"
    fi
    cp -a "$src" "$dest"
    echo "  -> $dest"
done
# Run migrations for dockerManager (creates table if missing)
if [ -x "$CYBERCP_ROOT/bin/python" ] && [ -f "$CYBERCP_ROOT/manage.py" ]; then
    echo "Running: manage.py migrate dockerManager --noinput"
    "$CYBERCP_ROOT/bin/python" "$CYBERCP_ROOT/manage.py" migrate dockerManager --noinput || true
fi
# Restart panel service so Django loads new code
if systemctl is-active --quiet lscpd 2>/dev/null; then
    echo "Restarting lscpd..."
    systemctl restart lscpd
elif systemctl is-active --quiet gunicorn 2>/dev/null; then
    echo "Restarting gunicorn..."
    systemctl restart gunicorn
else
    echo "Restart lscpd or gunicorn manually so the new code is loaded."
fi
echo "[$(date -Iseconds)] Done. Test: https://YOUR_IP:2087/docker/containers"

View File

@@ -13,10 +13,12 @@ import json
try:
from plogical.dnsUtilities import DNS
from loginSystem.models import Administrator
from .models import Domains,Records
from .models import Domains, Records
from plogical.mailUtilities import mailUtilities
except:
pass
from websiteFunctions.models import Websites, ChildDomains
except Exception:
Websites = None
ChildDomains = None
import os
from re import match,I,M
from plogical.acl import ACLManager
@@ -33,10 +35,18 @@ class DNSManager:
self.extraArgs = extraArgs
def loadCFKeys(self):
cfFile = '%s%s' % (DNS.CFPath, self.admin.userName)
data = open(cfFile, 'r').readlines()
self.email = data[0].rstrip('\n')
self.key = data[1].rstrip('\n')
self.email = ''
self.key = ''
try:
cfFile = '%s%s' % (DNS.CFPath, self.admin.userName)
with open(cfFile, 'r') as f:
data = f.readlines()
if len(data) >= 1:
self.email = (data[0] or '').rstrip('\n')
if len(data) >= 2:
self.key = (data[1] or '').rstrip('\n')
except (IOError, OSError, IndexError) as e:
logging.CyberCPLogFileWriter.writeToFile('loadCFKeys: %s' % str(e))
def loadDNSHome(self, request = None, userID = None):
admin = Administrator.objects.get(pk=userID)
@@ -636,33 +646,47 @@ class DNSManager:
return HttpResponse(final_json)
def addDeleteDNSRecordsCloudFlare(self, request = None, userID = None):
currentACL = ACLManager.loadedACL(userID)
if not os.path.exists('/home/cyberpanel/powerdns'):
status = 0
else:
status = 1
admin = Administrator.objects.get(pk=userID)
try:
currentACL = ACLManager.loadedACL(userID)
if not os.path.exists('/home/cyberpanel/powerdns'):
status = 0
else:
status = 1
admin = Administrator.objects.get(pk=userID)
CloudFlare = 0
CloudFlare = 0
domainsList = []
cfEmail = ''
cfToken = ''
cfPath = '%s%s' % (DNS.CFPath, admin.userName)
cfPath = '%s%s' % (DNS.CFPath, admin.userName)
if os.path.exists(cfPath):
self.admin = admin
self.loadCFKeys()
cfEmail = getattr(self, 'email', '') or ''
cfToken = getattr(self, 'key', '') or ''
if cfEmail or cfToken:
CloudFlare = 1
try:
allDomains = ACLManager.findAllDomains(currentACL, userID)
domainsList = [domain for domain in allDomains if domain.count('.') == 1]
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile('addDeleteDNSRecordsCloudFlare findAllDomains: %s' % str(e))
domainsList = []
if os.path.exists(cfPath):
CloudFlare = 1
allDomains = ACLManager.findAllDomains(currentACL, userID)
# Filter to only show main domains (domains with exactly one dot, e.g., "example.com")
# Sub-domains have two or more dots (e.g., "subdomain.example.com")
domainsList = [domain for domain in allDomains if domain.count('.') == 1]
self.admin = admin
self.loadCFKeys()
data = {"domainsList": domainsList, "status": status, 'CloudFlare': CloudFlare, 'cfEmail': self.email,
'cfToken': self.key}
else:
data = {"status": status, 'CloudFlare': CloudFlare}
template = 'dns/addDeleteDNSRecordsCloudFlare.html'
proc = httpProc(request, template, data, 'addDeleteRecords')
return proc.render()
data = {
"domainsList": domainsList,
"status": status,
'CloudFlare': CloudFlare,
'cfEmail': cfEmail,
'cfToken': cfToken,
}
template = 'dns/addDeleteDNSRecordsCloudFlare.html'
proc = httpProc(request, template, data, 'addDeleteRecords')
return proc.render()
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile('addDeleteDNSRecordsCloudFlare: %s' % str(e))
raise
def saveCFConfigs(self, userID = None, data = None):
try:
@@ -847,6 +871,455 @@ class DNSManager:
final_json = json.dumps(final_dic)
return HttpResponse(final_json)
def getExportRecordsCloudFlare(self, userID=None, data=None):
    """Fetch all DNS records for a zone (all types) for export. Returns JSON list.

    Expects data['selectedZone']. Responds with an HttpResponse whose JSON body
    carries status/fetchStatus flags, an error_message, and 'data' holding a
    JSON-encoded list of record dicts (double-encoded on purpose for the frontend).
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        # Permission gate: only users allowed to add/delete records may export.
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('fetchStatus', 0)
        zone_domain = data.get('selectedZone', '').strip()
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': 'Zone is required.', 'data': '[]'})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        # The requester must own (or have ACL visibility over) the zone.
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        # loadCFKeys populates self.email / self.key for the CloudFlare client.
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': 'Zone not found.', 'data': '[]'})
            return HttpResponse(final_json)
        # Name filter can match several zones; pick deterministically by name.
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        all_records = []
        page = 1
        per_page = 100
        # Page through the record listing until an empty or short page is returned.
        while True:
            try:
                dns_records = cf.zones.dns_records.get(zone_id, params={'per_page': per_page, 'page': page})
            except BaseException as e:
                final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': str(e), 'data': '[]'})
                return HttpResponse(final_json)
            if not dns_records:
                break
            for dns_record in dns_records:
                # CloudFlare encodes "automatic TTL" as 1; surface it as 'AUTO'.
                ttl = 'AUTO' if dns_record.get('ttl') == 1 else dns_record.get('ttl', 3600)
                prio = dns_record.get('priority') or 0
                all_records.append({
                    'id': dns_record.get('id'),
                    'type': dns_record.get('type'),
                    'name': dns_record.get('name'),
                    'content': dns_record.get('content'),
                    'priority': prio,
                    'ttl': ttl,
                    'proxy': dns_record.get('proxied', False),
                    'proxiable': dns_record.get('proxiable', False),
                })
            if len(dns_records) < per_page:
                break
            page += 1
        # 'data' is itself JSON-encoded: the frontend expects a string payload.
        final_json = json.dumps({
            'status': 1,
            'fetchStatus': 1,
            'error_message': '',
            'data': json.dumps(all_records),
        }, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        # Broad catch keeps the endpoint returning JSON instead of an HTML 500.
        final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': str(msg), 'data': '[]'})
        return HttpResponse(final_json)
def clearAllDNSRecordsCloudFlare(self, userID=None, data=None):
    """Delete all DNS records for a zone. Returns list of deleted records for local backup/restore.

    SOA and NS records are skipped (they are zone-structural). Each record's
    snapshot is appended to 'deleted_records' BEFORE the delete call, so even a
    partial failure reports everything that was actually removed.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        # Permission gate: only users allowed to add/delete records may clear.
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('delete_status', 0)
        zone_domain = data.get('selectedZone', '').strip()
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': 'Zone is required.', 'deleted_records': []})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        # The requester must own (or have ACL visibility over) the zone.
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        # loadCFKeys populates self.email / self.key for the CloudFlare client.
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': 'Zone not found.', 'deleted_records': []})
            return HttpResponse(final_json)
        # Name filter can match several zones; pick deterministically by name.
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        deleted = []
        page = 1
        per_page = 100
        # Page through the listing, deleting as we go. NOTE(review): deleting
        # while paging assumes the API tolerates the shrinking result set;
        # a short page terminates the loop either way.
        while True:
            try:
                dns_records = cf.zones.dns_records.get(zone_id, params={'per_page': per_page, 'page': page})
            except BaseException as e:
                # Report what was already deleted so the caller can restore.
                final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': str(e), 'deleted_records': deleted})
                return HttpResponse(final_json)
            if not dns_records:
                break
            for dns_record in dns_records:
                rec_type = dns_record.get('type', '')
                # Never delete zone-structural record types.
                if rec_type in ('SOA', 'NS'):
                    continue
                rec_id = dns_record.get('id')
                # CloudFlare encodes "automatic TTL" as 1; surface it as 'AUTO'.
                ttl = 'AUTO' if dns_record.get('ttl') == 1 else dns_record.get('ttl', 3600)
                prio = dns_record.get('priority') or 0
                # Snapshot before delete: preserved even if the delete call fails.
                deleted.append({
                    'id': rec_id,
                    'type': rec_type,
                    'name': dns_record.get('name'),
                    'content': dns_record.get('content'),
                    'priority': prio,
                    'ttl': ttl,
                    'proxy': dns_record.get('proxied', False),
                    'proxiable': dns_record.get('proxiable', False),
                })
                try:
                    cf.zones.dns_records.delete(zone_id, rec_id)
                except BaseException as e:
                    final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': str(e), 'deleted_records': deleted})
                    return HttpResponse(final_json)
            if len(dns_records) < per_page:
                break
            page += 1
        final_json = json.dumps({'status': 1, 'delete_status': 1, 'error_message': '', 'deleted_records': deleted}, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        # Broad catch keeps the endpoint returning JSON instead of an HTML 500.
        final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': str(msg), 'deleted_records': []})
        return HttpResponse(final_json)
def importDNSRecordsCloudFlare(self, userID=None, data=None):
    """Import DNS records from a list. Creates each record via CloudFlare API.

    Expects data['selectedZone'] and data['records'] (list of dicts with name,
    type, content and optional ttl/priority/proxy/proxiable). Per-record
    failures are collected in 'failed' instead of aborting the whole import.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        # Permission gate: only users allowed to add/delete records may import.
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('import_status', 0)
        zone_domain = data.get('selectedZone', '').strip()
        records = data.get('records', [])
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'import_status': 0, 'error_message': 'Zone is required.', 'imported': 0, 'failed': []})
            return HttpResponse(final_json)
        if not isinstance(records, list):
            final_json = json.dumps({'status': 0, 'import_status': 0, 'error_message': 'records must be a list.', 'imported': 0, 'failed': []})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        # The requester must own (or have ACL visibility over) the zone.
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        # loadCFKeys populates self.email / self.key for the CloudFlare client.
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'import_status': 0, 'error_message': 'Zone not found.', 'imported': 0, 'failed': []})
            return HttpResponse(final_json)
        # Name filter can match several zones; pick deterministically by name.
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        imported = 0
        failed = []
        for rec in records:
            name = (rec.get('name') or '').strip()
            rec_type = (rec.get('type') or '').strip().upper()
            content = (rec.get('content') or '').strip()
            # Minimal validation; record the failure and keep importing the rest.
            if not name or not rec_type or not content:
                failed.append({'name': name or '(empty)', 'error': 'Name, type and content required.'})
                continue
            # TTL normalization: 'AUTO' / 1 mean CloudFlare automatic TTL (1);
            # negative values coerce to automatic, values above a day clamp to 86400,
            # and non-numeric input falls back to 3600.
            ttl_val = rec.get('ttl', 3600)
            if ttl_val == 'AUTO' or ttl_val == 1:
                ttl_int = 1
            else:
                try:
                    ttl_int = int(ttl_val)
                    if ttl_int < 0:
                        ttl_int = 1
                    elif ttl_int > 86400 and ttl_int != 1:
                        ttl_int = 86400
                except (ValueError, TypeError):
                    ttl_int = 3600
            # Priority defaults to 0 when absent or non-numeric (relevant for MX/SRV).
            priority = 0
            try:
                priority = int(rec.get('priority', 0) or 0)
            except (ValueError, TypeError):
                pass
            # Only proxy when the record asks for it AND claims to be proxiable.
            proxied = bool(rec.get('proxy', False) and rec.get('proxiable', True))
            try:
                DNS.createDNSRecordCloudFlare(cf, zone_id, name, rec_type, content, priority, ttl_int, proxied=proxied)
                imported += 1
            except BaseException as e:
                failed.append({'name': name, 'error': str(e)})
        final_json = json.dumps({
            'status': 1,
            'import_status': 1,
            'error_message': '',
            'imported': imported,
            'failed': failed,
        }, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        # Broad catch keeps the endpoint returning JSON instead of an HTML 500.
        final_json = json.dumps({'status': 0, 'import_status': 0, 'error_message': str(msg), 'imported': 0, 'failed': []})
        return HttpResponse(final_json)
def _get_valid_hostnames_for_zone(self, zone_domain):
    """Return the set of hostnames still known to the panel for this zone.

    Always contains the zone's own domain (lower-cased, stripped). When the
    Websites/ChildDomains models are importable, the website's domain and all
    of its child domains are added as well.

    Args:
        zone_domain: zone apex domain, e.g. "example.com".

    Returns:
        set[str] of lower-cased hostnames.
    """
    valid = set()
    valid.add(zone_domain.lower().strip())
    # Models may be unavailable (optional import elsewhere in this module).
    if Websites is None or ChildDomains is None:
        return valid
    try:
        website = Websites.objects.get(domain=zone_domain)
        valid.add(website.domain.lower())
        for child in website.childdomains_set.all():
            valid.add(child.domain.lower())
    except Exception:
        # Best-effort: a missing website row or any DB error simply means we
        # only know the zone apex. (Websites.DoesNotExist is an Exception
        # subclass, so listing it separately was redundant.)
        pass
    return valid
def getStaleDNSRecordsCloudFlare(self, userID=None, data=None):
    """List DNS records pointing at hostnames no longer present in the panel.

    Only A, AAAA and CNAME records are inspected, since only those name a
    host that can become orphaned. A record is "stale" when its FQDN is not
    among the hostnames returned by _get_valid_hostnames_for_zone.

    Args:
        userID: session user primary key (Administrator).
        data: request payload; expects 'selectedZone'.

    Returns:
        HttpResponse with JSON: status, fetchStatus, error_message,
        stale_records (list of record dicts with id/type/name/content/
        priority/ttl/proxy).
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('fetchStatus', 0)
        zone_domain = data.get('selectedZone', '').strip()
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': 'Zone is required.', 'stale_records': []})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        valid_hostnames = self._get_valid_hostnames_for_zone(zone_domain)
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': 'Zone not found.', 'stale_records': []})
            return HttpResponse(final_json)
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        stale = []
        page = 1
        per_page = 100
        # Only consider A, AAAA, CNAME as "subdomain" records that can be stale
        host_record_types = ('A', 'AAAA', 'CNAME')
        while True:
            try:
                dns_records = cf.zones.dns_records.get(zone_id, params={'per_page': per_page, 'page': page})
            except BaseException as e:
                final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': str(e), 'stale_records': []})
                return HttpResponse(final_json)
            if not dns_records:
                break
            for dns_record in dns_records:
                rec_type = (dns_record.get('type') or '').strip().upper()
                if rec_type not in host_record_types:
                    continue
                name = (dns_record.get('name') or '').strip()
                if not name:
                    continue
                fqdn = name.lower().rstrip('.')
                if fqdn in valid_hostnames:
                    continue
                # CloudFlare encodes "automatic TTL" as the integer 1.
                ttl = 'AUTO' if dns_record.get('ttl') == 1 else dns_record.get('ttl', 3600)
                stale.append({
                    'id': dns_record.get('id'),
                    'type': rec_type,
                    'name': name,
                    'content': dns_record.get('content', ''),
                    'priority': dns_record.get('priority') or 0,
                    'ttl': ttl,
                    'proxy': dns_record.get('proxied', False),
                })
            if len(dns_records) < per_page:
                # A short page means we just read the last page.
                break
            page += 1
        final_json = json.dumps({
            'status': 1,
            'fetchStatus': 1,
            'error_message': '',
            'stale_records': stale,
        }, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        final_json = json.dumps({'status': 0, 'fetchStatus': 0, 'error_message': str(msg), 'stale_records': []})
        return HttpResponse(final_json)
def removeStaleDNSRecordsCloudFlare(self, userID=None, data=None):
    """Delete stale DNS records, returning the deleted records for backup.

    If data['ids'] is non-empty, exactly those record ids are deleted.
    Otherwise the zone is scanned (same A/AAAA/CNAME + valid-hostname logic
    as getStaleDNSRecordsCloudFlare) and every orphan found is deleted.
    Each record is fetched before deletion so the caller receives a copy it
    can restore from.

    Args:
        userID: session user primary key (Administrator).
        data: request payload; expects 'selectedZone' and optional 'ids'.

    Returns:
        HttpResponse with JSON: status, delete_status, error_message,
        deleted_records. On a mid-loop failure, deleted_records holds the
        records removed so far.
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('delete_status', 0)
        zone_domain = data.get('selectedZone', '').strip()
        ids_to_remove = data.get('ids', [])
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': 'Zone is required.', 'deleted_records': []})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': 'Zone not found.', 'deleted_records': []})
            return HttpResponse(final_json)
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        if not ids_to_remove:
            # No explicit ids supplied: scan the zone for orphans ourselves.
            valid_hostnames = self._get_valid_hostnames_for_zone(zone_domain)
            host_record_types = ('A', 'AAAA', 'CNAME')
            page = 1
            per_page = 100
            while True:
                dns_records = cf.zones.dns_records.get(zone_id, params={'per_page': per_page, 'page': page})
                if not dns_records:
                    break
                for dns_record in dns_records:
                    rec_type = (dns_record.get('type') or '').strip().upper()
                    if rec_type not in host_record_types:
                        continue
                    name = (dns_record.get('name') or '').strip()
                    if not name:
                        continue
                    fqdn = name.lower().rstrip('.')
                    if fqdn not in valid_hostnames:
                        ids_to_remove.append(dns_record.get('id'))
                if len(dns_records) < per_page:
                    break
                page += 1
        deleted = []
        for rec_id in ids_to_remove:
            try:
                # Fetch first so the caller gets a restorable copy.
                rec = cf.zones.dns_records.get(zone_id, rec_id)
                ttl = 'AUTO' if rec.get('ttl') == 1 else rec.get('ttl', 3600)
                deleted.append({
                    'id': rec_id,
                    'type': rec.get('type'),
                    'name': rec.get('name'),
                    'content': rec.get('content'),
                    'priority': rec.get('priority') or 0,
                    'ttl': ttl,
                    'proxy': rec.get('proxied', False),
                })
                cf.zones.dns_records.delete(zone_id, rec_id)
            except BaseException as e:
                final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': str(e), 'deleted_records': deleted})
                return HttpResponse(final_json)
        final_json = json.dumps({'status': 1, 'delete_status': 1, 'error_message': '', 'deleted_records': deleted}, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        final_json = json.dumps({'status': 0, 'delete_status': 0, 'error_message': str(msg), 'deleted_records': []})
        return HttpResponse(final_json)
def fixDNSRecordsCloudFlare(self, userID=None, data=None):
    """Ensure every panel hostname in a zone has A (and optionally AAAA) records.

    Scans all existing records first so that a hostname carrying a CNAME is
    skipped entirely (A/AAAA cannot coexist with CNAME on the same name).
    Missing A records are created with the server IP; missing AAAA records
    with the server IPv6 address when one is available.

    Args:
        userID: session user primary key (Administrator).
        data: request payload; expects 'selectedZone'.

    Returns:
        HttpResponse with JSON: status, fix_status, error_message,
        added (records created), skipped (records already present or
        blocked by a CNAME).
    """
    try:
        currentACL = ACLManager.loadedACL(userID)
        if ACLManager.currentContextPermission(currentACL, 'addDeleteRecords') == 0:
            return ACLManager.loadErrorJson('fix_status', 0)
        zone_domain = data.get('selectedZone', '').strip()
        if not zone_domain:
            final_json = json.dumps({'status': 0, 'fix_status': 0, 'error_message': 'Zone is required.', 'added': 0, 'skipped': 0})
            return HttpResponse(final_json)
        admin = Administrator.objects.get(pk=userID)
        self.admin = admin
        if ACLManager.checkOwnershipZone(zone_domain, admin, currentACL) != 1:
            return ACLManager.loadErrorJson()
        valid_hostnames = self._get_valid_hostnames_for_zone(zone_domain)
        if not valid_hostnames:
            final_json = json.dumps({'status': 1, 'fix_status': 1, 'error_message': '', 'added': 0, 'skipped': 0})
            return HttpResponse(final_json)
        self.loadCFKeys()
        params = {'name': zone_domain, 'per_page': 50}
        cf = CloudFlare.CloudFlare(email=self.email, token=self.key)
        zones = cf.zones.get(params=params)
        if not zones:
            final_json = json.dumps({'status': 0, 'fix_status': 0, 'error_message': 'Zone not found.', 'added': 0, 'skipped': 0})
            return HttpResponse(final_json)
        zone_id = sorted(zones, key=lambda v: v['name'])[0]['id']
        existing = set()                # (name, type) pairs for A/AAAA/CNAME
        existing_types_by_name = {}     # name -> set of ALL record types on it
        page = 1
        per_page = 100
        while True:
            try:
                dns_records = cf.zones.dns_records.get(zone_id, params={'per_page': per_page, 'page': page})
            except BaseException as e:
                final_json = json.dumps({'status': 0, 'fix_status': 0, 'error_message': str(e), 'added': 0, 'skipped': 0})
                return HttpResponse(final_json)
            if not dns_records:
                break
            for rec in dns_records:
                n = (rec.get('name') or '').lower().rstrip('.')
                t = (rec.get('type') or '').strip().upper()
                if not n or not t:
                    continue
                existing_types_by_name.setdefault(n, set()).add(t)
                if t in ('A', 'AAAA', 'CNAME'):
                    existing.add((n, t))
            if len(dns_records) < per_page:
                break
            page += 1
        server_ip = None
        try:
            server_ip = ACLManager.GetServerIP()
        except Exception:
            pass
        server_ipv6 = None
        try:
            server_ipv6 = ACLManager.GetServerIPv6()
        except Exception:
            pass
        ttl = 3600
        added = 0
        skipped = 0
        for hostname in valid_hostnames:
            name_lower = hostname.lower().rstrip('.')
            host_types = existing_types_by_name.get(name_lower, set())
            has_cname = 'CNAME' in host_types
            # A/AAAA cannot coexist with CNAME on same hostname.
            if has_cname:
                skipped += 2 if server_ipv6 else 1
                continue
            if (name_lower, 'A') not in existing and server_ip:
                try:
                    DNS.createDNSRecordCloudFlare(cf, zone_id, hostname, 'A', server_ip, 0, ttl)
                    existing.add((name_lower, 'A'))
                    existing_types_by_name.setdefault(name_lower, set()).add('A')
                    added += 1
                except BaseException as e:
                    # A-record failures abort the whole fix with an error.
                    final_json = json.dumps({'status': 0, 'fix_status': 0, 'error_message': str(e), 'added': added, 'skipped': skipped})
                    return HttpResponse(final_json)
            elif (name_lower, 'A') in existing:
                skipped += 1
            if server_ipv6 and (name_lower, 'AAAA') not in existing:
                try:
                    DNS.createDNSRecordCloudFlare(cf, zone_id, hostname, 'AAAA', server_ipv6, 0, ttl)
                    existing.add((name_lower, 'AAAA'))
                    existing_types_by_name.setdefault(name_lower, set()).add('AAAA')
                    added += 1
                except BaseException:
                    # AAAA creation is best-effort; failures are deliberately
                    # ignored (original behavior) rather than aborting.
                    pass
            elif (name_lower, 'AAAA') in existing:
                skipped += 1
        final_json = json.dumps({'status': 1, 'fix_status': 1, 'error_message': '', 'added': added, 'skipped': skipped}, default=str)
        return HttpResponse(final_json)
    except BaseException as msg:
        final_json = json.dumps({'status': 0, 'fix_status': 0, 'error_message': str(msg), 'added': 0, 'skipped': 0})
        return HttpResponse(final_json)
def updateDNSRecordCloudFlare(self, userID=None, data=None):
"""Update an existing CloudFlare DNS record (name, type, ttl, content, priority, proxied)."""
try:
@@ -897,7 +1370,7 @@ class DNSManager:
zone_id = zone_list[0]['id']
update_data = {'name': name, 'type': record_type, 'content': content, 'ttl': ttl_int, 'priority': priority_int}
if record_type in ['A', 'CNAME']:
if record_type in ['A', 'AAAA', 'CNAME']:
update_data['proxied'] = bool(proxied)
cf.zones.dns_records.put(zone_id, record_id, data=update_data)

View File

@@ -496,9 +496,33 @@ app.controller('addModifyDNSRecords', function ($scope, $http) {
}
};
$scope.deleteRecord = function (id) {
// Ask the user to confirm deleting a single DNS record, keep a local copy
// in cfDeletedBackup[zone] (so Restore can re-create it), then delegate the
// actual deletion to $scope.deleteRecord.
$scope.confirmDeleteRecord = function (record) {
    var msg = 'Delete DNS record?\n\nName: ' + (record.name || '') + '\nType: ' + (record.type || '') + '\nValue: ' + (record.content || '');
    if (!$window.confirm(msg)) {
        return;
    }
    var zone = $scope.selectedZone;
    if (!zone) {
        return;
    }
    if (!$scope.cfDeletedBackup[zone]) {
        $scope.cfDeletedBackup[zone] = [];
    }
    // Snapshot the record in the shape expected by importDNSRecordsCloudFlare.
    $scope.cfDeletedBackup[zone].push({
        type: record.type,
        name: record.name,
        content: record.content,
        priority: parseInt(record.priority, 10) || 0,
        // ttlNum is preferred when present; fall back to ttl, then 3600.
        ttl: record.ttlNum || record.ttl || 3600,
        proxy: record.proxy,
        // proxiable defaults to true unless explicitly false.
        proxiable: record.proxiable !== false
    });
    $scope.deleteRecord(record.id);
};
var selectedZone = $scope.selectedZone;
$scope.deleteRecord = function (id) {
var selectedZone = $scope.selectedZone;
url = "/dns/deleteDNSRecord";
@@ -732,6 +756,22 @@ app.controller('configureDefaultNameservers', function ($scope, $http) {
/* Java script code for CloudFlare */
// Bridges a native <input type="file"> change event into the Angular scope:
// hands the FileList to scope.onImportFile inside $apply, then clears the
// input value so selecting the same file again still fires 'change'.
app.directive('cfImportFile', function () {
    var linkFn = function (scope, element) {
        var handleChange = function (ev) {
            var fileList = ev.target && ev.target.files;
            if (fileList && fileList.length && scope.onImportFile) {
                scope.$apply(function () {
                    scope.onImportFile(fileList);
                });
            }
            ev.target.value = '';
        };
        element.on('change', handleChange);
    };
    return { link: linkFn };
});
app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window) {
$scope.saveCFConfigs = function () {
@@ -813,6 +853,14 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
$scope.couldNotAddRecord = true;
$scope.recordValueDefault = false;
$scope.records = [];
$scope.cfDeletedBackup = {};
$scope.exportLoading = false;
$scope.clearAllLoading = false;
$scope.restoreLoading = false;
$scope.staleRecords = [];
$scope.staleModalVisible = false;
$scope.staleLoading = false;
$scope.fixDNSLoading = false;
// Hide records boxes
$(".aaaaRecord").hide();
@@ -1140,6 +1188,221 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
};
// True when the currently selected zone has locally backed-up deleted
// records, i.e. when the Restore button should be shown.
$scope.hasBackupForZone = function () {
    var currentZone = $scope.selectedZone;
    if (!currentZone) {
        return false;
    }
    var backup = $scope.cfDeletedBackup[currentZone];
    return backup && backup.length > 0;
};
// Delete ALL DNS records in the selected CloudFlare zone after a double
// confirmation (dialog + typed zone name). The server returns the deleted
// records, which are stored in cfDeletedBackup[zone] so Restore can undo.
$scope.confirmClearAll = function () {
    var zone = $scope.selectedZone;
    if (!zone) return;
    var msg1 = 'This will remove ALL DNS records for this zone in CloudFlare. This action cannot be undone on CloudFlare.\n\nA local copy will be kept so you can use Restore.\n\nContinue?';
    if (!$window.confirm(msg1)) return;
    var msg2 = 'Type the zone name below to confirm:\n\n' + zone;
    var typed = $window.prompt(msg2);
    if (typed === null) return;
    if (typed.trim() !== zone) {
        new PNotify({ title: 'Cancelled', text: 'Zone name did not match. No records were deleted.', type: 'warning' });
        return;
    }
    $scope.clearAllLoading = true;
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/clearAllDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.clearAllLoading = false;
        if (response.data.delete_status === 1 && response.data.deleted_records) {
            $scope.cfDeletedBackup[zone] = response.data.deleted_records;
            $scope.canNotFetchRecords = true;
            $scope.recordsFetched = false;
            $scope.recordDeleted = false;
            populateCurrentRecords();
            new PNotify({ title: 'Done', text: 'All DNS records were deleted. Use Restore to undo.', type: 'success' });
        } else {
            $scope.errorMessage = response.data.error_message || 'Clear all failed';
            new PNotify({ title: 'Error', text: $scope.errorMessage, type: 'error' });
        }
    }, function () {
        $scope.clearAllLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
// Re-import the locally backed-up deleted records for the selected zone via
// the import endpoint, then clear the backup and refresh the record list.
$scope.restoreFromBackup = function () {
    var zone = $scope.selectedZone;
    var list = $scope.cfDeletedBackup[zone];
    if (!zone || !list || list.length === 0) return;
    $scope.restoreLoading = true;
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/importDNSRecordsCloudFlare';
    var data = { selectedZone: zone, records: list };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.restoreLoading = false;
        if (response.data.import_status === 1) {
            $scope.cfDeletedBackup[zone] = [];
            populateCurrentRecords();
            var failed = response.data.failed || [];
            var msg = response.data.imported + ' record(s) restored.';
            if (failed.length) msg += ' ' + failed.length + ' failed.';
            new PNotify({ title: 'Restore done', text: msg, type: failed.length ? 'warning' : 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Restore failed', type: 'error' });
        }
    }, function () {
        $scope.restoreLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
// Download all DNS records for the selected zone as a JSON file using a
// Blob + temporary anchor element; the object URL is revoked afterwards.
$scope.exportRecords = function () {
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.exportLoading = true;
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/getExportRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.exportLoading = false;
        if (response.data.fetchStatus === 1 && response.data.data) {
            // Server may return the record list JSON-encoded or already parsed.
            var arr = typeof response.data.data === 'string' ? JSON.parse(response.data.data) : response.data.data;
            var blob = new Blob([JSON.stringify(arr, null, 2)], { type: 'application/json' });
            var a = document.createElement('a');
            a.href = (window.URL || window.webkitURL).createObjectURL(blob);
            a.download = 'dns-records-' + zone.replace(/\./g, '-') + '.json';
            a.click();
            if (a.href) (window.URL || window.webkitURL).revokeObjectURL(a.href);
            new PNotify({ title: 'Export done', text: 'DNS records downloaded.', type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Export failed', type: 'error' });
        }
    }, function () {
        $scope.exportLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
// Read a user-selected JSON file and POST its records to the import endpoint.
// Accepts a bare array, {records: [...]}, {data: [...]} or a single object.
$scope.onImportFile = function (files) {
    if (!files || !files.length) return;
    var zone = $scope.selectedZone;
    if (!zone) {
        new PNotify({ title: 'Error', text: 'Select a zone first.', type: 'error' });
        return;
    }
    var file = files[0];
    var reader = new FileReader();
    reader.onload = function (e) {
        var text = e.target && e.target.result;
        if (!text) {
            new PNotify({ title: 'Error', text: 'Could not read file.', type: 'error' });
            return;
        }
        var arr;
        try {
            arr = JSON.parse(text);
        } catch (err) {
            new PNotify({ title: 'Error', text: 'Invalid JSON: ' + (err.message || ''), type: 'error' });
            return;
        }
        // Normalize the various accepted shapes into an array of records.
        if (!Array.isArray(arr)) {
            if (arr && Array.isArray(arr.records)) arr = arr.records;
            else if (arr && arr.data) arr = Array.isArray(arr.data) ? arr.data : [arr.data];
            else arr = [arr];
        }
        // 'var' keeps url local; the original assignment leaked an implicit global.
        var url = '/dns/importDNSRecordsCloudFlare';
        var data = { selectedZone: zone, records: arr };
        var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
        $http.post(url, data, config).then(function (response) {
            if (response.data.import_status === 1) {
                populateCurrentRecords();
                var failed = response.data.failed || [];
                var msg = response.data.imported + ' record(s) imported.';
                if (failed.length) msg += ' ' + failed.length + ' failed.';
                new PNotify({ title: 'Import done', text: msg, type: failed.length ? 'warning' : 'success' });
            } else {
                new PNotify({ title: 'Error', text: response.data.error_message || 'Import failed', type: 'error' });
            }
        }, function () {
            new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
        });
    };
    reader.readAsText(file, 'UTF-8');
};
// Fetch orphan/stale DNS records for the selected zone and open the
// stale-records modal to display them.
$scope.checkStaleRecords = function () {
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.staleLoading = true;
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/getStaleDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.staleLoading = false;
        if (response.data.fetchStatus === 1) {
            $scope.staleRecords = response.data.stale_records || [];
            $scope.staleModalVisible = true;
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Could not fetch stale records', type: 'error' });
        }
    }, function () {
        $scope.staleLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
// Hide the stale-records modal and drop its record list.
$scope.closeStaleModal = function () {
    $scope.staleRecords = [];
    $scope.staleModalVisible = false;
};
// Ask the server to create missing A/AAAA records for every panel hostname
// in the selected zone, then refresh the record list and report counts.
$scope.fixDNS = function () {
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.fixDNSLoading = true;
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/fixDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.fixDNSLoading = false;
        if (response.data.fix_status === 1) {
            populateCurrentRecords();
            var msg = response.data.added + ' record(s) added.';
            if (response.data.skipped) msg += ' ' + response.data.skipped + ' already present.';
            new PNotify({ title: 'Fix DNS done', text: msg, type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Fix DNS failed', type: 'error' });
        }
    }, function () {
        $scope.fixDNSLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
// Delete all records currently listed in the stale-records modal (by id),
// keeping the server-returned copies in cfDeletedBackup[zone] for Restore.
$scope.removeStaleRecords = function () {
    if (!$scope.staleRecords || $scope.staleRecords.length === 0) return;
    var zone = $scope.selectedZone;
    var msg = 'Remove ' + $scope.staleRecords.length + ' orphan DNS record(s)? A local copy will be kept for Restore.';
    if (!$window.confirm(msg)) return;
    var ids = $scope.staleRecords.map(function (r) { return r.id; });
    // 'var' keeps url local; the original assignment leaked an implicit global.
    var url = '/dns/removeStaleDNSRecordsCloudFlare';
    var data = { selectedZone: zone, ids: ids };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        if (response.data.delete_status === 1 && response.data.deleted_records) {
            if (!$scope.cfDeletedBackup[zone]) $scope.cfDeletedBackup[zone] = [];
            $scope.cfDeletedBackup[zone] = $scope.cfDeletedBackup[zone].concat(response.data.deleted_records);
            $scope.closeStaleModal();
            populateCurrentRecords();
            new PNotify({ title: 'Done', text: response.data.deleted_records.length + ' orphan record(s) removed. Use Restore to undo.', type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Remove failed', type: 'error' });
        }
    }, function () {
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.syncCF = function () {
$scope.recordsLoading = false;

View File

@@ -959,6 +959,28 @@
{% trans "DNS Records" %}
</h4>
<div class="mb-3" ng-if="selectedZone" style="display: flex; flex-wrap: wrap; gap: 8px; align-items: center;">
<button type="button" ng-click="exportRecords()" class="btn-secondary" ng-disabled="exportLoading">
<i class="fas fa-download"></i> {% trans "Export" %}
</button>
<label class="btn-secondary" style="margin-bottom: 0; cursor: pointer;">
<i class="fas fa-upload"></i> {% trans "Import" %}
<input type="file" accept=".json,application/json" style="display: none;" cf-import-file>
</label>
<button type="button" ng-click="confirmClearAll()" class="btn-secondary" style="border-color: #ef4444; color: #ef4444;" ng-disabled="clearAllLoading">
<i class="fas fa-trash-alt"></i> {% trans "Clear all DNS records" %}
</button>
<button type="button" ng-click="restoreFromBackup()" class="btn-primary" ng-show="hasBackupForZone()" ng-disabled="restoreLoading">
<i class="fas fa-undo"></i> {% trans "Restore" %}
</button>
<button type="button" ng-click="checkStaleRecords()" class="btn-secondary" ng-disabled="staleLoading" title="{% trans 'Find DNS records for subdomains that no longer exist in the panel' %}">
<i class="fas fa-broom"></i> {% trans "Check orphan DNS" %}
</button>
<button type="button" ng-click="fixDNS()" class="btn-primary" ng-disabled="fixDNSLoading" title="{% trans 'Add A/AAAA records for all domains and subdomains in the panel; skip if already present' %}">
<i class="fas fa-wrench"></i> {% trans "Fix DNS" %}
</button>
</div>
<div class="dns-search-wrap mb-3" ng-if="!loadingRecords && records.length > 0">
<span class="dns-search-icon-left"><i class="fas fa-search"></i></span>
<input type="text" class="form-control dns-search-input" ng-model="dnsSearch.filter" placeholder="{% trans 'Search name, type, value...' %}" title="{% trans 'Search through all records' %}">
@@ -1040,7 +1062,7 @@
<td style="text-align: center;">
<i class="fas fa-trash delete-icon"
style="color: #ef4444; cursor: pointer;"
ng-click="deleteRecord(record.id)"
ng-click="confirmDeleteRecord(record)"
title="{% trans 'Delete Record' %}"></i>
</td>
</tr>
@@ -1128,6 +1150,46 @@
</div>
</div>
<!-- Stale / Orphan DNS Records Modal -->
<div class="edit-record-overlay" ng-show="staleModalVisible" ng-click="closeStaleModal()">
<div class="edit-record-modal" ng-click="$event.stopPropagation()" style="max-width: 600px;">
<h4 class="mb-4" style="color: var(--text-primary, #1e293b); font-weight: 600;">
<i class="fas fa-broom"></i> {% trans "Orphan / Stale DNS Records" %}
</h4>
<p class="text-muted small mb-3">{% trans "Records below point to subdomains or hostnames that no longer exist in the panel. You can remove them to clean up CloudFlare." %}</p>
<div ng-if="staleRecords.length === 0" class="alert alert-success">
<i class="fas fa-check-circle"></i> {% trans "No orphan records found." %}
</div>
<div ng-if="staleRecords.length > 0">
<table class="records-table activity-table" style="margin-bottom: 1rem;">
<thead>
<tr>
<th>{% trans "Name" %}</th>
<th>{% trans "Type" %}</th>
<th>{% trans "Value" %}</th>
</tr>
</thead>
<tbody>
<tr ng-repeat="r in staleRecords">
<td><strong ng-bind="r.name"></strong></td>
<td ng-bind="r.type"></td>
<td class="cell-value" ng-bind="r.content" title="{{ r.content }}"></td>
</tr>
</tbody>
</table>
<div style="display: flex; gap: 10px; justify-content: flex-end; margin-top: 1rem;">
<button type="button" class="btn-secondary" ng-click="closeStaleModal()">{% trans "Close" %}</button>
<button type="button" class="btn-primary" style="border-color: #ef4444; color: #ef4444;" ng-click="removeStaleRecords()">
<i class="fas fa-trash-alt"></i> {% trans "Remove all orphan records" %}
</button>
</div>
</div>
<div ng-if="staleRecords.length === 0" style="margin-top: 1rem;">
<button type="button" class="btn-secondary" ng-click="closeStaleModal()">{% trans "Close" %}</button>
</div>
</div>
</div>
<!-- Alert Messages -->
<div style="margin-top: 2rem;">
<div ng-hide="canNotFetchRecords" class="alert alert-danger">

View File

@@ -30,4 +30,10 @@ urlpatterns = [
re_path(r'^updateDNSRecordCloudFlare$', views.updateDNSRecordCloudFlare, name='updateDNSRecordCloudFlare'),
re_path(r'^syncCF$', views.syncCF, name='syncCF'),
re_path(r'^enableProxy$', views.enableProxy, name='enableProxy'),
re_path(r'^getExportRecordsCloudFlare$', views.getExportRecordsCloudFlare, name='getExportRecordsCloudFlare'),
re_path(r'^clearAllDNSRecordsCloudFlare$', views.clearAllDNSRecordsCloudFlare, name='clearAllDNSRecordsCloudFlare'),
re_path(r'^importDNSRecordsCloudFlare$', views.importDNSRecordsCloudFlare, name='importDNSRecordsCloudFlare'),
re_path(r'^getStaleDNSRecordsCloudFlare$', views.getStaleDNSRecordsCloudFlare, name='getStaleDNSRecordsCloudFlare'),
re_path(r'^removeStaleDNSRecordsCloudFlare$', views.removeStaleDNSRecordsCloudFlare, name='removeStaleDNSRecordsCloudFlare'),
re_path(r'^fixDNSRecordsCloudFlare$', views.fixDNSRecordsCloudFlare, name='fixDNSRecordsCloudFlare'),
]

View File

@@ -377,3 +377,57 @@ def enableProxy(request):
return redirect(loadLoginPage)
except (ValueError, TypeError):
return HttpResponse(json.dumps({'status': 0, 'error_message': 'Invalid request'}), status=400, content_type='application/json')
def getExportRecordsCloudFlare(request):
    # Thin view wrapper: export all CloudFlare records for a zone.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.getExportRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)
def clearAllDNSRecordsCloudFlare(request):
    # Thin view wrapper: delete every record in a CloudFlare zone.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.clearAllDNSRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)
def importDNSRecordsCloudFlare(request):
    # Thin view wrapper: bulk-create records in a CloudFlare zone.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.importDNSRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)
def getStaleDNSRecordsCloudFlare(request):
    # Thin view wrapper: list orphan/stale records for a zone.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.getStaleDNSRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)
def removeStaleDNSRecordsCloudFlare(request):
    # Thin view wrapper: delete orphan/stale records for a zone.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.removeStaleDNSRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)
def fixDNSRecordsCloudFlare(request):
    # Thin view wrapper: create missing A/AAAA records for panel hostnames.
    # Delegates to DNSManager with the parsed JSON request body.
    try:
        userID = request.session['userID']
        dm = DNSManager()
        return dm.fixDNSRecordsCloudFlare(userID, json.loads(request.body or '{}'))
    except KeyError:
        # No 'userID' in session -> not logged in.
        return redirect(loadLoginPage)

View File

@@ -16,6 +16,7 @@ import plogical.CyberCPLogFileWriter as logging
from plogical.errorSanitizer import secure_error_response, secure_log_error
from django.shortcuts import HttpResponse, render, redirect
from django.urls import reverse
from django.db.utils import OperationalError
from loginSystem.models import Administrator
import subprocess
import shlex
@@ -217,47 +218,72 @@ class ContainerManager(multi.Thread):
return HttpResponse('Operation failed')
def listContainers(self, request=None, userID=None, data=None):
client = docker.from_env()
dockerAPI = docker.APIClient()
def _render_list():
client = docker.from_env()
docker.APIClient() # ensure API is usable
currentACL = ACLManager.loadedACL(userID)
containers = ACLManager.findAllContainers(currentACL, userID)
currentACL = ACLManager.loadedACL(userID)
containers = ACLManager.findAllContainers(currentACL, userID)
allContainers = client.containers.list()
containersList = []
showUnlistedContainer = True
allContainers = client.containers.list()
showUnlistedContainer = True
# TODO: Add condition to show unlisted Containers only if user has admin level access
unlistedContainers = []
for container in allContainers:
if container.name not in containers:
unlistedContainers.append(container)
unlistedContainers = []
for container in allContainers:
if container.name not in containers:
unlistedContainers.append(container)
if not unlistedContainers:
showUnlistedContainer = False
if not unlistedContainers:
showUnlistedContainer = False
adminNames = ACLManager.loadAllUsers(userID)
adminNames = ACLManager.loadAllUsers(userID)
pages = float(len(containers)) / float(10)
pagination = []
pages = float(len(containers)) / float(10)
pagination = []
if pages <= 1.0:
pages = 1
pagination.append('<li><a href="\#"></a></li>')
else:
pages = ceil(pages)
finalPages = int(pages) + 1
if pages <= 1.0:
pages = 1
pagination.append('<li><a href="\#"></a></li>')
else:
pages = ceil(pages)
finalPages = int(pages) + 1
for i in range(1, finalPages):
pagination.append('<li><a href="\#">' + str(i) + '</a></li>')
for i in range(1, finalPages):
pagination.append('<li><a href="\#">' + str(i) + '</a></li>')
template = 'dockerManager/listContainers.html'
proc = httpProc(request, template, {"pagination": pagination,
"unlistedContainers": unlistedContainers,
"adminNames": adminNames,
"showUnlistedContainer": showUnlistedContainer}, 'admin')
return proc.render()
template = 'dockerManager/listContainers.html'
proc = httpProc(request, template, {"pagination": pagination,
"unlistedContainers": unlistedContainers,
"adminNames": adminNames,
"showUnlistedContainer": showUnlistedContainer}, 'admin')
return proc.render()
try:
return _render_list()
except OperationalError as e:
logging.writeToFile(
"Docker containers list: DB error (table may be missing). Running migrations. Error: %s" % str(e)
)
try:
from django.core.management import call_command
call_command('migrate', 'dockerManager', verbosity=0)
return _render_list()
except Exception as migrate_err:
logging.writeToFile(
"Docker containers list: migrate failed. Error: %s" % str(migrate_err)
)
return render(
request,
'baseTemplate/error.html',
{'error_message': 'Docker Manager database not ready. Please run upgrade or: manage.py migrate dockerManager'}
)
except Exception as e:
secure_log_error(e, 'docker_list_containers')
return render(
request,
'baseTemplate/error.html',
{'error_message': 'Containers list could not be loaded. Check error logs.'}
)
def getContainerLogs(self, userID=None, data=None):
try:

View File

@@ -2713,4 +2713,3 @@ app.controller('manageImages', function ($scope, $http) {
})
}
});

View File

@@ -186,14 +186,26 @@ def listContainersPage(request):
"""
try:
userID = request.session['userID']
currentACL = ACLManager.loadedACL(userID)
cm = ContainerManager()
resp = cm.listContainers(request, userID)
resp['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'
resp['Pragma'] = 'no-cache'
resp['Expires'] = '0'
if hasattr(resp, '__setitem__'):
resp['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'
resp['Pragma'] = 'no-cache'
resp['Expires'] = '0'
return resp
except KeyError:
return redirect(loadLoginPage)
except Exception as e:
from django.shortcuts import render
from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
logging.writeToFile("listContainersPage error: %s" % str(e))
return render(
request,
'baseTemplate/error.html',
{'error_message': 'Containers page could not be loaded. Check error logs.'},
status=500
)
@preDockerRun

View File

@@ -27,6 +27,27 @@ app.add_middleware(
SSH_USER = "your_website_user" # Replace with a real user for testing
AUTHORIZED_KEYS_PATH = f"/home/{SSH_USER}/.ssh/authorized_keys"
# Read the actual SSH port from sshd_config (fixes WebTerminal when SSH uses custom port)
def get_ssh_port(config_path: str = "/etc/ssh/sshd_config") -> int:
    """Return the SSH daemon's listening port parsed from sshd_config.

    Scans for the first effective ``Port`` directive (case-insensitive),
    ignoring blank lines, whole-line comments and trailing ``#`` comments.
    Fixes WebTerminal when SSH runs on a non-default port.

    Args:
        config_path: Location of sshd_config. Parameterized (default is the
            standard system path) so callers/tests can point at a fixture.

    Returns:
        The configured port, or 22 when the file cannot be read, contains
        no Port directive, or the directive's value is not an integer.
    """
    try:
        with open(config_path, "r") as f:
            for raw in f:
                # Drop trailing comments first; a leading '#' then yields an
                # empty string and the line is skipped.
                line = raw.split('#')[0].strip()
                if not line:
                    continue
                parts = line.split()
                if len(parts) >= 2 and parts[0].lower() == 'port':
                    # int() may raise on a malformed value; handled below.
                    port = int(parts[1])
                    logging.info(f"[get_ssh_port] SSH port detected: {port}")
                    return port
    except Exception as e:
        logging.warning(f"[get_ssh_port] Could not read sshd_config: {e}")
    logging.warning("[get_ssh_port] Falling back to default port 22")
    return 22
SSH_PORT = get_ssh_port()
# Helper to generate a keypair
def generate_ssh_keypair():
key = paramiko.RSAKey.generate(2048)
@@ -90,7 +111,8 @@ async def websocket_endpoint(websocket: WebSocket, token: str = Query(None), ssh
process = None
try:
conn = await asyncssh.connect(
"localhost",
"127.0.0.1",
port=SSH_PORT,
username=user,
client_keys=[keyfile_path],
known_hosts=None

47
fix-phpmyadmin-install.sh Executable file
View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Install/fix phpMyAdmin under /usr/local/CyberCP/public/phpmyadmin (creates signin + full app)
set -euo pipefail

readonly PUBLIC=/usr/local/CyberCP/public
readonly PMA_DIR=$PUBLIC/phpmyadmin
readonly VERSION=5.2.3
readonly TARBALL=$PUBLIC/phpmyadmin.tar.gz

echo "[$(date -Iseconds)] Installing phpMyAdmin to $PMA_DIR ..."
sudo mkdir -p "$PUBLIC"
sudo rm -rf "$PMA_DIR"

# Download and sanity-check size: a tiny tarball is an error page, not the app.
sudo wget -q -O "$TARBALL" "https://files.phpmyadmin.net/phpMyAdmin/${VERSION}/phpMyAdmin-${VERSION}-all-languages.tar.gz" || { echo "Download failed" >&2; exit 1; }
[ -f "$TARBALL" ] && [ "$(stat -c%s "$TARBALL")" -gt 1000000 ] || { echo "Tarball missing or too small" >&2; exit 1; }

sudo tar -xzf "$TARBALL" -C "$PUBLIC"
# Extracted dir is normally phpMyAdmin-X.Y.Z-all-languages; fall back to a glob
# in case the archive layout differs slightly between releases.
if [ -d "$PUBLIC/phpMyAdmin-${VERSION}-all-languages" ]; then
  sudo mv "$PUBLIC/phpMyAdmin-${VERSION}-all-languages" "$PMA_DIR"
else
  sudo mv "$PUBLIC/phpMyAdmin-"*"-all-languages" "$PMA_DIR" 2>/dev/null || true
fi
sudo rm -f "$TARBALL"
[ -d "$PMA_DIR" ] || { echo "phpmyadmin dir not created" >&2; exit 1; }

# Config: start from the shipped sample if present, set a fresh blowfish secret,
# then append the CyberPanel signon block (auth via phpmyadminsignin.php).
BLOWFISH=$(openssl rand -hex 16)
if [ -f "$PMA_DIR/config.sample.inc.php" ]; then
  sudo cp "$PMA_DIR/config.sample.inc.php" "$PMA_DIR/config.inc.php"
  sudo sed -i "s|blowfish_secret.*|blowfish_secret'] = '${BLOWFISH}';|" "$PMA_DIR/config.inc.php" 2>/dev/null || true
fi
sudo bash -c 'cat >> '"$PMA_DIR"'/config.inc.php << "PMACONF"
$i = 0;
$i++;
$cfg["Servers"][$i]["AllowNoPassword"] = false;
$cfg["Servers"][$i]["auth_type"] = "signon";
$cfg["Servers"][$i]["SignonSession"] = "SignonSession";
$cfg["Servers"][$i]["SignonURL"] = "phpmyadminsignin.php";
$cfg["Servers"][$i]["LogoutURL"] = "phpmyadminsignin.php?logout";
$cfg["Servers"][$i]["host"] = "127.0.0.1";
$cfg["Servers"][$i]["port"] = "3306";
$cfg["TempDir"] = "/usr/local/CyberCP/public/phpmyadmin/tmp";
PMACONF'
sudo mkdir -p "$PMA_DIR/tmp"
sudo cp /usr/local/CyberCP/plogical/phpmyadminsignin.php "$PMA_DIR/phpmyadminsignin.php"
sudo chown -R lscpd:lscpd "$PMA_DIR"
echo "[$(date -Iseconds)] phpMyAdmin install done. Test: https://YOUR_IP:2087/phpmyadmin/phpmyadminsignin.php"
exit 0

View File

@@ -55,6 +55,24 @@ FetchCloudLinuxAlmaVersionVersion = install_utils.FetchCloudLinuxAlmaVersionVers
get_distro = install_utils.get_distro
def _normalize_mariadb_version(ver):
"""Accept 10.3-10.11, 11.0-11.8, 12.0-12.x; return major.minor for repo or 11.8 if invalid."""
if not ver or not isinstance(ver, str):
return '11.8'
v = ver.strip()
m = re.match(r'^(\d+)\.(\d+)(?:\.\d+)*$', v)
if not m:
return '11.8'
major, minor = int(m.group(1)), int(m.group(2))
if major == 10 and 3 <= minor <= 11:
return '10.%d' % minor
if major == 11 and 0 <= minor <= 8:
return '11.%d' % minor
if major == 12 and 0 <= minor <= 99:
return '12.%d' % minor
return '11.8'
def get_Ubuntu_release():
release = install_utils.get_Ubuntu_release(use_print=False, exit_on_error=True)
if release == -1:
@@ -1871,7 +1889,7 @@ module cyberpanel_ols {
except (ValueError, TypeError):
pass
# Set up MariaDB repository only if not already installed (version from --mariadb-version: 10.11, 11.8 or 12.1)
# Set up MariaDB repository only if not already installed (version from --mariadb-version: 10.3-10.11, 11.0-11.8, 12.0-12.x)
mariadb_ver = getattr(preFlightsChecks, 'mariadb_version', '11.8')
command = f'curl -LsS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version={mariadb_ver}'
self.call(command, self.distro, command, command, 1, 1, os.EX_OSERR, True)
@@ -1904,9 +1922,14 @@ module cyberpanel_ols {
shell=True, timeout=5, capture_output=True
)
self.stdOut("Temporarily removed MariaDB-server from dnf exclude for installation (fallback)", 1)
# Install from official MariaDB repo (capitalized package names); --nobest for 10.11/11.8 on el9
# Install from official MariaDB repo (capitalized package names); --nobest for 10.x and 11.0-11.8 on el9
mariadb_packages = 'MariaDB-server MariaDB-client MariaDB-backup MariaDB-devel'
if mariadb_ver in ('10.11', '11.8'):
try:
maj_min = tuple(int(x) for x in mariadb_ver.split('.')[:2])
use_nobest = (maj_min[0] == 10) or (maj_min[0] == 11 and maj_min[1] <= 8)
except (ValueError, IndexError):
use_nobest = True
if use_nobest:
command = f'dnf install -y --nobest {mariadb_packages}'
else:
command = f'dnf install -y {mariadb_packages}'
@@ -3824,18 +3847,23 @@ class Migration(migrations.Migration):
except Exception:
pass
# Resolve phpMyAdmin version (same as upgrade path)
phpmyadmin_version = '5.2.3'
try:
from plogical.versionFetcher import get_latest_phpmyadmin_version
latest_version = get_latest_phpmyadmin_version()
if latest_version and latest_version != phpmyadmin_version:
self.stdOut(f"Using latest phpMyAdmin version: {latest_version}", 1)
phpmyadmin_version = latest_version
else:
self.stdOut(f"Using fallback phpMyAdmin version: {phpmyadmin_version}", 1)
except Exception as e:
self.stdOut(f"Failed to fetch latest phpMyAdmin version, using fallback: {e}", 1)
# Resolve phpMyAdmin version: CLI override (--phpmyadmin-version), else latest from API, else fallback
phpmyadmin_version = getattr(preFlightsChecks, 'phpmyadmin_version', None) or ''
phpmyadmin_version = (phpmyadmin_version or '').strip()
if not phpmyadmin_version or not re.match(r'^\d+\.\d+\.\d+$', phpmyadmin_version):
phpmyadmin_version = '5.2.3'
try:
from plogical.versionFetcher import get_latest_phpmyadmin_version
latest_version = get_latest_phpmyadmin_version()
if latest_version and re.match(r'^\d+\.\d+\.\d+$', latest_version):
self.stdOut(f"Using latest phpMyAdmin version: {latest_version}", 1)
phpmyadmin_version = latest_version
else:
self.stdOut(f"Using fallback phpMyAdmin version: {phpmyadmin_version}", 1)
except Exception as e:
self.stdOut(f"Failed to fetch latest phpMyAdmin version, using fallback: {e}", 1)
else:
self.stdOut(f"Using phpMyAdmin version: {phpmyadmin_version}", 1)
self.stdOut("Installing phpMyAdmin...", 1)
tarball = '/usr/local/CyberCP/public/phpmyadmin.tar.gz'
@@ -4544,27 +4572,40 @@ user_query = SELECT email as user, password, 'vmail' as uid, 'vmail' as gid, '/h
def downoad_and_install_raindloop(self):
try:
#######
if not os.path.exists("/usr/local/CyberCP/public"):
os.mkdir("/usr/local/CyberCP/public")
if os.path.exists("/usr/local/CyberCP/public/snappymail"):
return 0
# Version: CLI override (--snappymail-version), then latest from API, else class default
snappy_ver = getattr(preFlightsChecks, 'snappymail_version', None) or ''
snappy_ver = (snappy_ver or '').strip()
if not snappy_ver or not re.match(r'^\d+\.\d+(\.\d+)?$', snappy_ver):
try:
from plogical.versionFetcher import get_latest_snappymail_version
latest = get_latest_snappymail_version()
if latest and re.match(r'^\d+\.\d+', latest):
snappy_ver = latest
else:
snappy_ver = preFlightsChecks.SnappyVersion
except Exception:
snappy_ver = preFlightsChecks.SnappyVersion
self.stdOut("Using SnappyMail version: %s" % snappy_ver, 1)
os.chdir("/usr/local/CyberCP/public")
command = 'wget https://github.com/the-djmaze/snappymail/releases/download/v%s/snappymail-%s.zip' % (preFlightsChecks.SnappyVersion, preFlightsChecks.SnappyVersion)
command = 'wget https://github.com/the-djmaze/snappymail/releases/download/v%s/snappymail-%s.zip' % (snappy_ver, snappy_ver)
preFlightsChecks.call(command, self.distro, command, command, 1, 1, os.EX_OSERR)
#############
command = 'unzip snappymail-%s.zip -d /usr/local/CyberCP/public/snappymail' % (preFlightsChecks.SnappyVersion)
command = 'unzip snappymail-%s.zip -d /usr/local/CyberCP/public/snappymail' % (snappy_ver,)
preFlightsChecks.call(command, self.distro, command, command, 1, 1, os.EX_OSERR)
try:
os.remove("snappymail-%s.zip" % (preFlightsChecks.SnappyVersion))
os.remove("snappymail-%s.zip" % (snappy_ver,))
except:
pass
@@ -6616,14 +6657,19 @@ def main():
parser.add_argument('--mysqluser', help='MySQL user if remote is chosen.')
parser.add_argument('--mysqlpassword', help='MySQL password if remote is chosen.')
parser.add_argument('--mysqlport', help='MySQL port if remote is chosen.')
parser.add_argument('--mariadb-version', default='11.8', help='MariaDB version: 10.11, 11.8 (LTS, default) or 12.1')
parser.add_argument('--mariadb-version', default='11.8', help='MariaDB version: 10.3-10.11, 11.0-11.8, 12.0-12.x (default 11.8)')
parser.add_argument('--phpmyadmin-version', default='', help='phpMyAdmin version (e.g. 5.2.3); empty = latest from API')
parser.add_argument('--snappymail-version', default='', help='SnappyMail version (e.g. 2.38.2); empty = latest from API')
args = parser.parse_args()
# Normalize and validate MariaDB version choice (default 11.8)
mariadb_ver = (getattr(args, 'mariadb_version', None) or '11.8').strip()
if mariadb_ver not in ('10.11', '11.8', '12.1'):
mariadb_ver = '11.8'
mariadb_ver = _normalize_mariadb_version(getattr(args, 'mariadb_version', None) or '11.8')
preFlightsChecks.mariadb_version = mariadb_ver
# Optional phpMyAdmin/SnappyMail version overrides (empty = use latest from API)
if getattr(args, 'phpmyadmin_version', ''):
preFlightsChecks.phpmyadmin_version = (args.phpmyadmin_version or '').strip()
if getattr(args, 'snappymail_version', ''):
preFlightsChecks.snappymail_version = (args.snappymail_version or '').strip()
logging.InstallLog.ServerIP = args.publicip
logging.InstallLog.writeToFile("Starting CyberPanel installation..,10")

View File

@@ -8,10 +8,13 @@ if [[ $debug == "0" ]] ; then
fi
if [[ $debug == "1" ]] ; then
EXTRA_VER_ARGS=""
[[ -n "${PHPMYADMIN_VER:-}" ]] && EXTRA_VER_ARGS="$EXTRA_VER_ARGS --phpmyadmin-version ${PHPMYADMIN_VER}"
[[ -n "${SNAPPYMAIL_VER:-}" ]] && EXTRA_VER_ARGS="$EXTRA_VER_ARGS --snappymail-version ${SNAPPYMAIL_VER}"
if [[ $DEV == "ON" ]] ; then
/usr/local/CyberPanel/bin/python install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}"
/usr/local/CyberPanel/bin/python install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}" $EXTRA_VER_ARGS
else
/usr/local/CyberPanel/bin/python2 install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}"
/usr/local/CyberPanel/bin/python2 install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}" $EXTRA_VER_ARGS
fi
if grep "CyberPanel installation successfully completed" /var/log/installLogs.txt > /dev/null; then

View File

@@ -925,9 +925,12 @@ fi
if [[ $debug == "1" ]] ; then
if [[ $DEV == "ON" ]] ; then
/usr/local/CyberPanel/bin/python install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}"
EXTRA_VER_ARGS=""
[[ -n "${PHPMYADMIN_VER:-}" ]] && EXTRA_VER_ARGS="$EXTRA_VER_ARGS --phpmyadmin-version ${PHPMYADMIN_VER}"
[[ -n "${SNAPPYMAIL_VER:-}" ]] && EXTRA_VER_ARGS="$EXTRA_VER_ARGS --snappymail-version ${SNAPPYMAIL_VER}"
/usr/local/CyberPanel/bin/python install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}" $EXTRA_VER_ARGS
else
/usr/local/CyberPanel/bin/python2 install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}"
/usr/local/CyberPanel/bin/python2 install.py $SERVER_IP $SERIAL_NO $LICENSE_KEY --mariadb-version "${MARIADB_VER:-11.8}" $EXTRA_VER_ARGS
fi
if grep "CyberPanel installation successfully completed" /var/log/installLogs.txt > /dev/null; then

View File

@@ -87,11 +87,14 @@ install_cyberpanel_direct() {
# Ask MariaDB version (after web server choice) if not set via --mariadb-version
if [ -z "$MARIADB_VER" ]; then
echo ""
echo " MariaDB version: 10.11, 11.8 (LTS, default) or 12.1?"
read -r -t 60 -p " Enter 10.11, 11.8 or 12.1 [11.8]: " MARIADB_VER || true
echo " MariaDB version: 10.11, 11.8 (LTS, default), 12.1, 12.2, 12.3 or other X.Y?"
read -r -t 60 -p " Enter version [11.8]: " MARIADB_VER || true
MARIADB_VER="${MARIADB_VER:-11.8}"
MARIADB_VER="${MARIADB_VER// /}"
if [ "$MARIADB_VER" != "10.11" ] && [ "$MARIADB_VER" != "11.8" ] && [ "$MARIADB_VER" != "12.1" ]; then
# Normalize to major.minor (e.g. 12.3.1 -> 12.3)
if [[ "$MARIADB_VER" =~ ^([0-9]+)\.([0-9]+) ]]; then
MARIADB_VER="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
else
MARIADB_VER="11.8"
fi
echo " Using MariaDB $MARIADB_VER"

View File

@@ -165,6 +165,36 @@ def verifyLogin(request):
@ensure_csrf_cookie
def loadLoginPage(request):
    # Wrapper around _loadLoginPage that converts any unhandled exception into
    # a usable HTTP response instead of a bare 500, so the login page never
    # hard-fails silently. Handling order matters: log, then DB-specific
    # message, then degraded template render, then plain-text last resort.
    try:
        return _loadLoginPage(request)
    except Exception as e:
        # Best-effort logging; logging failures must not mask the original error.
        try:
            from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
            import traceback
            logging.writeToFile("loadLoginPage error: %s\n%s" % (str(e), traceback.format_exc()))
        except Exception:
            pass
        # User-friendly message for database connection errors
        # ('1045' is MySQL/MariaDB's access-denied error code).
        from django.db.utils import OperationalError
        err_str = str(e).lower()
        if isinstance(e, OperationalError) or 'access denied' in err_str or '1045' in err_str:
            msg = (
                "Database connection failed (Access denied for user 'cyberpanel'@'localhost'). "
                "Check: 1) MariaDB is running (systemctl status mariadb). "
                "2) Password in /etc/cyberpanel/mysqlPassword matches the MySQL user used by the panel. "
                "3) User exists: mysql -u root -p -e \"SELECT User,Host FROM mysql.user WHERE User='cyberpanel';\""
            )
            # 503 signals a transient server-side condition (DB down), not a client error.
            return HttpResponse(msg, status=503, content_type="text/plain; charset=utf-8")
        try:
            # Minimal cosmetic so template does not break (login.html uses cosmetic.MainDashboardCSS)
            class _MinimalCosmetic:
                MainDashboardCSS = ''
            return render(request, 'loginSystem/login.html', {'cosmetic': _MinimalCosmetic()})
        except Exception:
            # Template rendering itself failed: fall back to plain text.
            return HttpResponse("Server error. Check /home/cyberpanel/error-logs.txt", status=500, content_type="text/plain")
def _loadLoginPage(request):
try:
userID = request.session['userID']
currentACL = ACLManager.loadedACL(userID)

View File

@@ -1091,8 +1091,9 @@ class ACLManager:
Returns None if no IPv6 address is found
"""
try:
import ipaddress
import subprocess
# Get IPv6 addresses, exclude link-local (fe80::) and loopback (::1)
# Get IPv6 addresses and filter loopback/link-local with proper IP parsing.
result = subprocess.run(
['ip', '-6', 'addr', 'show'],
capture_output=True,
@@ -1103,14 +1104,19 @@ class ACLManager:
if result.returncode == 0:
lines = result.stdout.split('\n')
for line in lines:
if 'inet6' in line and '::1' not in line and 'fe80::' not in line:
# Extract IPv6 address (format: inet6 2a02:c207:2139:8929::1/64)
parts = line.strip().split()
if len(parts) >= 2:
ipv6 = parts[1].split('/')[0]
# Validate it's a real IPv6 (not link-local)
if not ipv6.startswith('fe80::'):
return ipv6
if 'inet6' not in line:
continue
# Expected format: "inet6 2a02:c207:2139:8929::1/64 scope global ..."
parts = line.strip().split()
if len(parts) < 2:
continue
ipv6 = parts[1].split('/')[0]
try:
ip_obj = ipaddress.ip_address(ipv6)
except ValueError:
continue
if ip_obj.version == 6 and not ip_obj.is_loopback and not ip_obj.is_link_local:
return str(ip_obj)
except Exception as e:
logging.CyberCPLogFileWriter.writeToFile(f'Error getting IPv6 address: {str(e)}')

View File

@@ -716,14 +716,18 @@ class DNS:
value = value.replace('\n\t', '')
value = value.replace('"', '')
# Only A and CNAME records can be proxied in CloudFlare
# Determine if proxy should be enabled (default: True for A/CNAME, except for mail domains)
if proxied is None and type in ['A', 'CNAME']:
# Check if this is a mail domain (starts with 'mail.' or contains 'mail.')
is_mail_domain = name.lower().startswith('mail.') or '.mail.' in name.lower()
# A, AAAA and CNAME records can be proxied in CloudFlare.
# Auto-enable proxy when Cloudflare is used, except for mail-related domains.
if proxied is None and type in ['A', 'AAAA', 'CNAME']:
name_lower = name.lower()
mail_prefixes = ('mail.', 'smtp.', 'imap.', 'pop3.', 'pop.', 'autodiscover.', 'webmail.')
is_mail_domain = (
any(name_lower.startswith(p) for p in mail_prefixes) or
any(f'.{p.rstrip(".")}.' in name_lower for p in mail_prefixes)
)
proxied = not is_mail_domain
elif type not in ['A', 'CNAME']:
# AAAA, MX, TXT, etc. cannot be proxied
elif type not in ['A', 'AAAA', 'CNAME']:
# MX, TXT, etc. cannot be proxied
proxied = False
if ttl > 0:
@@ -731,8 +735,8 @@ class DNS:
else:
dns_record = {'name': name, 'type': type, 'content': value, 'priority': priority}
# Only add proxied parameter for A and CNAME records
if type in ['A', 'CNAME']:
# Only add proxied parameter for proxy-capable record types.
if type in ['A', 'AAAA', 'CNAME']:
dns_record['proxied'] = proxied
cf.zones.dns_records.post(zone, data=dns_record)

View File

@@ -1194,27 +1194,58 @@ module cyberpanel_ols {
def download_install_phpmyadmin():
try:
cwd = os.getcwd()
pma_dir = '/usr/local/CyberCP/public/phpmyadmin'
tmp_config = '/tmp/cyberpanel_pma_config.inc.php'
tmp_signon = '/tmp/cyberpanel_pma_phpmyadminsignin.php'
if not os.path.exists("/usr/local/CyberCP/public"):
os.mkdir("/usr/local/CyberCP/public")
# Preserve existing config and signon before removing phpmyadmin (for up/downgrade)
saved_config = False
saved_signon = False
if os.path.isdir(pma_dir):
if os.path.isfile(os.path.join(pma_dir, 'config.inc.php')):
try:
shutil.copy2(os.path.join(pma_dir, 'config.inc.php'), tmp_config)
saved_config = True
except Exception:
pass
if os.path.isfile(os.path.join(pma_dir, 'phpmyadminsignin.php')):
try:
shutil.copy2(os.path.join(pma_dir, 'phpmyadminsignin.php'), tmp_signon)
saved_signon = True
except Exception:
pass
try:
shutil.rmtree("/usr/local/CyberCP/public/phpmyadmin")
except:
shutil.rmtree(pma_dir)
except Exception:
pass
# Try to fetch latest phpMyAdmin version from GitHub
phpmyadmin_version = '5.2.3' # Fallback version
try:
from plogical.versionFetcher import get_latest_phpmyadmin_version
latest_version = get_latest_phpmyadmin_version()
if latest_version and latest_version != phpmyadmin_version:
Upgrade.stdOut(f"Using latest phpMyAdmin version: {latest_version}", 0)
phpmyadmin_version = latest_version
else:
Upgrade.stdOut(f"Using fallback phpMyAdmin version: {phpmyadmin_version}", 0)
except Exception as e:
Upgrade.stdOut(f"Failed to fetch latest phpMyAdmin version, using fallback: {e}", 0)
# Version: /etc/cyberpanel/phpmyadmin_version, then latest from API, then fallback
phpmyadmin_version = '5.2.3'
version_file = '/etc/cyberpanel/phpmyadmin_version'
if os.path.isfile(version_file):
try:
with open(version_file, 'r') as f:
raw = (f.read() or '').strip()
if raw and len(raw) < 20 and all(c.isdigit() or c == '.' for c in raw):
phpmyadmin_version = raw
Upgrade.stdOut(f"Using phpMyAdmin version from {version_file}: {phpmyadmin_version}", 0)
except Exception:
pass
if phpmyadmin_version == '5.2.3':
try:
from plogical.versionFetcher import get_latest_phpmyadmin_version
latest_version = get_latest_phpmyadmin_version()
if latest_version and latest_version != phpmyadmin_version:
Upgrade.stdOut(f"Using latest phpMyAdmin version: {latest_version}", 0)
phpmyadmin_version = latest_version
else:
Upgrade.stdOut(f"Using fallback phpMyAdmin version: {phpmyadmin_version}", 0)
except Exception as e:
Upgrade.stdOut(f"Failed to fetch latest phpMyAdmin version, using fallback: {e}", 0)
Upgrade.stdOut("Installing phpMyAdmin...", 0)
@@ -1227,36 +1258,46 @@ module cyberpanel_ols {
command = 'tar -xzf /usr/local/CyberCP/public/phpmyadmin.tar.gz -C /usr/local/CyberCP/public/'
Upgrade.executioner_silent(command, 'Extract phpMyAdmin')
# Move extracted dir to phpmyadmin (support phpMyAdmin-X.Y.Z-all-languages or similar)
import glob
extracted = glob.glob('/usr/local/CyberCP/public/phpMyAdmin-*-all-languages')
if not extracted:
extracted = glob.glob('/usr/local/CyberCP/public/phpMyAdmin-*')
if extracted:
if os.path.exists('/usr/local/CyberCP/public/phpmyadmin'):
shutil.rmtree('/usr/local/CyberCP/public/phpmyadmin')
os.rename(extracted[0], '/usr/local/CyberCP/public/phpmyadmin')
if os.path.exists(pma_dir):
shutil.rmtree(pma_dir)
os.rename(extracted[0], pma_dir)
else:
Upgrade.executioner('mv /usr/local/CyberCP/public/phpMyAdmin-*-all-languages /usr/local/CyberCP/public/phpmyadmin', 0)
command = 'rm -f /usr/local/CyberCP/public/phpmyadmin.tar.gz'
Upgrade.executioner_silent(command, 'Cleanup phpMyAdmin tar.gz')
if not os.path.isdir('/usr/local/CyberCP/public/phpmyadmin'):
if not os.path.isdir(pma_dir):
raise RuntimeError('phpMyAdmin directory was not created after extract/mv')
Upgrade.stdOut("phpMyAdmin installation completed.", 0)
## Write secret phrase
rString = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])
data = open('/usr/local/CyberCP/public/phpmyadmin/config.sample.inc.php', 'r').readlines()
writeToFile = open('/usr/local/CyberCP/public/phpmyadmin/config.inc.php', 'w')
writeE = 1
phpMyAdminContent = """
# Restore preserved config/signon and apply minimal overrides, or create new config
if saved_config and os.path.isfile(tmp_config):
shutil.copy2(tmp_config, os.path.join(pma_dir, 'config.inc.php'))
try:
os.remove(tmp_config)
except Exception:
pass
# Ensure TempDir and host/port present (append if missing)
with open(os.path.join(pma_dir, 'config.inc.php'), 'r') as f:
cfg_content = f.read()
if "TempDir" not in cfg_content:
with open(os.path.join(pma_dir, 'config.inc.php'), 'a') as f:
f.write("\n$cfg['TempDir'] = '/usr/local/CyberCP/public/phpmyadmin/tmp';\n")
if "'host'" not in cfg_content and 'host' not in cfg_content:
with open(os.path.join(pma_dir, 'config.inc.php'), 'a') as f:
f.write("$cfg['Servers'][$i]['host'] = '127.0.0.1';\n$cfg['Servers'][$i]['port'] = '3306';\n")
else:
rString = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])
data = open(os.path.join(pma_dir, 'config.sample.inc.php'), 'r').readlines()
writeToFile = open(os.path.join(pma_dir, 'config.inc.php'), 'w')
writeE = 1
phpMyAdminContent = """
$cfg['Servers'][$i]['AllowNoPassword'] = false;
$cfg['Servers'][$i]['auth_type'] = 'signon';
$cfg['Servers'][$i]['SignonSession'] = 'SignonSession';
@@ -1265,48 +1306,44 @@ $cfg['Servers'][$i]['LogoutURL'] = 'phpmyadminsignin.php?logout';
$cfg['Servers'][$i]['host'] = '127.0.0.1';
$cfg['Servers'][$i]['port'] = '3306';
"""
for items in data:
if items.find('blowfish_secret') > -1:
writeToFile.writelines(
"$cfg['blowfish_secret'] = '" + rString + "'; /* YOU MUST FILL IN THIS FOR COOKIE AUTH! */\n")
elif items.find('/* Authentication type */') > -1:
writeToFile.writelines(items)
writeToFile.write(phpMyAdminContent)
writeE = 0
elif items.find("$cfg['Servers'][$i]['AllowNoPassword']") > -1:
writeE = 1
else:
if writeE:
for items in data:
if items.find('blowfish_secret') > -1:
writeToFile.writelines(
"$cfg['blowfish_secret'] = '" + rString + "'; /* YOU MUST FILL IN THIS FOR COOKIE AUTH! */\n")
elif items.find('/* Authentication type */') > -1:
writeToFile.writelines(items)
writeToFile.writelines("$cfg['TempDir'] = '/usr/local/CyberCP/public/phpmyadmin/tmp';\n")
writeToFile.close()
writeToFile.write(phpMyAdminContent)
writeE = 0
elif items.find("$cfg['Servers'][$i]['AllowNoPassword']") > -1:
writeE = 1
else:
if writeE:
writeToFile.writelines(items)
writeToFile.writelines("$cfg['TempDir'] = '/usr/local/CyberCP/public/phpmyadmin/tmp';\n")
writeToFile.close()
os.mkdir('/usr/local/CyberCP/public/phpmyadmin/tmp')
command = 'cp /usr/local/CyberCP/plogical/phpmyadminsignin.php /usr/local/CyberCP/public/phpmyadmin/phpmyadminsignin.php'
Upgrade.executioner(command, 0)
if saved_signon and os.path.isfile(tmp_signon):
shutil.copy2(tmp_signon, os.path.join(pma_dir, 'phpmyadminsignin.php'))
try:
os.remove(tmp_signon)
except Exception:
pass
else:
command = 'cp /usr/local/CyberCP/plogical/phpmyadminsignin.php /usr/local/CyberCP/public/phpmyadmin/phpmyadminsignin.php'
Upgrade.executioner(command, 0)
passFile = "/etc/cyberpanel/mysqlPassword"
try:
import json
jsonData = json.loads(open(passFile, 'r').read())
mysqluser = jsonData['mysqluser']
mysqlpassword = jsonData['mysqlpassword']
mysqlport = jsonData.get('mysqlport', 3306)
mysqlhost = jsonData.get('mysqlhost', '127.0.0.1') or '127.0.0.1'
if mysqlhost == 'localhost':
mysqlhost = '127.0.0.1'
command = "sed -i 's|localhost|%s|g' /usr/local/CyberCP/public/phpmyadmin/phpmyadminsignin.php" % (
mysqlhost)
command = "sed -i 's|localhost|%s|g' /usr/local/CyberCP/public/phpmyadmin/phpmyadminsignin.php" % (mysqlhost)
Upgrade.executioner(command, 0)
except:
except Exception:
pass
command = 'chown -R lscpd:lscpd /usr/local/CyberCP/public/phpmyadmin'
@@ -1346,52 +1383,36 @@ $cfg['Servers'][$i]['port'] = '3306';
@staticmethod
def downoad_and_install_raindloop():
try:
#######
# if os.path.exists("/usr/local/CyberCP/public/rainloop"):
#
# if os.path.exists("/usr/local/lscp/cyberpanel/rainloop/data"):
# pass
# else:
# command = "mv /usr/local/CyberCP/public/rainloop/data /usr/local/lscp/cyberpanel/rainloop/data"
# Upgrade.executioner(command, 0)
#
# command = "chown -R lscpd:lscpd /usr/local/lscp/cyberpanel/rainloop/data"
# Upgrade.executioner(command, 0)
#
# iPath = os.listdir('/usr/local/CyberCP/public/rainloop/rainloop/v/')
#
# path = "/usr/local/CyberCP/public/snappymail/snappymail/v/%s/include.php" % (iPath[0])
#
# data = open(path, 'r').readlines()
# writeToFile = open(path, 'w')
#
# for items in data:
# if items.find("$sCustomDataPath = '';") > -1:
# writeToFile.writelines(
# " $sCustomDataPath = '/usr/local/lscp/cyberpanel/rainloop/data';\n")
# else:
# writeToFile.writelines(items)
#
# writeToFile.close()
# return 0
# Data preservation: only /usr/local/CyberCP/public/snappymail (app files) is replaced.
# Data under /usr/local/lscp/cyberpanel/snappymail/data and public/snappymail/data is never deleted.
cwd = os.getcwd()
if not os.path.exists("/usr/local/CyberCP/public"):
os.mkdir("/usr/local/CyberCP/public")
# Try to fetch latest SnappyMail version from GitHub
try:
from plogical.versionFetcher import get_latest_snappymail_version
latest_version = get_latest_snappymail_version()
if latest_version and latest_version != Upgrade.SnappyVersion:
Upgrade.stdOut(f"Using latest SnappyMail version: {latest_version}", 0)
Upgrade.SnappyVersion = latest_version
else:
Upgrade.stdOut(f"Using fallback SnappyMail version: {Upgrade.SnappyVersion}", 0)
except Exception as e:
Upgrade.stdOut(f"Failed to fetch latest SnappyMail version, using fallback: {e}", 0)
# Version: /etc/cyberpanel/snappymail_version, then latest from API, then fallback
snappy_version = Upgrade.SnappyVersion
version_file = '/etc/cyberpanel/snappymail_version'
if os.path.isfile(version_file):
try:
with open(version_file, 'r') as f:
raw = (f.read() or '').strip()
if raw and len(raw) < 20 and all(c.isdigit() or c == '.' for c in raw):
snappy_version = raw
Upgrade.stdOut(f"Using SnappyMail version from {version_file}: {snappy_version}", 0)
except Exception:
pass
if snappy_version == Upgrade.SnappyVersion:
try:
from plogical.versionFetcher import get_latest_snappymail_version
latest_version = get_latest_snappymail_version()
if latest_version and latest_version != Upgrade.SnappyVersion:
Upgrade.stdOut(f"Using latest SnappyMail version: {latest_version}", 0)
snappy_version = latest_version
else:
Upgrade.stdOut(f"Using fallback SnappyMail version: {Upgrade.SnappyVersion}", 0)
except Exception as e:
Upgrade.stdOut(f"Failed to fetch latest SnappyMail version, using fallback: {e}", 0)
os.chdir("/usr/local/CyberCP/public")
@@ -1401,7 +1422,7 @@ $cfg['Servers'][$i]['port'] = '3306';
while (1):
command = 'wget -q https://github.com/the-djmaze/snappymail/releases/download/v%s/snappymail-%s.zip' % (
Upgrade.SnappyVersion, Upgrade.SnappyVersion)
snappy_version, snappy_version)
cmd = shlex.split(command)
res = subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if res != 0:
@@ -1415,11 +1436,12 @@ $cfg['Servers'][$i]['port'] = '3306';
count = 0
# Replace only app tree; data dirs (/usr/local/lscp/cyberpanel/snappymail/data, etc.) are preserved
if os.path.exists('/usr/local/CyberCP/public/snappymail'):
shutil.rmtree('/usr/local/CyberCP/public/snappymail')
while (1):
command = 'unzip -q snappymail-%s.zip -d /usr/local/CyberCP/public/snappymail' % (Upgrade.SnappyVersion)
command = 'unzip -q snappymail-%s.zip -d /usr/local/CyberCP/public/snappymail' % (snappy_version,)
cmd = shlex.split(command)
res = subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
@@ -1430,7 +1452,7 @@ $cfg['Servers'][$i]['port'] = '3306';
else:
break
try:
os.remove("snappymail-%s.zip" % (Upgrade.SnappyVersion))
os.remove("snappymail-%s.zip" % (snappy_version,))
except:
pass
@@ -4510,14 +4532,20 @@ echo $oConfig->Save() ? 'Done' : 'Error';
subprocess.run(command, shell=True, capture_output=True)
# Install MariaDB from official repository (version from /etc/cyberpanel/mariadb_version or default 11.8)
# Accept any major.minor supported by mariadb_repo_setup (10.3-10.11, 11.0-11.8, 12.0-12.x); safe regex to avoid injection
mariadb_ver = "11.8"
try:
mariadb_version_file = "/etc/cyberpanel/mariadb_version"
if os.path.isfile(mariadb_version_file):
with open(mariadb_version_file, "r") as f:
raw = f.read().strip()
if raw in ("11.8", "12.1"):
mariadb_ver = raw
raw = (f.read() or "").strip()
if raw:
import re
m = re.match(r'^(\d+)\.(\d+)(?:\.\d+)*$', raw)
if m:
major, minor = int(m.group(1)), int(m.group(2))
if (major == 10 and 3 <= minor <= 11) or (major == 11 and 0 <= minor <= 8) or (major == 12 and 0 <= minor <= 99):
mariadb_ver = "%d.%d" % (major, minor)
except Exception:
pass
Upgrade.stdOut("Setting up official MariaDB %s repository..." % mariadb_ver, 1)

View File

@@ -321,44 +321,54 @@ class vhost:
@staticmethod
def createNONSSLMapEntry(virtualHostName):
"""Add NON-SSL map entry for virtualHostName in OLS httpd_config.conf.
Returns (1, None) on success, (0, error_message) on failure.
"""
try:
def modify_config(lines):
map_entry = " map " + virtualHostName + " " + virtualHostName + "\n"
modified = []
mapchecker = 1
line_lower = None
for line in lines:
if (mapchecker == 1 and (line.find("listener") > -1 and line.find("Default") > -1)):
line_lower = line.lower()
# Match listener block: "listener Default" or "listener default" (case-insensitive)
if (mapchecker == 1 and "listener" in line_lower and "default" in line_lower):
modified.append(line)
modified.append(map_entry)
mapchecker = 0
else:
modified.append(line)
if mapchecker != 0:
raise ValueError(
"Could not find Default listener block in /usr/local/lsws/conf/httpd_config.conf. "
"Ensure the file contains a line like 'listener Default {'."
)
return modified
success, error = installUtilities.installUtilities.safeModifyHttpdConfig(
modify_config,
f"Add NON-SSL map entry for {virtualHostName}"
)
if not success:
error_msg = error if error else "Unknown error"
logging.writeToFile(f"[createNONSSLMapEntry] Failed: {error_msg}")
return 0
return 1
logging.CyberCPLogFileWriter.writeToFile(f"[createNONSSLMapEntry] Failed: {error_msg}")
return 0, error_msg
return 1, None
except BaseException as msg:
logging.CyberCPLogFileWriter.writeToFile(str(msg))
return 0
logging.CyberCPLogFileWriter.writeToFile(str(msg) + " [createNONSSLMapEntry]")
return 0, str(msg)
@staticmethod
def createConfigInMainVirtualHostFile(virtualHostName):
if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
try:
if vhost.createNONSSLMapEntry(virtualHostName) == 0:
return [0, "Failed to create NON SSL Map Entry [createConfigInMainVirtualHostFile]"]
success, error_msg = vhost.createNONSSLMapEntry(virtualHostName)
if success != 1:
display_msg = error_msg or "Failed to create NON SSL Map Entry [createConfigInMainVirtualHostFile]"
return [0, display_msg]
writeDataToFile = open("/usr/local/lsws/conf/httpd_config.conf", 'a')
@@ -1236,9 +1246,10 @@ class vhost:
def createConfigInMainDomainHostFile(domain, masterDomain):
if ProcessUtilities.decideServer() == ProcessUtilities.OLS:
try:
if vhost.createNONSSLMapEntry(domain) == 0:
return [0, "Failed to create NON SSL Map Entry [createConfigInMainVirtualHostFile]"]
success, error_msg = vhost.createNONSSLMapEntry(domain)
if success != 1:
display_msg = error_msg or "Failed to create NON SSL Map Entry [createConfigInMainVirtualHostFile]"
return [0, display_msg]
writeDataToFile = open("/usr/local/lsws/conf/httpd_config.conf", 'a')

View File

@@ -103,6 +103,8 @@ urlpatterns = [
path('api/backups/<str:plugin_name>/', views.get_plugin_backups, name='get_plugin_backups'),
path('api/revert/<str:plugin_name>/', views.revert_plugin, name='revert_plugin'),
path('api/debug-plugins/', views.debug_loaded_plugins, name='debug_loaded_plugins'),
path('api/check-subscription/<str:plugin_name>/', views.check_plugin_subscription, name='check_plugin_subscription'),
path('<str:plugin_name>/help/', views.plugin_help, name='plugin_help'),
]
# Include each installed plugin's URLs *before* the catch-all so /plugins/<name>/settings/ etc. match

View File

@@ -732,6 +732,22 @@ app.controller('configureDefaultNameservers', function ($scope, $http) {
/* Java script code for CloudFlare */
app.directive('cfImportFile', function () {
    // Bridge the native file-input "change" event into the Angular digest by
    // calling scope.onImportFile with the selected FileList.
    return {
        link: function (scope, element) {
            element.on('change', function (ev) {
                var selected = ev.target && ev.target.files;
                if (selected && selected.length && scope.onImportFile) {
                    scope.$apply(function () {
                        scope.onImportFile(selected);
                    });
                }
                // Clear the input so re-selecting the same file fires "change" again.
                ev.target.value = '';
            });
        }
    };
});
app.filter('dnsRecordSearch', function () {
return function (records, searchText) {
if (!records || !Array.isArray(records)) return records;
@@ -827,6 +843,14 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
$scope.couldNotAddRecord = true;
$scope.recordValueDefault = false;
$scope.records = [];
$scope.cfDeletedBackup = {};
$scope.exportLoading = false;
$scope.clearAllLoading = false;
$scope.restoreLoading = false;
$scope.staleRecords = [];
$scope.staleModalVisible = false;
$scope.staleLoading = false;
$scope.fixDNSLoading = false;
$scope.showEditModal = false;
$scope.editRecord = {};
@@ -1079,8 +1103,31 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
}
$scope.deleteRecord = function (id) {
$scope.confirmDeleteRecord = function (record) {
    // Confirm with the user, keep a local copy of the record in the per-zone
    // backup (so Restore can undo), then delegate to deleteRecord().
    var msg = 'Delete DNS record?\n\nName: ' + (record.name || '') + '\nType: ' + (record.type || '') + '\nValue: ' + (record.content || '');
    if (!$window.confirm(msg)) {
        return;
    }
    var currentZone = $scope.selectedZone;
    if (!currentZone) {
        return;
    }
    if (!$scope.cfDeletedBackup[currentZone]) {
        $scope.cfDeletedBackup[currentZone] = [];
    }
    var snapshot = {
        type: record.type,
        name: record.name,
        content: record.content,
        priority: parseInt(record.priority, 10) || 0,
        ttl: record.ttlNum || record.ttl || 3600,
        proxy: record.proxy,
        proxiable: record.proxiable !== false
    };
    $scope.cfDeletedBackup[currentZone].push(snapshot);
    $scope.deleteRecord(record.id);
};
$scope.deleteRecord = function (id) {
var selectedZone = $scope.selectedZone;
@@ -1164,6 +1211,221 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
};
$scope.hasBackupForZone = function () {
    // True when the selected zone has locally backed-up deleted records.
    var currentZone = $scope.selectedZone;
    if (!currentZone) return false;
    var backup = $scope.cfDeletedBackup[currentZone];
    return backup && backup.length > 0;
};
$scope.confirmClearAll = function () {
    // Delete ALL DNS records of the selected CloudFlare zone after a two-step
    // confirmation (confirm dialog + typed zone name). The server returns the
    // deleted records, which are stored in $scope.cfDeletedBackup so
    // restoreFromBackup() can undo the operation.
    var zone = $scope.selectedZone;
    if (!zone) return;
    var msg1 = 'This will remove ALL DNS records for this zone in CloudFlare. This action cannot be undone on CloudFlare.\n\nA local copy will be kept so you can use Restore.\n\nContinue?';
    if (!$window.confirm(msg1)) return;
    var msg2 = 'Type the zone name below to confirm:\n\n' + zone;
    var typed = $window.prompt(msg2);
    if (typed === null) return;
    if (typed.trim() !== zone) {
        new PNotify({ title: 'Cancelled', text: 'Zone name did not match. No records were deleted.', type: 'warning' });
        return;
    }
    $scope.clearAllLoading = true;
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/clearAllDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.clearAllLoading = false;
        if (response.data.delete_status === 1 && response.data.deleted_records) {
            $scope.cfDeletedBackup[zone] = response.data.deleted_records;
            $scope.canNotFetchRecords = true;
            $scope.recordsFetched = false;
            $scope.recordDeleted = false;
            populateCurrentRecords();
            new PNotify({ title: 'Done', text: 'All DNS records were deleted. Use Restore to undo.', type: 'success' });
        } else {
            $scope.errorMessage = response.data.error_message || 'Clear all failed';
            new PNotify({ title: 'Error', text: $scope.errorMessage, type: 'error' });
        }
    }, function () {
        $scope.clearAllLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.restoreFromBackup = function () {
    // Re-import the locally backed-up (previously deleted) records of the
    // selected zone via the import endpoint; the backup is cleared on success.
    var zone = $scope.selectedZone;
    var list = $scope.cfDeletedBackup[zone];
    if (!zone || !list || list.length === 0) return;
    $scope.restoreLoading = true;
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/importDNSRecordsCloudFlare';
    var data = { selectedZone: zone, records: list };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.restoreLoading = false;
        if (response.data.import_status === 1) {
            $scope.cfDeletedBackup[zone] = [];
            populateCurrentRecords();
            var failed = response.data.failed || [];
            var msg = response.data.imported + ' record(s) restored.';
            if (failed.length) msg += ' ' + failed.length + ' failed.';
            new PNotify({ title: 'Restore done', text: msg, type: failed.length ? 'warning' : 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Restore failed', type: 'error' });
        }
    }, function () {
        $scope.restoreLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.exportRecords = function () {
    // Fetch all records for the selected zone and download them as a
    // pretty-printed JSON file via a temporary object URL.
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.exportLoading = true;
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/getExportRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.exportLoading = false;
        if (response.data.fetchStatus === 1 && response.data.data) {
            // Server may return the record list either serialized or as an array.
            var arr = typeof response.data.data === 'string' ? JSON.parse(response.data.data) : response.data.data;
            var blob = new Blob([JSON.stringify(arr, null, 2)], { type: 'application/json' });
            var a = document.createElement('a');
            a.href = (window.URL || window.webkitURL).createObjectURL(blob);
            a.download = 'dns-records-' + zone.replace(/\./g, '-') + '.json';
            a.click();
            // Release the object URL to avoid leaking the blob.
            if (a.href) (window.URL || window.webkitURL).revokeObjectURL(a.href);
            new PNotify({ title: 'Export done', text: 'DNS records downloaded.', type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Export failed', type: 'error' });
        }
    }, function () {
        $scope.exportLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.onImportFile = function (files) {
    // Read a user-selected JSON file and import its DNS records into the
    // selected zone. Accepts a bare array, {records: []}, {data: ...}, or a
    // single record object.
    if (!files || !files.length) return;
    var zone = $scope.selectedZone;
    if (!zone) {
        new PNotify({ title: 'Error', text: 'Select a zone first.', type: 'error' });
        return;
    }
    var file = files[0];
    var reader = new FileReader();
    reader.onload = function (e) {
        var text = e.target && e.target.result;
        if (!text) {
            new PNotify({ title: 'Error', text: 'Could not read file.', type: 'error' });
            return;
        }
        var arr;
        try {
            arr = JSON.parse(text);
        } catch (err) {
            new PNotify({ title: 'Error', text: 'Invalid JSON: ' + (err.message || ''), type: 'error' });
            return;
        }
        // Normalize the parsed payload to an array of record objects.
        if (!Array.isArray(arr)) {
            if (arr && Array.isArray(arr.records)) arr = arr.records;
            else if (arr && arr.data) arr = Array.isArray(arr.data) ? arr.data : [arr.data];
            else arr = [arr];
        }
        // BUGFIX: declare url with var — it previously leaked as an implicit global.
        var url = '/dns/importDNSRecordsCloudFlare';
        var data = { selectedZone: zone, records: arr };
        var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
        $http.post(url, data, config).then(function (response) {
            if (response.data.import_status === 1) {
                populateCurrentRecords();
                var failed = response.data.failed || [];
                var msg = response.data.imported + ' record(s) imported.';
                if (failed.length) msg += ' ' + failed.length + ' failed.';
                new PNotify({ title: 'Import done', text: msg, type: failed.length ? 'warning' : 'success' });
            } else {
                new PNotify({ title: 'Error', text: response.data.error_message || 'Import failed', type: 'error' });
            }
        }, function () {
            new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
        });
    };
    reader.readAsText(file, 'UTF-8');
};
$scope.checkStaleRecords = function () {
    // Ask the server for orphan/stale DNS records of the selected zone and
    // show them in the stale-records modal.
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.staleLoading = true;
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/getStaleDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.staleLoading = false;
        if (response.data.fetchStatus === 1) {
            $scope.staleRecords = response.data.stale_records || [];
            $scope.staleModalVisible = true;
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Could not fetch stale records', type: 'error' });
        }
    }, function () {
        $scope.staleLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.closeStaleModal = function () {
    // Hide the stale-records modal and drop its contents.
    $scope.staleRecords = [];
    $scope.staleModalVisible = false;
};
$scope.fixDNS = function () {
    // Ask the server to add any DNS records that are expected for the zone
    // but missing in CloudFlare; records already present are skipped.
    var zone = $scope.selectedZone;
    if (!zone) return;
    $scope.fixDNSLoading = true;
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/fixDNSRecordsCloudFlare';
    var data = { selectedZone: zone };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        $scope.fixDNSLoading = false;
        if (response.data.fix_status === 1) {
            populateCurrentRecords();
            var msg = response.data.added + ' record(s) added.';
            if (response.data.skipped) msg += ' ' + response.data.skipped + ' already present.';
            new PNotify({ title: 'Fix DNS done', text: msg, type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Fix DNS failed', type: 'error' });
        }
    }, function () {
        $scope.fixDNSLoading = false;
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.removeStaleRecords = function () {
    // Delete the listed orphan records after confirmation; the server echoes
    // the deleted records, which are appended to the per-zone local backup so
    // restoreFromBackup() can undo the removal.
    if (!$scope.staleRecords || $scope.staleRecords.length === 0) return;
    var zone = $scope.selectedZone;
    // ROBUSTNESS: bail out when no zone is selected (previously indexed
    // cfDeletedBackup[undefined]).
    if (!zone) return;
    var msg = 'Remove ' + $scope.staleRecords.length + ' orphan DNS record(s)? A local copy will be kept for Restore.';
    if (!$window.confirm(msg)) return;
    var ids = $scope.staleRecords.map(function (r) { return r.id; });
    // BUGFIX: declare url with var — it previously leaked as an implicit global.
    var url = '/dns/removeStaleDNSRecordsCloudFlare';
    var data = { selectedZone: zone, ids: ids };
    var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
    $http.post(url, data, config).then(function (response) {
        if (response.data.delete_status === 1 && response.data.deleted_records) {
            if (!$scope.cfDeletedBackup[zone]) $scope.cfDeletedBackup[zone] = [];
            $scope.cfDeletedBackup[zone] = $scope.cfDeletedBackup[zone].concat(response.data.deleted_records);
            $scope.closeStaleModal();
            populateCurrentRecords();
            new PNotify({ title: 'Done', text: response.data.deleted_records.length + ' orphan record(s) removed. Use Restore to undo.', type: 'success' });
        } else {
            new PNotify({ title: 'Error', text: response.data.error_message || 'Remove failed', type: 'error' });
        }
    }, function () {
        new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
    });
};
$scope.dnsTypeList = ['A', 'AAAA', 'CNAME', 'MX', 'TXT', 'NS', 'SOA', 'SRV', 'CAA', 'SPF', 'DNSKEY', 'CDNSKEY', 'HTTPS', 'SVCB', 'URI', 'LOC', 'NAPTR', 'SMIMEA', 'SSHFP', 'TLSA', 'PTR'];
$scope.getTypeOptions = function (record) {
var list = angular.copy($scope.dnsTypeList);

View File

@@ -732,6 +732,22 @@ app.controller('configureDefaultNameservers', function ($scope, $http) {
/* Java script code for CloudFlare */
app.directive('cfImportFile', function () {
return {
link: function (scope, element) {
element.on('change', function (ev) {
var files = ev.target && ev.target.files;
if (files && files.length && scope.onImportFile) {
scope.$apply(function () {
scope.onImportFile(files);
});
}
ev.target.value = '';
});
}
};
});
app.filter('dnsRecordSearch', function () {
return function (records, searchText) {
if (!records || !Array.isArray(records)) return records;
@@ -828,6 +844,14 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
$scope.couldNotAddRecord = true;
$scope.recordValueDefault = false;
$scope.records = [];
$scope.cfDeletedBackup = {};
$scope.exportLoading = false;
$scope.clearAllLoading = false;
$scope.restoreLoading = false;
$scope.staleRecords = [];
$scope.staleModalVisible = false;
$scope.staleLoading = false;
$scope.fixDNSLoading = false;
$scope.showEditModal = false;
$scope.editRecord = {};
@@ -1083,6 +1107,30 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
}
$scope.confirmDeleteRecord = function (record) {
var msg = 'Delete DNS record?\n\nName: ' + (record.name || '') + '\nType: ' + (record.type || '') + '\nValue: ' + (record.content || '');
if (!$window.confirm(msg)) {
return;
}
var zone = $scope.selectedZone;
if (!zone) {
return;
}
if (!$scope.cfDeletedBackup[zone]) {
$scope.cfDeletedBackup[zone] = [];
}
$scope.cfDeletedBackup[zone].push({
type: record.type,
name: record.name,
content: record.content,
priority: parseInt(record.priority, 10) || 0,
ttl: record.ttlNum || record.ttl || 3600,
proxy: record.proxy,
proxiable: record.proxiable !== false
});
$scope.deleteRecord(record.id);
};
$scope.deleteRecord = function (id) {
@@ -1168,6 +1216,221 @@ app.controller('addModifyDNSRecordsCloudFlare', function ($scope, $http, $window
};
$scope.hasBackupForZone = function () {
var zone = $scope.selectedZone;
if (!zone) return false;
var list = $scope.cfDeletedBackup[zone];
return list && list.length > 0;
};
$scope.confirmClearAll = function () {
var zone = $scope.selectedZone;
if (!zone) return;
var msg1 = 'This will remove ALL DNS records for this zone in CloudFlare. This action cannot be undone on CloudFlare.\n\nA local copy will be kept so you can use Restore.\n\nContinue?';
if (!$window.confirm(msg1)) return;
var msg2 = 'Type the zone name below to confirm:\n\n' + zone;
var typed = $window.prompt(msg2);
if (typed === null) return;
if (typed.trim() !== zone) {
new PNotify({ title: 'Cancelled', text: 'Zone name did not match. No records were deleted.', type: 'warning' });
return;
}
$scope.clearAllLoading = true;
url = '/dns/clearAllDNSRecordsCloudFlare';
var data = { selectedZone: zone };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
$scope.clearAllLoading = false;
if (response.data.delete_status === 1 && response.data.deleted_records) {
$scope.cfDeletedBackup[zone] = response.data.deleted_records;
$scope.canNotFetchRecords = true;
$scope.recordsFetched = false;
$scope.recordDeleted = false;
populateCurrentRecords();
new PNotify({ title: 'Done', text: 'All DNS records were deleted. Use Restore to undo.', type: 'success' });
} else {
$scope.errorMessage = response.data.error_message || 'Clear all failed';
new PNotify({ title: 'Error', text: $scope.errorMessage, type: 'error' });
}
}, function () {
$scope.clearAllLoading = false;
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.restoreFromBackup = function () {
var zone = $scope.selectedZone;
var list = $scope.cfDeletedBackup[zone];
if (!zone || !list || list.length === 0) return;
$scope.restoreLoading = true;
url = '/dns/importDNSRecordsCloudFlare';
var data = { selectedZone: zone, records: list };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
$scope.restoreLoading = false;
if (response.data.import_status === 1) {
$scope.cfDeletedBackup[zone] = [];
populateCurrentRecords();
var failed = response.data.failed || [];
var msg = response.data.imported + ' record(s) restored.';
if (failed.length) msg += ' ' + failed.length + ' failed.';
new PNotify({ title: 'Restore done', text: msg, type: failed.length ? 'warning' : 'success' });
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Restore failed', type: 'error' });
}
}, function () {
$scope.restoreLoading = false;
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.exportRecords = function () {
var zone = $scope.selectedZone;
if (!zone) return;
$scope.exportLoading = true;
url = '/dns/getExportRecordsCloudFlare';
var data = { selectedZone: zone };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
$scope.exportLoading = false;
if (response.data.fetchStatus === 1 && response.data.data) {
var arr = typeof response.data.data === 'string' ? JSON.parse(response.data.data) : response.data.data;
var blob = new Blob([JSON.stringify(arr, null, 2)], { type: 'application/json' });
var a = document.createElement('a');
a.href = (window.URL || window.webkitURL).createObjectURL(blob);
a.download = 'dns-records-' + zone.replace(/\./g, '-') + '.json';
a.click();
if (a.href) (window.URL || window.webkitURL).revokeObjectURL(a.href);
new PNotify({ title: 'Export done', text: 'DNS records downloaded.', type: 'success' });
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Export failed', type: 'error' });
}
}, function () {
$scope.exportLoading = false;
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.onImportFile = function (files) {
if (!files || !files.length) return;
var zone = $scope.selectedZone;
if (!zone) {
new PNotify({ title: 'Error', text: 'Select a zone first.', type: 'error' });
return;
}
var file = files[0];
var reader = new FileReader();
reader.onload = function (e) {
var text = e.target && e.target.result;
if (!text) {
new PNotify({ title: 'Error', text: 'Could not read file.', type: 'error' });
return;
}
var arr;
try {
arr = JSON.parse(text);
} catch (err) {
new PNotify({ title: 'Error', text: 'Invalid JSON: ' + (err.message || ''), type: 'error' });
return;
}
if (!Array.isArray(arr)) {
if (arr && Array.isArray(arr.records)) arr = arr.records;
else if (arr && arr.data) arr = Array.isArray(arr.data) ? arr.data : [arr.data];
else arr = [arr];
}
url = '/dns/importDNSRecordsCloudFlare';
var data = { selectedZone: zone, records: arr };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
if (response.data.import_status === 1) {
populateCurrentRecords();
var failed = response.data.failed || [];
var msg = response.data.imported + ' record(s) imported.';
if (failed.length) msg += ' ' + failed.length + ' failed.';
new PNotify({ title: 'Import done', text: msg, type: failed.length ? 'warning' : 'success' });
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Import failed', type: 'error' });
}
}, function () {
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
reader.readAsText(file, 'UTF-8');
};
$scope.checkStaleRecords = function () {
var zone = $scope.selectedZone;
if (!zone) return;
$scope.staleLoading = true;
url = '/dns/getStaleDNSRecordsCloudFlare';
var data = { selectedZone: zone };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
$scope.staleLoading = false;
if (response.data.fetchStatus === 1) {
$scope.staleRecords = response.data.stale_records || [];
$scope.staleModalVisible = true;
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Could not fetch stale records', type: 'error' });
}
}, function () {
$scope.staleLoading = false;
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.closeStaleModal = function () {
$scope.staleModalVisible = false;
$scope.staleRecords = [];
};
$scope.fixDNS = function () {
var zone = $scope.selectedZone;
if (!zone) return;
$scope.fixDNSLoading = true;
url = '/dns/fixDNSRecordsCloudFlare';
var data = { selectedZone: zone };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
$scope.fixDNSLoading = false;
if (response.data.fix_status === 1) {
populateCurrentRecords();
var msg = response.data.added + ' record(s) added.';
if (response.data.skipped) msg += ' ' + response.data.skipped + ' already present.';
new PNotify({ title: 'Fix DNS done', text: msg, type: 'success' });
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Fix DNS failed', type: 'error' });
}
}, function () {
$scope.fixDNSLoading = false;
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.removeStaleRecords = function () {
if (!$scope.staleRecords || $scope.staleRecords.length === 0) return;
var zone = $scope.selectedZone;
var msg = 'Remove ' + $scope.staleRecords.length + ' orphan DNS record(s)? A local copy will be kept for Restore.';
if (!$window.confirm(msg)) return;
var ids = $scope.staleRecords.map(function (r) { return r.id; });
url = '/dns/removeStaleDNSRecordsCloudFlare';
var data = { selectedZone: zone, ids: ids };
var config = { headers: { 'X-CSRFToken': getCookie('csrftoken') } };
$http.post(url, data, config).then(function (response) {
if (response.data.delete_status === 1 && response.data.deleted_records) {
if (!$scope.cfDeletedBackup[zone]) $scope.cfDeletedBackup[zone] = [];
$scope.cfDeletedBackup[zone] = $scope.cfDeletedBackup[zone].concat(response.data.deleted_records);
$scope.closeStaleModal();
populateCurrentRecords();
new PNotify({ title: 'Done', text: response.data.deleted_records.length + ' orphan record(s) removed. Use Restore to undo.', type: 'success' });
} else {
new PNotify({ title: 'Error', text: response.data.error_message || 'Remove failed', type: 'error' });
}
}, function () {
new PNotify({ title: 'Error', text: 'Could not connect to server.', type: 'error' });
});
};
$scope.dnsTypeList = ['A', 'AAAA', 'CNAME', 'MX', 'TXT', 'NS', 'SOA', 'SRV', 'CAA', 'SPF', 'DNSKEY', 'CDNSKEY', 'HTTPS', 'SVCB', 'URI', 'LOC', 'NAPTR', 'SMIMEA', 'SSHFP', 'TLSA', 'PTR'];
$scope.getTypeOptions = function (record) {
var list = angular.copy($scope.dnsTypeList);

View File

@@ -1,63 +0,0 @@
# CyberCP git pull conflicts on v2.5.5-dev (server at /usr/local/CyberCP)
## Why Git asks to "remove" or "move" files
When you run `git pull --ff-only origin v2.5.5-dev` in `/usr/local/CyberCP`, Git can block for two reasons:
### 1. Modified files (would be overwritten by merge)
- **Meaning:** You have **local changes** in tracked files (e.g. `CyberCP/settings.py`, `baseTemplate/views.py`, …). The remote branch also changed those files. Git will not overwrite your working tree without you deciding what to do with your changes.
- **So:** You must either **commit** or **stash** (or discard) those local changes before the pull can apply.
### 2. Untracked files (would be overwritten by merge)
- **Meaning:** You have **untracked** files/dirs at paths where the **incoming** branch (v2.5.5-dev) **adds** files. For example: `panelAccess/`, `baseTemplate/static/baseTemplate/assets/mobile-responsive.css`, `sql/create_ftp_quotas.sql`, etc. Git will not overwrite untracked content, so it refuses to merge and says "Please move or remove them."
- **So:** You must **move or remove** those untracked paths so Git can write the version from the repo there.
## Are all these files on v2.5.5-dev?
- **Yes.** The branch `v2.5.5-dev` on `master3395/cyberpanel` contains:
- All the modified paths (canonical versions).
- All the "untracked" paths (e.g. `panelAccess/`, `mobile-responsive.css`, `readability-fixes.css`, `emailLimitsController.js`, `create_ftp_quotas.sql`, `firewall/migrations/0001_initial.py`, `install/ols_binaries_config.py`, etc.).
- So the **repo** is the source of truth; the server just needs to be brought in line with it. You can confirm by cloning fresh: `git clone -b v2.5.5-dev https://github.com/master3395/cyberpanel.git` and listing those paths.
## Safe way to sync the server to v2.5.5-dev
If you are **ok discarding all local and untracked changes** in `/usr/local/CyberCP` and making it exactly match `origin/v2.5.5-dev`:
```bash
cd /usr/local/CyberCP
# Optional: backup current state
tar -czf /root/cybercp-backup-before-sync-$(date +%Y%m%d-%H%M%S).tar.gz .
# Reset tracked files to current HEAD and remove untracked/ignored files
git fetch origin
git checkout v2.5.5-dev
git reset --hard origin/v2.5.5-dev
git clean -fd
# Ensure you're up to date (should already be after reset)
git pull --ff-only origin v2.5.5-dev
```
After this, **Current** in Version Management should match **Latest** (commit `c24f067e` or whatever is the tip of `origin/v2.5.5-dev`).
## If you need to keep local changes
- **Tracked changes:** Stash first, then pull, then re-apply:
```bash
cd /usr/local/CyberCP
git stash push -m "before sync v2.5.5-dev"
# move or remove the untracked paths listed by Git (e.g. backup then delete)
git pull --ff-only origin v2.5.5-dev
git stash pop
```
- **Untracked files:** Back them up to another directory (e.g. `/root/cybercp-untracked-backup/`) before removing or moving them, then run the pull.
## Upgrade script sync step
The upgrade script's `Sync_CyberCP_To_Latest()` runs `git fetch`, `checkout`, and `git pull --ff-only`. If the server has local or untracked conflicts like above, that pull will keep failing until you either:
- Run the "safe way" (reset + clean) on the server once, or
- Change the script to use `git reset --hard origin/$Branch_Name` and `git clean -fd` so the install is forced to match the remote (only do this if you intend the install to always mirror the repo with no local edits).

View File

@@ -1,26 +0,0 @@
# Deploy Locally Before Push (v2.5.5-dev)
## Rule
**Always deploy to the local CyberPanel installation before pushing to v2.5.5-dev.**
When deploying and pushing changes:
1. **First: Deploy locally**
Copy all modified/relevant files from the repo to `/usr/local/CyberCP`, preserving directory structure.
2. **Then: Commit and push**
Stage the same files, commit (author: `master3395`), and push to `origin v2.5.5-dev`.
## Order
1. Deploy → 2. Commit → 3. Push
Never push to v2.5.5-dev without deploying to `/usr/local/CyberCP` first.
## Example
```bash
# 1. Deploy
cp /home/cyberpanel-repo/path/to/file /usr/local/CyberCP/path/to/
# 2. Commit and push
cd /home/cyberpanel-repo && git add ... && git commit -m "..." --author="master3395 <master3395@users.noreply.github.com>" && git push origin v2.5.5-dev
```

View File

@@ -1,53 +0,0 @@
# Deploy MySQL Manager fixes to the server (e.g. 207.180.193.210)
## Why you still see no data
- The URL **https://207.180.193.210:2087** is the **remote server** (or your server's public IP). It is **not** “localhost.”
- Our earlier deploy commands ran on the machine where the repo lives. If that machine is **not** the one serving 207.180.193.210, then the panel you open in the browser is still running the **old** code and old `databases.js`.
- Seeing **`{$ Slow_queries $}`** (literal text) and empty processes means the **Mysqlmanager** controller or the updated JS is not running on the server that serves that URL.
## Fix: run the deploy on the server that serves 207.180.193.210
You must copy the updated files into CyberPanel **on the same machine** that serves https://207.180.193.210:2087 (i.e. where `/usr/local/CyberCP` is used by the panel).
### Option A You have the repo on that server (e.g. at `/home/cyberpanel-repo`)
SSH to **207.180.193.210** (or the host that serves that IP) and run:
```bash
# Path to repo on THAT server (change if different)
REPO=/home/cyberpanel-repo
cp "$REPO/plogical/mysqlUtilities.py" /usr/local/CyberCP/plogical/
cp "$REPO/databases/views.py" /usr/local/CyberCP/databases/
cp "$REPO/databases/static/databases/databases.js" /usr/local/CyberCP/databases/static/databases/
cp "$REPO/static/databases/databases.js" /usr/local/CyberCP/static/databases/
# LiteSpeed serves /static/ from public/static/ — must deploy here or the browser gets the old file
mkdir -p /usr/local/CyberCP/public/static/databases
cp "$REPO/static/databases/databases.js" /usr/local/CyberCP/public/static/databases/
# Restart panel so changes are used
systemctl restart lscpd
echo "MySQL Manager deploy done. Hard-refresh the MySQL Manager page (Ctrl+Shift+R)."
```
### Option B Repo is only on another machine (e.g. your dev box)
1. Copy the **four files** from the machine that has the repo to **207.180.193.210** (e.g. with `scp` or `rsync`):
- `plogical/mysqlUtilities.py`
- `databases/views.py`
- `databases/static/databases/databases.js`
- `static/databases/databases.js`
2. On **207.180.193.210**, run the same `cp` commands as in Option A, using the paths where you put those files instead of `$REPO`.
3. Restart the panel:
`systemctl restart lscpd`
### After deploy
- Open **https://207.180.193.210:2087/dataBases/MysqlManager**
- Do a **hard refresh**: **Ctrl+Shift+R** (or Cmd+Shift+R on Mac) so the browser doesn't use cached `databases.js`.
If you still see no data, open the browser **Developer Tools (F12) → Console** and note any red errors (e.g. `ctrlreg` or 404 for `databases.js`), then share that message.

View File

@@ -1,64 +0,0 @@
# Firewall Rules & Banned IPs Making Sure Changes Load
If Firewall Rules or Banned IPs don't show the latest UI (Modify buttons, Per-page dropdown, Search, etc.), do the following.
## 1. Sync firewall JavaScript (when you change firewall JS)
The panel can serve `firewall/firewall.js` from the **firewall app** (`firewall/static/firewall/firewall.js`) or from **static/** after collectstatic. The cache-buster uses the newest mtime from:
- `firewall/static/firewall/firewall.js`
- `static/firewall/firewall.js`
- `public/static/firewall/firewall.js`
So that the query param updates when any of these change.
**After editing `firewall/static/firewall/firewall.js`, sync copies so all paths are up to date:**
```bash
# From repo root
mkdir -p static/firewall public/static/firewall
cp firewall/static/firewall/firewall.js static/firewall/
cp firewall/static/firewall/firewall.js public/static/firewall/
```
## 2. Templates
The firewall **HTML** comes from the **firewall app** template:
- `firewall/templates/firewall/firewall.html`
Django loads it when you open the firewall page. There is no separate copy under `static/` or `baseTemplate/` for that page. So any change in `firewall/templates/firewall/firewall.html` is used as long as the running app is your repo (or a deploy that includes this file).
## 3. Where CyberPanel stores files (production)
- **Production root:** `/usr/local/CyberCP` the full repo (including `firewall/`, `baseTemplate/`, etc.) lives here after install/upgrade.
- **Upgrade sync:** `upgrade_modules/09_sync.sh` runs from that directory (`git fetch` / checkout / pull). After sync, it copies **baseTemplate** static and **firewall** static into `public/static/` so LiteSpeed serves the latest dashboard and firewall JS.
- **Firewall code:** `firewall/templates/firewall/firewall.html` and `firewall/static/firewall/firewall.js` under `/usr/local/CyberCP`. LiteSpeed serves `/static/firewall/firewall.js` from `public/static/firewall/firewall.js`, which is updated by the upgrade script.
## 4. Production (e.g. `/usr/local/CyberCP`) manual deploy
If the panel runs from an **installed** path (e.g. `/usr/local/CyberCP`), that directory is often a copy of the repo. Then:
- Replace or update the firewall app there with your repo version:
- `firewall/templates/firewall/firewall.html`
- `firewall/static/firewall/firewall.js`
- If the installer or deploy uses `static/` or `public/static/`, copy the same `firewall.js` there too (as in step 1).
- Restart the app server (e.g. Gunicorn/LiteSpeed) so Django and static file serving use the new files.
## 5. Browser cache
The script tag uses a cache-buster:
`?v={{ CP_VERSION }}&fw={{ FIREWALL_STATIC_VERSION }}&cb=4`
- Do a **hard refresh**: Ctrl+Shift+R (Windows/Linux) or Cmd+Shift+R (Mac).
- Or clear cache for the panel site and reload.
## 6. Quick checklist
- [ ] `firewall/static/firewall/firewall.js` has the latest code.
- [ ] Synced to `static/firewall/firewall.js` and `public/static/firewall/firewall.js` (see step 1).
- [ ] `firewall/templates/firewall/firewall.html` has the latest markup (Modify buttons, modals, Per page dropdown).
- [ ] If using an installed path, copy updated firewall app (and static copies) there and restart the server.
- [ ] Hard refresh (or clear cache) in the browser.
After this, Firewall Rules and Banned IPs should load the correct layout and Modify buttons.

View File

@@ -1,113 +0,0 @@
# HTTP 500 after git sync recovery steps
## Cause
After running `git reset --hard origin/v2.5.5-dev` and `git clean -fd` in `/usr/local/CyberCP`, the **repo's** `CyberCP/settings.py` replaced the **server's** production `settings.py`. The repo file has different (or placeholder) database credentials and config, so the app can't connect to the DB or behaves incorrectly → **500** on `/base/` and elsewhere.
## 1. Restore production `settings.py`
Use one of these options.
### A. From your tarball backup (recommended)
You created a backup before sync, e.g.:
`/root/cybercp-backup-before-sync-YYYYMMDD-HHMMSS.tar.gz`
Restore only `settings.py`:
```bash
cd /root
# List to find the exact backup name
ls -la cybercp-backup-before-sync-*.tar.gz
# Restore CyberCP/settings.py (tarball was created from /usr/local/CyberCP so paths start with . or ./)
BACKUP=$(ls -t cybercp-backup-before-sync-*.tar.gz 2>/dev/null | head -1)
if [ -n "$BACKUP" ]; then
tar -xzf "$BACKUP" -C /usr/local/CyberCP ./CyberCP/settings.py 2>/dev/null || \
tar -xzf "$BACKUP" -C /usr/local/CyberCP CyberCP/settings.py 2>/dev/null
echo "Restored settings.py from $BACKUP"
else
echo "No backup found in /root"
fi
```
If the archive has no leading `./`, try:
```bash
tar -xzf "$BACKUP" -C /usr/local/CyberCP --strip-components=0 CyberCP/settings.py
# or
tar -xzf "$BACKUP" -C /tmp CyberCP/settings.py && mv /tmp/CyberCP/settings.py /usr/local/CyberCP/CyberCP/
```
### B. From upgrade script backup (if a previous upgrade ran)
The upgrade script backs up to `/tmp/cyberpanel_settings_backup.py`:
```bash
if [ -f /tmp/cyberpanel_settings_backup.py ]; then
cp /tmp/cyberpanel_settings_backup.py /usr/local/CyberCP/CyberCP/settings.py
echo "Restored settings.py from /tmp"
fi
```
### C. If you have no backup
Edit `/usr/local/CyberCP/CyberCP/settings.py` and set the **DATABASES** section to match your server:
- Same DB name, user, and password as used before the sync (e.g. from another backup or from the MySQL/MariaDB config your install used).
## 2. Restart CyberPanel / LiteSpeed
So the app loads the restored config:
```bash
systemctl restart lscpd
# or, depending on setup:
# systemctl restart lsws
```
Wait a few seconds, then try https://207.180.193.210:2087/ and https://207.180.193.210:2087/base/ again.
## 3. If 500 persists — get the real error
Run:
```bash
# Application log (Django/CyberPanel)
tail -100 /home/cyberpanel/error-logs.txt
# LiteSpeed / WSGI errors
tail -100 /usr/local/lscp/logs/error.log
# If present
tail -100 /usr/local/CyberCP/logs/cyberpanel.log
journalctl -u lscpd -n 50 --no-pager
```
Then run Django check and migrate:
```bash
cd /usr/local/CyberCP
source /usr/local/CyberCP/bin/activate # if venv exists
python manage.py check
python manage.py migrate --noinput
```
Fix any errors reported (e.g. missing DB user, wrong password, or migrations).
## 4. Future syncs — keep production settings
Before running `git reset --hard` again:
1. Back up `settings.py`:
```bash
cp /usr/local/CyberCP/CyberCP/settings.py /root/cyberpanel_settings_production.py
```
2. After sync, restore it:
```bash
cp /root/cyberpanel_settings_production.py /usr/local/CyberCP/CyberCP/settings.py
systemctl restart lscpd
```
Or add a small script that does sync then restores `settings.py` and restarts `lscpd`.

View File

@@ -1,36 +0,0 @@
# Install modularization design
## Overview
- **cyberpanel.sh**: Modular loader; sources `install_modules/00_common.sh`–`09_parse_main.sh`. When `install_modules/` is missing (e.g. one-liner), downloads modules from GitHub.
- **install.sh**: Wrapper that detects OS, checks disk; if repo has `cyberpanel.sh` + `install_modules/`, runs local loader; else downloads `cyberpanel.sh` and runs it.
- **install/venvsetup.sh**: Loader that sources `install/venvsetup_modules/01_*`–`05_*`. Original kept as `install/venvsetup_monolithic.sh`.
## install_modules/ (repo root)
| Module | Lines | Content |
|--------|-------|---------|
| 00_common.sh | ~418 | Globals, log_message, print_status, show_banner, detect_os, fix_static_file_permissions, fix_post_install_issues |
| 01_verify_deps.sh | ~129 | verify_installation, install_dependencies |
| 02_install_core.sh | ~390 | install_cyberpanel, check_cyberpanel_installed, cleanup_existing_cyberpanel, install_cyberpanel_direct (part 1) |
| 03_install_direct.sh | ~411 | install_cyberpanel_direct_cont |
| 04_fixes_status.sh | ~210 | apply_fixes, _port_listening, show_status_summary |
| 05_menus_main.sh | ~328 | show_main_menu, show_fresh_install_menu, show_commit_selection, show_version_selection, show_installation_preferences |
| 06_menus_update.sh | ~247 | show_update_menu, show_reinstall_menu, show_system_status |
| 07_menus_advanced.sh | ~273 | show_advanced_menu, show_error_help, show_fix_menu, show_clean_menu, show_logs_menu, show_diagnostics |
| 08_actions.sh | ~317 | start_upgrade, start_force_reinstall, start_preupgrade, start_reinstall, start_installation |
| 09_parse_main.sh | ~247 | parse_arguments, detect_installation_mode, create_standard_aliases, main |
All modules kept under 500 lines. Loader: `cyberpanel.sh`. Backup: `cyberpanel_install_monolithic.sh`.
## install/venvsetup_modules/
| Module | Content |
|--------|---------|
| 01_vars_install_required.sh | Vars, safe_pip_install, license_validation, special_change, system_tweak, install_required |
| 02_memcached_main.sh | memcached_installation, redis_installation, check_provider, check_*, interactive_*, main_install |
| 03_main_run_pip.sh | main_install_run, pip_virtualenv |
| 04_after_install.sh | after_install |
| 05_argument_main.sh | argument_mode, main flow (check_OS, install_required, pip_virtualenv, system_tweak, main_install) |
Loader: `install/venvsetup.sh`. Backup: `install/venvsetup_monolithic.sh`. Refactor: `main_install` calls `main_install_run()` for size split.
## install/ (Python and other files)
- **install/install.py**, **install/installCyberPanel.py**, etc. are unchanged; they are used by the shell installer and may be split in a future pass (e.g. into Python packages) if needed for the 500-line rule.

View File

@@ -1,51 +0,0 @@
# MariaDB Client No-SSL (ERROR 2026 Fix) — Install and Upgrade Coverage
This document summarizes where the MariaDB client “no SSL” configuration is applied so that **install** and **upgrade** both work when the server has `have_ssl=DISABLED` (avoids `ERROR 2026 (HY000): TLS/SSL error: SSL is required, but the server does not support it`).
## What gets applied
- **`[client]`** with **`ssl=0`** and **`skip-ssl`** in:
- `/etc/my.cnf.d/cyberpanel-client.cnf` (RHEL/AlmaLinux/CentOS)
- `/etc/mysql/mariadb.conf.d/99-cyberpanel-client.cnf` (Debian/Ubuntu, when that directory exists)
- Optionally appended to **`/etc/my.cnf`** if it has no `[client]` section
## Install path
| Location | What happens |
|----------|----------------|
| **install/install.py** | Writes `/root/.my.cnf` with `[client]` including `ssl=0` and `skip-ssl`. When `remotemysql == 'OFF'`, calls `_ensure_mariadb_client_no_ssl()` which creates `/etc/my.cnf.d/cyberpanel-client.cnf` (RHEL) and `/etc/mysql/mariadb.conf.d/99-cyberpanel-client.cnf` (Debian/Ubuntu). |
So every **fresh install** (local MariaDB) gets the client no-SSL config.
## Upgrade path (modular: `cyberpanel_upgrade.sh` + `upgrade_modules/`)
| Module | What happens |
|--------|----------------|
| **03_mariadb.sh** | Defines **`Ensure_MariaDB_Client_No_SSL()`** (writes `cyberpanel-client.cnf`, optional `[client]` in `my.cnf`, and Debian `99-cyberpanel-client.cnf`). Called at end of **`Pre_Upgrade_CentOS7_MySQL`** when that path runs. |
| **05_repository.sh** | After all OS-specific repository and MariaDB install/upgrade logic (CentOS, AlmaLinux 9, Ubuntu/Debian, openEuler), calls **`Ensure_MariaDB_Client_No_SSL`** once. Every RHEL/DNF path also writes `cyberpanel-client.cnf` and optional `my.cnf` [client] inline; Ubuntu/Debian get the fix via this single call. |
So every **modular upgrade** run applies the client no-SSL config on all supported OSes.
## Upgrade path (monolithic: `cyberpanel_upgrade_monolithic.sh`)
| Location | What happens |
|----------|----------------|
| **Pre_Upgrade_Setup_Repository** | Each RHEL/DNF branch already creates `/etc/my.cnf.d/cyberpanel-client.cnf` with `ssl=0` and `skip-ssl` and optionally appends `[client]` to `/etc/my.cnf`. At the **end** of the same function (after Ubuntu and openEuler blocks), a single block runs that: creates `cyberpanel-client.cnf`, appends `[client]` to `my.cnf` if missing, and creates `/etc/mysql/mariadb.conf.d/99-cyberpanel-client.cnf` on Debian/Ubuntu. |
So every **monolithic upgrade** run also ensures the client no-SSL config on all paths.
## Verification
After install or upgrade:
```bash
mariadb -e "SELECT 1"
# or
mariadb -e "SELECT @@version;"
```
If these work without `ERROR 2026`, the client no-SSL configuration is in effect.
## Manual fix (if needed)
See **to-do/fix-phpmyadmin-mariadb-version-on-server.md** for a manual one-off fix on a single server.

View File

@@ -0,0 +1,33 @@
# MariaDB rollback using upgrade backups
When you run a CyberPanel upgrade with MariaDB version change, an optional full backup of all databases can be created in two places:
1. **Legacy path:** `/root/cyberpanel_mariadb_backups/mariadb_backup_before_upgrade_YYYYMMDD_HHMMSS.sql.gz`
2. **Standard path:** `/root/db-upgrade-backups/YYYY-MM-DD_HHMMSS/all_databases.sql.gz`
To roll back to the previous MariaDB state (e.g. after a failed or undesired upgrade):
1. Stop MariaDB: `systemctl stop mariadb` (or `mysql`/`mysqld` on your system).
2. Restore the dump (example for the standard path):
```bash
BACKUP_DIR="/root/db-upgrade-backups/2026-02-17_010304" # use your actual folder
gunzip -c "$BACKUP_DIR/all_databases.sql.gz" | mariadb --skip-ssl -u root -p
```
Or if the backup is in the legacy location:
```bash
gunzip -c /root/cyberpanel_mariadb_backups/mariadb_backup_before_upgrade_*.sql.gz | mariadb --skip-ssl -u root -p
```
You will be prompted for the MariaDB root password (stored in `/etc/cyberpanel/mysqlPassword`).
3. If you need to reinstall the previous MariaDB server version, use the official MariaDB repo for that version, then start the service and run `mariadb-upgrade --force` if required.
**Note:** Restoring over an existing data directory is destructive. Only use this when you intend to replace the current databases with the backup. For a safe test, back up the current `/var/lib/mysql` first.
## Optional standalone version managers
For advanced MariaDB/phpMyAdmin version changes without running the full upgrade, you can use the community scripts from [cyberpanel-mods](https://github.com/master3395/cyberpanel-mods) (version-managers):
- [mariadb_version_manager_enhanced.sh](https://github.com/master3395/cyberpanel-mods/blob/main/version-managers/mariadb_version_manager_enhanced.sh) — interactive MariaDB version manager (backup, remove, add repo, install, secure).
- [mariadb_v_changer.sh](https://github.com/master3395/cyberpanel-mods/blob/main/version-managers/mariadb_v_changer.sh) — simple prompt-based MariaDB version changer.
- [phpmyadmin_v_changer.sh](https://github.com/master3395/cyberpanel-mods/blob/main/version-managers/phpmyadmin_v_changer.sh) — phpMyAdmin version changer (preserves config/signon).
CyberPanel install/upgrade now integrates equivalent behaviour (version choice, backup path, config preservation) so these scripts are optional for users who prefer a standalone workflow.

View File

@@ -1,49 +0,0 @@
# phpMyAdmin 404 After Upgrade
## Symptom
After upgrading with:
```bash
sh <(curl -s https://raw.githubusercontent.com/usmannasir/cyberpanel/v2.5.5-dev/preUpgrade.sh ...) -b v2.5.5-dev --mariadb-version 11.8
```
opening **https://YOUR_IP:2087/phpmyadmin/** (or the panel's “phpMyAdmin” link) returns **404 Not Found**.
## Cause
The upgrade step that installs phpMyAdmin (`download_install_phpmyadmin`) can fail without stopping the upgrade (e.g. network, or extract/mv path mismatch). The panel then has no `/usr/local/CyberCP/public/phpmyadmin/` directory, so the web server returns 404 for `/phpmyadmin/`.
## Fix on the server
Run the fix script **as root** on the panel server (e.g. 207.180.193.210):
```bash
# From the repo (if you have it on the server):
cd /home/cyberpanel-repo
sudo bash fix-phpmyadmin.sh
# Or one-liner (download and run from repo):
sudo bash -c 'curl -sL https://raw.githubusercontent.com/master3395/cyberpanel/v2.5.5-dev/fix-phpmyadmin.sh | bash'
```
Or run the same logic via Python:
```bash
sudo /usr/local/CyberCP/bin/python -c "
import sys; sys.path.insert(0, '/usr/local/CyberCP')
import os; os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CyberCP.settings')
from plogical.upgrade import Upgrade
Upgrade.download_install_phpmyadmin()
"
sudo chown -R lscpd:lscpd /usr/local/CyberCP/public/phpmyadmin
```
Then reload **https://YOUR_IP:2087/phpmyadmin/** (or use Databases → phpMyAdmin in the panel).
## Repo changes
- **fix-phpmyadmin.sh** — Script to install/fix phpMyAdmin on the server (run as root).
- **plogical/upgrade.py** — `download_install_phpmyadmin()`:
- Resolves extracted folder with `glob` (handles `phpMyAdmin-*-all-languages` or `phpMyAdmin-*`).
- Verifies that `public/phpmyadmin` exists after install and raises if missing so the upgrade step is not silent.

View File

@@ -1,44 +0,0 @@
# phpMyAdmin vs CLI MariaDB Version Mismatch
## Why SSH / `mariadb -V` Shows 11.8 While phpMyAdmin Shows 10.11
Two main causes:
### 1. **Different connection target (most common)**
- **CLI** (`mariadb -V`, `mariadb -e "SELECT @@version;"`) uses the default connection: usually the **main** MariaDB instance (e.g. port 3306 or default socket).
- **phpMyAdmin** previously used host **`localhost`** (hardcoded). With `localhost`, the PHP MySQL client connects via the **default Unix socket**, not necessarily the same as the main instance.
- If you have (or had) **two** MariaDB instances (e.g. main on 3306 and a second on 3307 from `mysqld_multi`, or an old 10.11 still running), the CLI can hit 11.8 while PHP's default socket pointed at the 10.11 instance.
### 2. **Client vs server version**
- `mariadb -V` prints the **client** version (e.g. 11.8). The upgrade script banner also used that for “Database (MariaDB): 11.8”.
- The **server** version is what phpMyAdmin shows. If the server was still 10.11 (e.g. wrong service restarted or second instance), phpMyAdmin correctly showed 10.11.
## Fix applied in code
- The panel now passes **host** (and port) from `/etc/cyberpanel/mysqlPassword` into the phpMyAdmin signon form.
- When the stored host is `localhost`, we send **`127.0.0.1`** so phpMyAdmin connects via **TCP to port 3306** (the main instance), not the default socket.
- So after deploy, phpMyAdmin should show the same MariaDB version as the CLI (the main 11.8 server).
## Verification on the server
Run as root:
```bash
# Server version (what phpMyAdmin should show after fix)
mariadb -e "SELECT @@version;"
# Listeners (only one MariaDB should be on 3306)
ss -tlnp | grep 3306
# Processes (check for duplicate mysqld/mariadbd)
ps aux | grep -E 'mariadb|mysqld'
```
If `SELECT @@version` shows 11.8 but phpMyAdmin still showed 10.11 before the fix, it was almost certainly a different connection (socket vs 127.0.0.1:3306 or a second instance). After the code change and a fresh phpMyAdmin login, it should report 11.8.
## If two instances exist
- Stop the old 10.11 instance (e.g. `mysqld_multi stop 1` if using `mysqld1` on 3307, or disable its service).
- Ensure only the 11.8 service (e.g. `mariadb.service`) is running and listening on 3306.

View File

@@ -1,45 +0,0 @@
# Plugins Installed Grid — Install and Verify
## How install works
1. **Grid "Install" button**
- Tries **local install** first: plugin must exist under `/home/cyberpanel/plugins/` or `/home/cyberpanel-plugins/` (with `meta.xml`).
- If the API returns **404** or **"Plugin source not found"**, the UI automatically retries **store install** (download from GitHub `master3395/cyberpanel-plugins` and install).
2. **Store install**
- Used from the Store view or as fallback when local source is missing.
- Downloads the plugin from GitHub and runs the same installer (extract, pre_install, settings/URLs, inform CyberPanel, collectstatic, post_install).
3. **"Installed" status**
- A plugin is considered installed if the **directory** exists: `/usr/local/CyberCP/<plugin_name>/`.
- If that directory exists but `meta.xml` is missing, the UI still shows "Installed". On load of `/plugins/installed`, the backend tries to restore `meta.xml` from source (if source exists).
## Making sure all grid plugins install correctly
- **Local source**
Put plugin folders (each with `meta.xml`) in:
- `/home/cyberpanel/plugins/<plugin_name>/`, or
- `/home/cyberpanel-plugins/<plugin_name>/`
Then use **Install** in the grid; local install will be used.
- **No local source**
Click **Install** in the grid; if local source is not found, the UI falls back to **store install** (GitHub). Ensure the plugin exists in `master3395/cyberpanel-plugins` (main branch).
- **Already installed but broken**
If a plugin directory exists under `/usr/local/CyberCP/` but `meta.xml` was missing, opening **Plugins → Installed** will try to copy `meta.xml` from source into the installed folder so version/update checks work.
## Quick checks on the server
```bash
# Installed plugin dirs
ls -la /usr/local/CyberCP/ | grep -E '^d'
# Local source (grid uses these for local install)
ls -la /home/cyberpanel/plugins/ 2>/dev/null || true
ls -la /home/cyberpanel-plugins/ 2>/dev/null || true
# Ensure meta.xml exists for an installed plugin (e.g. premiumPlugin)
ls -la /usr/local/CyberCP/premiumPlugin/meta.xml
```
After code changes, restart Gunicorn (or the CyberPanel app server) so the updated pluginHolder views and JS are used.

View File

@@ -1,44 +0,0 @@
# Deploy and verify latest Plugins template on server
## 1. Check if server has the latest template
On the server (207.180.193.210), run:
```bash
grep -q "installedFilterBtnAll" /usr/local/CyberCP/pluginHolder/templates/pluginHolder/plugins.html && echo "LATEST: Yes (Show / Installed only / Active only present)" || echo "LATEST: No (run deploy below)"
```
## 2. Deploy latest template to the server
**Option A — Run on the server (repo already on server)**
If the cyberpanel repo is on the same machine (e.g. at `/home/cyberpanel-repo`):
```bash
sudo bash /home/cyberpanel-repo/pluginHolder/deploy-plugins-template.sh
```
**Option B — Copy from this machine to the server**
From your dev machine (where the repo lives):
```bash
scp /home/cyberpanel-repo/pluginHolder/templates/pluginHolder/plugins.html root@207.180.193.210:/usr/local/CyberCP/pluginHolder/templates/pluginHolder/plugins.html
ssh root@207.180.193.210 "systemctl restart lscpd"
```
Then on the server, verify:
```bash
ssh root@207.180.193.210 'grep -q "installedFilterBtnAll" /usr/local/CyberCP/pluginHolder/templates/pluginHolder/plugins.html && echo "LATEST: Yes" || echo "LATEST: No"'
```
## 3. Verify in the browser
1. Open: https://207.180.193.210:2087/plugins/installed#grid
2. Ensure **Grid View** is selected.
3. You should see two rows under the view toggle:
- **Show:** [All] [Installed only] [Active only]
- **Sort by:** [Name A–Å] [Type] [Date (newest)]
If you see **Show:** and the three filter buttons, you are on the latest template.

View File

@@ -1,41 +0,0 @@
# RainLoop → SnappyMail rename
## Summary
RainLoop has been replaced by SnappyMail. All **operational** paths and the install template folder now use SnappyMail. References to "rainloop" remain only where we **migrate from** old installs (2.4.4 → 2.5.5).
## Changes made
### Repo folder
- **`install/rainloop/`** renamed to **`install/snappymail/`**
- Template file still `cyberpanel.net.ini` (SnappyMail uses same format).
### Code updated to SnappyMail paths
- **plogical/mailUtilities.py** — Template path `/usr/local/CyberCP/install/snappymail/cyberpanel.net.ini`; all data paths `/usr/local/lscp/cyberpanel/snappymail/...`.
- **install/install.py** — chown and mkdir use `snappymail`; commented blocks updated for consistency.
- **plogical/acl.py** — `chown ... /usr/local/lscp/cyberpanel/snappymail`.
- **plogical/upgrade.py** — Operational chown and backup path use snappymail.
### Left as-is (intentional)
- **Migration logic** in `plogical/upgrade.py`, `upgrade_modules/10_post_tweak.sh`, and `cyberpanel_upgrade_monolithic.sh` still uses the **source** path `/usr/local/lscp/cyberpanel/rainloop/data` when upgrading from 2.4.4: they check for old rainloop data and rsync it to `/usr/local/lscp/cyberpanel/snappymail/data/`. That "rainloop" path must stay so existing servers upgrading from RainLoop get their data migrated.
## Upgrade to 2.5.5-dev: migrate ALL links to SnappyMail
On upgrade, the following ensure every RainLoop reference becomes SnappyMail:
1. **Data migration** (existing): rsync from `/usr/local/lscp/cyberpanel/rainloop/data` to `.../snappymail/data`, and update `include.php` paths.
2. **Replace all rainloop path/URL in migrated data**: After rsync, every config file under `snappymail/data` (`.ini`, `.json`, `.php`, `.cfg`) is scanned and any occurrence of:
- `/usr/local/lscp/cyberpanel/rainloop/data` → `.../snappymail/data`
- `/rainloop/` → `/snappymail/`
- `rainloop/data` → `snappymail/data`
is replaced. So stored links and paths in SnappyMail configs point to SnappyMail.
3. **HTTP redirect /rainloop → /snappymail**: In `/usr/local/CyberCP/public/.htaccess` a 301 redirect is added (or ensured once) so that:
- `/rainloop`, `/rainloop/`, `/rainloop/anything` → `/snappymail/...`
Old bookmarks and shared links keep working.
Implemented in: `plogical/upgrade.py` (`migrateRainloopToSnappymail`), `upgrade_modules/10_post_tweak.sh`, `cyberpanel_upgrade_monolithic.sh`.
## Result
- New installs and day-to-day operations use only SnappyMail paths.
- Upgrades from versions that had RainLoop: data migrated, all config links updated to snappymail, and /rainloop URLs redirect to /snappymail.

View File

@@ -1,21 +0,0 @@
# Removed Unused install/ Folders
## Summary
Unused config folders under `install/` were removed; only the folders actually referenced by the codebase remain.
## Removed
### install/email-configs
- **Reason:** Never referenced. All code uses `install/email-configs-one` (e.g. `install/install.py`, `plogical/mailUtilities.py`, `mailServer/mailserverManager.py`).
- **Removed:** 2025-02-15.
### install/php-configs
- **Reason:** Never referenced. Code uses `install/phpconfigs` (no hyphen) only:
- `plogical/installUtilities.py`: `shutil.copytree("phpconfigs", ...)` and `include phpconfigs/php*.conf`
- `install/litespeed/conf/httpd_config.conf` and `serverStatus/litespeed/conf/httpd_config.conf`: `include phpconfigs/php53.conf` etc.
- **Note:** `php-configs` contained `php.ini` and `www.conf` (different purpose); `phpconfigs` contains `php53.conf`–`php80.conf` (LiteSpeed PHP version includes).
- **Removed:** 2025-02-15.
## Still in use
- `install/email-configs-one/` — mail configs used by install and mail utilities.
- `install/phpconfigs/` — LiteSpeed PHP version include configs used by install and httpd_config.

View File

@@ -1,37 +0,0 @@
# Install/Upgrade Support Matrix (v2.5.5-dev)
This document summarizes how install and upgrade **detect and handle** each OS in the support table. It does **not** guarantee that every combination has been tested; it reflects what the code paths are.
## Summary
| OS family | Detection | Install/upgrade path | Notes |
|-----------|-----------|----------------------|--------|
| **AlmaLinux 10, 9, 8** | `AlmaLinux-8/9/10` in `/etc/os-release` | 9/10 → `AlmaLinux9` (dnf, repo fixes, venv). 8 → `CentOS` + version 8. | Explicit branches for 9/10 (EPEL, MariaDB, python3-venv). |
| **CentOS 7** | `CentOS Linux 7` in os-release | `CentOS` + version 7. | Legacy; EOL. Uses yum, requirments-old.txt. |
| **CloudLinux 9, 8** | `CloudLinux 7/8/9` in os-release | Normalized to `CentOS` + version. Same as RHEL family. | Version from VERSION_ID (e.g. 8 → 8, 9 → 9). |
| **Debian 13, 12, 11** | `Debian GNU/Linux 11/12/13` in os-release | Treated as **Ubuntu** (`Server_OS=Ubuntu`). Version 11/12/13 from VERSION_ID. | Uses **requirments-old.txt** (not requirments.txt). No Debian-specific package blocks; gets generic apt install. install_utils has Debian 13 package mappings. |
| **RHEL 9, 8** | `Red Hat Enterprise Linux` in os-release | Normalized to `CentOS` + version 8 or 9. | Same repo/package logic as CentOS 8/9. RHEL repo names differ; AlmaLinux-specific repo fixes do not run for RHEL. |
| **RockyLinux 9, 8** | `Rocky Linux` in os-release | Normalized to `CentOS`; version 8 or 9. | Same as CentOS 8/9 (EPEL, MariaDB, venv for 9/10). |
| **Ubuntu 24.04, 22.04, 20.04** | `Ubuntu 24.04` etc. in os-release | Explicit branches for 22/24 (packages, python3-venv). 20 → specific fixes. 18 → minimal. | 24.04: externally-managed-environment handled. Uses **requirments.txt** for 22 and 24. |
## Do we *know* it works on all of them?
- **Code coverage:** Detection and branching exist for all listed OSes. AlmaLinux 8/9/10, Ubuntu 18/20/22/24, Debian 11/12/13, CentOS 7/8/9, Rocky, RHEL, CloudLinux, and openEuler have explicit or normalized paths.
- **No automated proof:** There is no CI in this repo that runs install or upgrade on each OS. “Works” is based on:
- Manual and community testing
- Code review of detection and branches
- **RHEL:** Uses the same code path as CentOS (RedHat → CentOS). RHEL 9 uses different repo IDs than AlmaLinux; if repo issues appear on RHEL 9, RHEL-specific repo handling may be needed.
- **Debian 11/12/13:** Share the “Ubuntu” path and use **requirments-old.txt**. install_utils has Debian 13 (Trixie) package mappings. No Debian-version-specific blocks in the upgrade script.
- **CentOS 7:** Marked legacy/EOL; still in the script with yum and old requirements.
## Recommendations
1. **Staging:** Test install and upgrade on a non-production VM for your chosen OS before production.
2. **CI (optional):** Add a test matrix (e.g. GitHub Actions or other CI) that runs install and/or upgrade on a subset of OSes (e.g. AlmaLinux 9, Ubuntu 22.04, Debian 12) to catch regressions.
3. **Docs:** Keep this file (or a short “Supported platforms” section) in sync with the script when adding or dropping OS versions.
## Where to look in the repo
- **Upgrade OS detection:** `cyberpanel_upgrade.sh` (lines ~160–187), `Server_OS_Version` (~187), and branches for `CentOS`/`AlmaLinux9`/`Ubuntu`/`openEuler`.
- **Install OS detection:** `install/install.py` (`preFlightsChecks.detect_os`, `get_distro`), and `install/install_utils.py` (Debian/Ubuntu version and package helpers).
- **Requirements choice:** `cyberpanel_upgrade.sh` `Download_Requirement()`: uses `requirments.txt` for version 22, 24, 9, 10; else `requirments-old.txt`.

View File

@@ -1,48 +0,0 @@
# CyberPanel Upgrade Script - Modular Layout for Debugging
## Goal
Split `cyberpanel_upgrade.sh` into modules under `upgrade_modules/` so each file is under 500 lines and easier to debug.
## Directory Layout
- `upgrade_modules/00_common.sh` - Debug_Log, Debug_Log2, Branch_Check, Check_Return, Regenerate_Cert, Retry_Command (DONE)
- `upgrade_modules/01_variables.sh` - Set_Default_Variables (DONE)
- `upgrade_modules/02_checks.sh` - Check_Root, Check_Server_IP, Check_OS, Check_Provider, Check_Argument
- `upgrade_modules/03_mariadb.sh` - Pre_Upgrade_CentOS7_MySQL, Maybe_Backup_MariaDB_Before_Upgrade, Backup_MariaDB_Before_Upgrade, Migrate_MariaDB_To_UTF8
- `upgrade_modules/04_git_url.sh` - Pre_Upgrade_Setup_Git_URL
- `upgrade_modules/05_repository.sh` - Pre_Upgrade_Setup_Repository (~490 lines)
- `upgrade_modules/06_components.sh` - Download_Requirement, Pre_Upgrade_Required_Components
- `upgrade_modules/07_branch_input.sh` - Pre_Upgrade_Branch_Input
- `upgrade_modules/08_main_upgrade.sh` - Main_Upgrade
- `upgrade_modules/09_sync.sh` - Sync_CyberCP_To_Latest
- `upgrade_modules/10_post_tweak.sh` - Post_Upgrade_System_Tweak
- `upgrade_modules/11_display_final.sh` - Post_Install_Display_Final_Info, _br, _bl, _b
## Line Ranges in Current Script
- 00_common: 99-106, 237-263, 264-337
- 01_variables: 27-98
- 02_checks: 107-148, 149-206, 207-236, 352-399
- 03_mariadb: 425-520
- 04_git_url: 400-424
- 05_repository: 521-1011
- 06_components: 1012-1298
- 07_branch_input: 1299-1311
- 08_main_upgrade: 1312-1649
- 09_sync: 1650-1688
- 10_post_tweak: 1691-2023
- 11_display_final: 2024-2118
## Main Script After Refactor
1. Root check, Sudo_Test
2. If upgrade_modules/ exists: source each 00-11; else (one-liner) download modules from GitHub by branch and source
3. Set_Default_Variables, Check_Root, Check_Server_IP, Check_OS, Check_Provider, Check_Argument
4. Branch and MariaDB prompts
5. Pre_Upgrade_Setup_Repository, Pre_Upgrade_Setup_Git_URL, Pre_Upgrade_Required_Components
6. Main_Upgrade, Sync_CyberCP_To_Latest, Post_Upgrade_System_Tweak, Post_Install_Display_Final_Info
## Status
Done: 00_common.sh, 01_variables.sh. Remaining: create 02-11 and refactor main script to loader.

View File

@@ -1,119 +0,0 @@
# Security alert: `rm -rf /home/cyberpanel/upgrade_logs`
## Is this an issue?
**No.** This is **expected behavior** from the CyberPanel upgrade process, not a sign of compromise.
## What's going on
- Your security product (e.g. OSSEC, Wazuh, or similar) flagged:
- **Command:** `sudo ... /bin/rm -rf /home/cyberpanel/upgrade_logs`
- **Context:** `PWD=/tmp/lscpd`, `USER=root`
- The CyberPanel daemon (**lscpd**) runs upgrade-related tasks. The upgrade logic uses `/home/cyberpanel/upgrade_logs` as the path for upgrade logs (see `plogical/upgrade.py`: `LogPathNew = '/home/cyberpanel/upgrade_logs'`). Cleaning that path (file or directory) before or after an upgrade is normal so the next run starts from a clean state.
- So this command is the **panel cleaning its own upgrade logs**, not an attacker.
## Why does it look “suspicious”?
- Security tools often treat **any** `rm -rf` as “dangerous” because it can delete a lot if misused.
- They also flag “system file access” or “writes/deletes under /home” to catch abuse.
- Here, the path is a **known, fixed** CyberPanel path and the process is **root from lscpd** (expected for the panel). So the alert is a **false positive** for “suspicious command” in this context.
## Why “my own local files” look suspicious
- “Local files” in the alert usually means “commands or file operations on this machine.” The product isn't saying your personal files are malicious; it's saying the **behavior** (e.g. `rm -rf` on a path under `/home`) matches a **rule** that can indicate compromise.
- In this case the “local” actor is **CyberPanel itself** (lscpd/upgrade), so the behavior is legitimate.
## What you can do
1. **Treat as expected:** No need to change passwords or hunt for backdoors solely because of this alert.
2. **Whitelist/tune the rule:** In your security product, add an exception or rule so that this specific command (or pattern) when run by root from the lscpd context is not reported, e.g.:
- Command pattern: `rm -rf /home/cyberpanel/upgrade_logs`
- Or: allow `rm -rf` for paths under `/home/cyberpanel/` when the process is lscpd/upgrade-related.
3. **Keep monitoring:** Continue to review real suspicious activity (e.g. unknown scripts, unexpected `rm -rf /` or `rm -rf /home/*`).
## Summary
- **Not a compromise** — normal CyberPanel upgrade cleanup.
- **“Suspicious”** only in the generic sense (rm -rf + /home); in context it's the panel's own operation.
- **Action:** Whitelist or tune the alert for this known-good case; no need to panic or “fix” the panel for this.
---
## Whitelist / rule examples (stop this specific case being reported)
Use the example that matches your product. After editing config, restart the agent/manager as indicated.
### OSSEC
Allow this command so it is not reported as suspicious.
**1. Local rule to ignore this command**
Create or edit a local rule file (e.g. `/var/ossec/etc/rules/local_rules.xml`) and add:
```xml
<!-- Allow CyberPanel upgrade cleanup: rm -rf /home/cyberpanel/upgrade_logs -->
<rule id="100001" level="0">
<if_sid>100002</if_sid>
<match>rm -rf /home/cyberpanel/upgrade_logs</match>
<description>Whitelist: CyberPanel upgrade log cleanup (expected)</description>
</rule>
```
If your “suspicious command” rule has a different `<rule id>`, replace `100002` with that rule's ID (so this rule only applies when that one fires). If you're not sure, you can use a broader override that matches the command and sets level 0:
```xml
<rule id="100001" level="0">
<match>rm -rf /home/cyberpanel/upgrade_logs</match>
<description>Whitelist: CyberPanel upgrade log cleanup</description>
</rule>
```
Restart OSSEC:
```bash
systemctl restart ossec
# or
/var/ossec/bin/ossec-control restart
```
**2. (Optional) Decoder to tag sudo rm**
In `/var/ossec/etc/decoders/local_decoder.xml` you can add a decoder so the command is clearly identified; the rule above is enough to stop the alert.
### Wazuh
**1. Local rule to not alert on this command**
Append to `/var/ossec/etc/rules/local_rules.xml` (Wazuh keeps OSSEC-style paths):
```xml
<!-- Whitelist CyberPanel upgrade cleanup -->
<group name="local,syscheck,">
<rule id="100001" level="0">
<match>rm -rf /home/cyberpanel/upgrade_logs</match>
<description>Whitelist: CyberPanel upgrade_logs cleanup (lscpd/upgrade)</description>
</rule>
</group>
```
If the alert is from a different rule (e.g. “suspicious command” or “syscheck”), you may need to set `<if_sid>` to that rule's ID so this rule only overrides that case.
Restart Wazuh:
```bash
systemctl restart wazuh-agent
# On manager:
systemctl restart wazuh-manager
```
**2. (Optional) Broader CyberPanel cleanup**
To allow any `rm -rf` under `/home/cyberpanel/` when the process is from lscpd/upgrade, you'd need a rule that matches both the command pattern and (if available) the process or PWD. That's product-specific; the rule above is the minimal, safe whitelist for the exact command you saw.
### Other products (generic)
- **Fail2ban / custom script:** If the alert is generated by a script that parses `auth.log` or `secure`, add an exception when the log line contains both `rm -rf` and `/home/cyberpanel/upgrade_logs`.
- **SIEM / cloud:** Add an exception or filter so that events with command `rm -rf /home/cyberpanel/upgrade_logs` and user `root` (and optionally process/source indicating lscpd) are not escalated.
Once the whitelist is in place, future runs of that CyberPanel cleanup will no longer trigger this specific alert.

View File

@@ -1,38 +0,0 @@
# Fix phpMyAdmin Showing 10.11 When 11.8 Is Installed
## 1. Fix CLI SSL error and see real server version
Run as root on the server:
```bash
# Allow mariadb client to connect without SSL (avoids ERROR 2026 when server has have_ssl=DISABLED)
mkdir -p /etc/my.cnf.d
printf '[client]\nssl=0\nskip-ssl\n' > /etc/my.cnf.d/cyberpanel-client.cnf
# If client still requires SSL, add [client] to main my.cnf (only if not already present)
grep -q '^\[client\]' /etc/my.cnf 2>/dev/null || echo -e "\n[client]\nssl=0\nskip-ssl" >> /etc/my.cnf
# Now this should work and show the *actual* server version on 3306
mariadb -e "SELECT @@version;"
```
- If it shows **11.8.x**: the server is 11.8; phpMyAdmin should show 11.8 after you **log out, clear cookies for :2087, then log in again via CyberPanel → Databases → phpMyAdmin**.
- If it still shows **10.11.x**: the process on 3306 is still 10.11. Force the 11.8 service to take over:
```bash
systemctl stop mariadb
sleep 3
systemctl start mariadb
mariadb -e "SELECT @@version;"
```
If it still shows 10.11, check:
```bash
rpm -q MariaDB-server
ss -tlnp | grep 3306
systemctl status mariadb
```
## 2. phpMyAdmin config (already correct on your server)
Your `config.inc.php` already has `host = '127.0.0.1'` and `port = '3306'`. Once the server on 3306 is 11.8 and you log in again via the panel, phpMyAdmin will show 11.8.

View File

@@ -14,7 +14,7 @@ Pre_Upgrade_CentOS7_MySQL() {
mv /etc/cnfbackup/my.cnf.d /etc/
systemctl enable mariadb 2>/dev/null || systemctl enable mysql
systemctl start mariadb 2>/dev/null || systemctl start mysql
mariadb-upgrade -uroot -p"$MySQL_Password" 2>/dev/null || mysql_upgrade -uroot -p"$MySQL_Password"
mariadb-upgrade --force -uroot -p"$MySQL_Password" 2>/dev/null || mysql_upgrade --force -uroot -p"$MySQL_Password" 2>/dev/null || true
fi
mariadb -uroot -p"$MySQL_Password" -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '$MySQL_Password';flush privileges" 2>/dev/null || mysql -uroot -p"$MySQL_Password" -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '$MySQL_Password';flush privileges"
Ensure_MariaDB_Client_No_SSL
@@ -41,6 +41,8 @@ Maybe_Backup_MariaDB_Before_Upgrade() {
Backup_MariaDB_Before_Upgrade() {
local pass="" backup_dir="/root/cyberpanel_mariadb_backups" backup_file=""
local std_backup_base="/root/db-upgrade-backups"
local std_backup_dir="${std_backup_base}/$(date +%Y-%m-%d_%H%M%S)"
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] Starting MariaDB pre-upgrade backup... (this may take a few minutes)" | tee -a /var/log/cyberpanel_upgrade_debug.log
[[ -f /etc/cyberpanel/mysqlPassword ]] || { echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB pre-upgrade backup: skipped (no password file)." | tee -a /var/log/cyberpanel_upgrade_debug.log; return 0; }
if grep -q '"mysqlpassword"' /etc/cyberpanel/mysqlPassword 2>/dev/null; then
@@ -55,6 +57,8 @@ Backup_MariaDB_Before_Upgrade() {
(mariadb-dump --skip-ssl -u root -p"$pass" --all-databases --single-transaction --routines --triggers --events 2>/dev/null || mysqldump --skip-ssl -u root -p"$pass" --all-databases --single-transaction --routines --triggers --events 2>/dev/null) | gzip > "$backup_file" 2>/dev/null
if [[ -s "$backup_file" ]]; then
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB backup created: $backup_file" | tee -a /var/log/cyberpanel_upgrade_debug.log
mkdir -p "$std_backup_dir"
cp -a "$backup_file" "$std_backup_dir/all_databases.sql.gz" 2>/dev/null && echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB backup also saved to: $std_backup_dir/all_databases.sql.gz" | tee -a /var/log/cyberpanel_upgrade_debug.log || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB pre-upgrade backup: done." | tee -a /var/log/cyberpanel_upgrade_debug.log
else
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] WARNING: MariaDB backup file empty or failed." | tee -a /var/log/cyberpanel_upgrade_debug.log

View File

@@ -259,7 +259,8 @@ EOF
sed -i 's|https://yum.mariadb.org/RPM-GPG-KEY-MariaDB|https://cyberpanel.sh/yum.mariadb.org/RPM-GPG-KEY-MariaDB|g' /etc/yum.repos.d/MariaDB.repo
fi
dnf clean metadata --disablerepo='*' --enablerepo=mariadb 2>/dev/null || true
# MariaDB 10 -> 11 or 11 -> 12: RPM scriptlet blocks in-place upgrade; do manual stop, remove old server, install target, start, mariadb-upgrade
# MariaDB 10 -> 11 or 11 -> 12: RPM scriptlet blocks in-place upgrade; do manual stop, remove old server, install target, start, mariadb-upgrade.
# Data in /var/lib/mysql is preserved; no databases are dropped.
MARIADB_OLD_10=$(rpm -qa 'MariaDB-server-10*' 2>/dev/null | head -1)
[[ -z "$MARIADB_OLD_10" ]] && MARIADB_OLD_10=$(rpm -qa 2>/dev/null | grep -E '^MariaDB-server-10\.' | head -1)
MARIADB_OLD_11=$(rpm -qa 'MariaDB-server-11*' 2>/dev/null | head -1)
@@ -278,7 +279,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed." | tee -a /var/log/cyberpanel_upgrade_debug.log
elif [[ -n "$MARIADB_OLD_11" ]] && [[ "$MARIADB_VER_REPO" =~ ^12\. ]]; then
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB 11.x detected; performing manual upgrade to $MARIADB_VER_REPO (stop, remove, install, start, mariadb-upgrade)..." | tee -a /var/log/cyberpanel_upgrade_debug.log
@@ -292,7 +293,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (11->12)." | tee -a /var/log/cyberpanel_upgrade_debug.log
else
# Normal install/upgrade (same version or 10.11)
@@ -315,7 +316,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual 11->12 fallback completed." | tee -a /var/log/cyberpanel_upgrade_debug.log
fi
fi
@@ -366,7 +367,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (AlmaLinux 9)." | tee -a /var/log/cyberpanel_upgrade_debug.log
elif [[ -n "$MARIADB_OLD_11_AL9" ]] && [[ "$MARIADB_VER_REPO" =~ ^12\. ]]; then
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB 11.x detected (AlmaLinux 9); manual upgrade to $MARIADB_VER_REPO..." | tee -a /var/log/cyberpanel_upgrade_debug.log
@@ -380,7 +381,7 @@ EOF
printf "[client]\nssl=0\nskip-ssl\n" > /etc/my.cnf.d/cyberpanel-client.cnf 2>/dev/null || true
systemctl start mariadb 2>/dev/null || true
sleep 2
mariadb-upgrade -u root 2>/dev/null || true
mariadb-upgrade --force -u root 2>/dev/null || true
echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] MariaDB manual upgrade to $MARIADB_VER_REPO completed (AlmaLinux 9, 11->12)." | tee -a /var/log/cyberpanel_upgrade_debug.log
else
dnf install -y --enablerepo=mariadb MariaDB-server MariaDB-devel 2>/dev/null || dnf install -y mariadb-server mariadb-devel

View File

@@ -31,6 +31,14 @@ echo -e "[$(date +"%Y-%m-%d %H:%M:%S")] Running: $CP_PYTHON upgrade.py $Branch_N
# Export Git user so upgrade.py clones from the same repo (master3395 or --repo override)
export CYBERPANEL_GIT_USER="${Git_User:-master3395}"
# Run from directory that contains upgrade.py (downloaded by Pre_Upgrade_Required_Components)
for d in /root/cyberpanel_upgrade_tmp /usr/local/CyberCP; do
if [[ -f "$d/upgrade.py" ]]; then
cd "$d" || true
break
fi
done
# Run upgrade.py and capture output
upgrade_output=$("$CP_PYTHON" upgrade.py "$Branch_Name" 2>&1)
RETURN_CODE=$?

View File

@@ -206,15 +206,15 @@
}
.btn-primary {
background: var(--bg-gradient);
color: white;
background: var(--bg-gradient, linear-gradient(135deg, #667eea 0%, #764ba2 100%));
color: #fff !important;
box-shadow: 0 4px 15px rgba(99, 102, 241, 0.3);
}
.btn-primary:hover {
transform: translateY(-2px);
box-shadow: 0 6px 20px rgba(99, 102, 241, 0.4);
color: white;
color: #fff !important;
}
.btn-preview {