# /opt/cloudlinux/venv/lib64/python3.11/site-packages/lvestats/lib/commons/proctitle.py
# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT

import logging
import os
import re
import time

from lvestats.lib.commons.func import get_all_user_domains, normalize_domain


class Proctitle(object):
    """
    Class for working with mod_procurl files.
    """
    def __init__(self, shm_dir="/dev/shm/"):
        self.now = 0
        self.log = logging.getLogger('Proctitle')
        self.SHM_DIR = shm_dir
        self.FILE_PATTERN = re.compile(r"apache_title_shm_[0-9]+_[0-9]+_[0-9]+$", re.IGNORECASE)
        self.parsed_data = self._get_all_data()

    def _get_all_data(self):
        """
        1) Get all files in self.SHM_DIR that match the regexp self.FILE_PATTERN.
        2) Read every file up to the first NUL ('\\x00') byte.
        3) If the Apache process is idle, the file content equals "httpd";
           otherwise it should contain 5 values separated by spaces.
        4) Split every interesting line and return the result.

        :return: list of lists [[Timestamp, Domain, Http type, Path, Http version, Pid], ...]
        """
        result = []
        if os.path.exists(self.SHM_DIR):
            files = filter(self.FILE_PATTERN.search, os.listdir(self.SHM_DIR))
            self.now = time.time()
            for file in files:
                try:
                    file_name = os.path.join(self.SHM_DIR, file)
                    with open(file_name, 'r', encoding='utf-8') as http_stats:
                        http_stats_line = http_stats.readline()
                    http_stats_line = http_stats_line.split('\x00')[0]
                    if http_stats_line != "httpd":
                        http_stats_line_split = http_stats_line.split(" ")
                        if len(http_stats_line_split) == 5:
                            # The Pid is the first numeric field in the file name:
                            # apache_title_shm_<pid>_<n>_<n>
                            result.append(http_stats_line_split + [file.split("_")[-3]])
                        else:
                            self.log.debug("Number of values in file %s is not equal to 5", file)
                except IOError as e:
                    self.log.debug(str(e))
        return result

    def get_user_data(self, username):
        """
        Returns information about the pages processed for a given user's domains.

        :param username: name of the user whose domains should be matched
        :return: list of lists [[Pid, Domain, Http type, Path, Http version, Time], ...]
        """
        all_domains = get_all_user_domains(username)
        normalized_domains = set(map(normalize_domain, all_domains))
        result = []
        for data in self.parsed_data:
            if normalize_domain(data[1]) in normalized_domains:
                result.append([
                    data[5],  # Pid
                    data[1],  # Domain
                    data[2],  # Http type
                    data[3],  # Path
                    data[4],  # Http version
                    f"{self.now - float(data[0]):.1f}",  # seconds since the request started
                ])
        return result
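

# A minimal usage sketch, not part of the module above: it assumes Apache has
# populated apache_title_shm_* files under /dev/shm/, and "exampleuser" is a
# hypothetical account whose domains get_all_user_domains() can resolve.
if __name__ == '__main__':
    proctitle = Proctitle()  # scans /dev/shm/ once, at construction time
    # Each row is [Pid, Domain, Http type, Path, Http version, Time]; every
    # field is already a string, so rows can be joined directly for display.
    for row in proctitle.get_user_data('exampleuser'):
        print(' '.join(row))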