import logging
import os
import zipfile
from datetime import datetime, timedelta
from logging.config import dictConfig
from logging.handlers import TimedRotatingFileHandler


class UTF8StreamHandler(logging.StreamHandler):
    """StreamHandler that reconfigures its stream to UTF-8 when supported."""

    def __init__(self, stream=None):
        # Let the base class pick the default stream (sys.stderr) when
        # stream is None, then reconfigure whatever was actually chosen.
        # (The original called self.setStream(stream) here, which with
        # stream=None replaced the default stream with None and broke
        # every subsequent emit().)
        super().__init__(stream)
        if hasattr(self.stream, 'reconfigure'):
            self.stream.reconfigure(encoding='utf-8')

    def setStream(self, stream):
        """Replace the underlying stream, switching it to UTF-8 if supported.

        Returns the previous stream, preserving StreamHandler.setStream's
        contract (the original override silently discarded it).
        """
        old_stream = super().setStream(stream)
        if hasattr(stream, 'reconfigure'):
            stream.reconfigure(encoding='utf-8')
        return old_stream


class FilterByMessage(logging.Filter):
    """Drop polling noise such as 'Received 1 new updates' messages."""

    def filter(self, record):
        # Reject any record whose formatted message contains 'Received '
        return 'Received ' not in record.getMessage()


class LogManager:
    """Central logging setup: per-component log files, rotation, archiving.

    Parameters
    ----------
    log_dir : str
        Directory where log files and zip archives are stored.
    retention_days : int
        How many days of rotated logs / archives to keep.
    """

    def __init__(self, log_dir='logs', retention_days=30):
        self.log_dir = log_dir
        self.retention_days = retention_days
        # Map of component key -> log file path
        self.log_files = {
            'flask': os.path.join(self.log_dir, 'flask.log'),
            'flask_error': os.path.join(self.log_dir, 'flask_error.log'),
            'app': os.path.join(self.log_dir, 'app.log'),
            'app_error': os.path.join(self.log_dir, 'app_error.log'),
            'zabbix': os.path.join(self.log_dir, 'zabbix.log'),
            'zabbix_error': os.path.join(self.log_dir, 'zabbix_error.log'),
            'debug': os.path.join(self.log_dir, 'debug.log'),
        }
        # Ensure the log directory exists (exist_ok avoids a TOCTOU race
        # between the exists() check and makedirs() in the original)
        os.makedirs(self.log_dir, exist_ok=True)
        # Setup logging configuration
        self.setup_logging()

    def setup_logging(self):
        """(Re)apply the dictConfig for all formatters/handlers/loggers.

        Safe to call repeatedly — archive_old_logs() relies on this to
        restore the file handlers it closed.
        """
        dictConfig({
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'default': {
                    'format': '[%(asctime)s] %(levelname)s %(module)s: %(message)s',
                },
                'error': {
                    'format': '[%(asctime)s] %(levelname)s %(module)s: %(message)s',
                },
                'werkzeug': {
                    'format': '[%(asctime)s] %(levelname)s %(message)s'
                },
                'debug': {
                    'format': '[%(asctime)s] %(levelname)s %(module)s [%(funcName)s:%(lineno)d]: %(message)s'
                }
            },
            'filters': {
                'filter_by_message': {
                    '()': FilterByMessage,
                }
            },
            'handlers': {
                'telebot_console': {
                    'class': 'utilities.log_manager.UTF8StreamHandler',
                    'stream': 'ext://sys.stdout',
                    'formatter': 'default',
                    'filters': ['filter_by_message'],
                },
                'flask_console': {
                    'class': 'utilities.log_manager.UTF8StreamHandler',
                    'stream': 'ext://sys.stdout',
                    'formatter': 'werkzeug',
                },
                'flask_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['flask'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'werkzeug',
                    'encoding': 'utf-8',
                },
                'flask_error_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['flask_error'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'werkzeug',
                    'encoding': 'utf-8',
                    'level': 'ERROR',
                },
                'app_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['app'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'default',
                    'encoding': 'utf-8',
                },
                'app_error_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['app_error'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'error',
                    'encoding': 'utf-8',
                    'level': 'ERROR',
                },
                'zabbix_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['zabbix'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'default',
                    'encoding': 'utf-8',
                },
                'zabbix_error_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['zabbix_error'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'error',
                    'encoding': 'utf-8',
                    'level': 'ERROR',
                },
                'debug_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['debug'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'debug',
                    'encoding': 'utf-8',
                    'level': 'DEBUG',
                },
            },
            'loggers': {
                'flask': {
                    'level': 'DEBUG',
                    'handlers': ['flask_file', 'flask_error_file', 'flask_console'],
                    'propagate': False,
                },
                'telebot': {
                    'level': 'DEBUG',
                    'handlers': ['app_file', 'app_error_file', 'telebot_console'],
                    'propagate': False,
                },
                'werkzeug': {
                    'level': 'DEBUG',
                    'handlers': ['flask_file', 'flask_error_file', 'flask_console'],
                    'propagate': False,
                },
                'flask_ldap3_login': {
                    'level': 'DEBUG',
                    'handlers': ['flask_file', 'flask_error_file', 'flask_console'],
                    'propagate': False,
                },
                'flask_login': {
                    'level': 'DEBUG',
                    'handlers': ['flask_file', 'flask_error_file', 'flask_console'],
                    'propagate': False,
                },
                'pyzabbix': {
                    'level': 'ERROR',
                    'handlers': ['zabbix_file', 'zabbix_error_file', 'flask_console'],
                    'propagate': False,
                },
                'debug': {
                    'level': 'DEBUG',
                    'handlers': ['debug_file'],
                    'propagate': False,
                },
            }
        })

    def change_log_level(self, component, level):
        """Change the log level of *component* and all of its handlers.

        Returns a (success, message) tuple; never raises on bad input.
        NOTE: this also lowers the threshold of ERROR-only file handlers
        attached to the logger — by design of the original API.
        """
        if level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
            return False, 'Invalid log level'
        log_level = getattr(logging, level, logging.DEBUG)
        if component in self.get_all_loggers():
            logger = logging.getLogger(component)
            logger.setLevel(log_level)
            for handler in logger.handlers:
                handler.setLevel(log_level)
            return True, f'Log level for {component} changed to {level}'
        else:
            return False, 'Invalid component'

    def get_all_loggers(self):
        """Returns a list of all configured loggers."""
        return list(logging.Logger.manager.loggerDict.keys())

    def archive_old_logs(self):
        """Archive existing log files under yesterday's date and delete them.

        File handlers writing to an archived file are closed first (required
        on Windows, where an open file cannot be removed), and the logging
        configuration is re-applied afterwards so logging keeps working —
        the original left the loggers permanently without file handlers.
        """
        yesterday_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
        archived_any = False
        for log_name, log_file in self.log_files.items():
            if not os.path.exists(log_file):
                continue
            # Map the file key to the logger that owns it: 'app*' files
            # belong to the 'telebot' logger; anything not configured under
            # its own name falls back to 'flask'.
            if log_name in logging.Logger.manager.loggerDict:
                owner = log_name
            elif log_name in ('app', 'app_error'):
                owner = 'telebot'
            else:
                owner = 'flask'
            logger = logging.getLogger(owner)
            # baseFilename is always absolute; normalize our path so the
            # comparison can actually match (the original compared a
            # relative path and never found the handler).
            target = os.path.abspath(log_file)
            for handler in logger.handlers[:]:  # copy: list mutated in loop
                if isinstance(handler, TimedRotatingFileHandler) and handler.baseFilename == target:
                    handler.close()
                    logger.removeHandler(handler)
            archive_name = f"{log_name}_{yesterday_date}.zip"
            archive_path = os.path.join(self.log_dir, archive_name)
            # Archive the log file
            with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                zipf.write(log_file, arcname=os.path.basename(log_file))
            # Remove the old log file after archiving
            os.remove(log_file)
            archived_any = True
        if archived_any:
            # Restore the handlers closed above
            self.setup_logging()
        # Clean up old archives
        self.cleanup_old_archives()

    def configure_werkzeug_logging(self):
        """Disable Werkzeug's built-in logging and install our own handler."""
        werkzeug_logger = logging.getLogger('werkzeug')
        werkzeug_logger.handlers = []  # drop any existing handlers
        # Attach a custom rotating file handler with our log format
        handler = TimedRotatingFileHandler(
            self.log_files['flask'],
            when='midnight',
            backupCount=self.retention_days,
            encoding='utf-8',
        )
        handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)s %(message)s'))
        werkzeug_logger.addHandler(handler)
        # Prevent duplicate records from propagating to ancestor loggers
        werkzeug_logger.propagate = False

    def cleanup_old_archives(self):
        """Deletes archived logs older than retention_days."""
        now = datetime.now()
        cutoff = now - timedelta(days=self.retention_days)
        for file in os.listdir(self.log_dir):
            if file.endswith('.zip'):
                file_path = os.path.join(self.log_dir, file)
                file_time = datetime.fromtimestamp(os.path.getmtime(file_path))
                if file_time < cutoff:
                    os.remove(file_path)

    def schedule_log_rotation(self):
        """Schedules daily log rotation and archiving at the next midnight."""
        from threading import Timer
        now = datetime.now()
        next_midnight = now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
        delay = (next_midnight - now).total_seconds()
        timer = Timer(delay, self.rotate_and_archive_logs)
        # Daemonize so a pending timer (up to 24h) cannot keep the
        # process alive at shutdown (the original used a non-daemon Timer).
        timer.daemon = True
        timer.start()

    def rotate_and_archive_logs(self):
        """Rotates and archives logs, then schedules the next rotation."""
        self.archive_old_logs()
        self.schedule_log_rotation()  # Schedule the next rotation