- Add endpoints: telezab/users, telezab/users/add, telezab/users/del, telezab/users/get
- Add endpoints: telezab/regions, telezab/regions/add, telezab/regions/del, telezab/regions/get
- Rework Active Triggers button: it no longer requires a subscription
- Rework Help button
- Add an option to choose which notification type to receive: All or Disaster Only
- Rework Settings button: removed some misc buttons
- Rework registration mechanism: it now uses a JSON POST to users/add (see the sketch below)
- Rework formatting of Zabbix triggers for Active Triggers and notifications from Zabbix
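Registration now goes through a JSON POST to the users/add endpoint instead of the old in-bot flow. A minimal sketch of such a call is below, assuming a local Flask server and the requests library; the base URL and the payload field names are illustrative guesses, not taken from this commit:

import requests

# Hypothetical base URL and field names; adjust to the actual telezab API.
BASE_URL = 'http://localhost:5000/telezab'

payload = {
    'chat_id': 123456789,          # Telegram chat to register (assumed field)
    'notification_type': 'All',    # 'All' or 'Disaster Only' (assumed field)
}

response = requests.post(f'{BASE_URL}/users/add', json=payload, timeout=10)
response.raise_for_status()
print(response.json())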
Python · 158 lines · 5.9 KiB
import logging
from logging.config import dictConfig
from logging.handlers import TimedRotatingFileHandler
import os
import zipfile
from datetime import datetime, timedelta


class UTF8StreamHandler(logging.StreamHandler):
    """StreamHandler that switches its stream to UTF-8 when the stream supports it."""

    def __init__(self, stream=None):
        super().__init__(stream)
        # Reconfigure the stream resolved by the base class (sys.stderr when None is passed)
        self.setStream(self.stream)

    def setStream(self, stream):
        result = super().setStream(stream)
        # Text streams such as sys.stdout expose reconfigure() since Python 3.7
        if hasattr(stream, 'reconfigure'):
            stream.reconfigure(encoding='utf-8')
        return result


class FilterByMessage(logging.Filter):
    def filter(self, record):
        # Filter out records whose message contains 'Received ... new updates'
        return 'Received ' not in record.getMessage()


class LogManager:
    def __init__(self, log_dir='logs', retention_days=30):
        self.log_dir = log_dir
        self.retention_days = retention_days
        self.log_files = {
            'flask': os.path.join(self.log_dir, 'flask.log'),
            'flask_error': os.path.join(self.log_dir, 'flask_error.log'),
            'app': os.path.join(self.log_dir, 'app.log'),
            'app_error': os.path.join(self.log_dir, 'app_error.log'),
        }

        # Ensure the log directory exists
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Setup logging configuration
        self.setup_logging()

    def setup_logging(self):
        dictConfig({
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'default': {
                    'format': '[%(asctime)s] %(levelname)s %(module)s: %(message)s',
                },
                'error': {
                    'format': '[%(asctime)s] %(levelname)s %(module)s: %(message)s',
                },
            },
            'handlers': {
                # UTF-8 console output shared by all loggers
                'console': {
                    'class': 'log_manager.UTF8StreamHandler',
                    'stream': 'ext://sys.stdout',
                    'formatter': 'default',
                },
                # Per-component files, rotated at midnight and kept for retention_days
                'flask_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['flask'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'default',
                    'encoding': 'utf-8',
                },
                # ERROR-and-above records are duplicated into a separate file
                'flask_error_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['flask_error'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'error',
                    'encoding': 'utf-8',
                    'level': 'ERROR',
                },
                'app_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['app'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'default',
                    'encoding': 'utf-8',
                },
                'app_error_file': {
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'filename': self.log_files['app_error'],
                    'when': 'midnight',
                    'backupCount': self.retention_days,
                    'formatter': 'error',
                    'encoding': 'utf-8',
                    'level': 'ERROR',
                },
            },
            'loggers': {
                'flask': {
                    'level': 'INFO',
                    'handlers': ['flask_file', 'flask_error_file', 'console'],
                    'propagate': False,
                },
                'telebot': {
                    'level': 'INFO',
                    'handlers': ['app_file', 'app_error_file', 'console'],
                    'propagate': False,
                },
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console'],
            }
        })

    def archive_old_logs(self):
        """Archives old log files and removes logs older than retention_days."""
        # Get yesterday's date
        yesterday_date = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')

        for log_name, log_file in self.log_files.items():
            if os.path.exists(log_file):
                archive_name = f"{log_name}_{yesterday_date}.zip"
                archive_path = os.path.join(self.log_dir, archive_name)

                # Archive the log file
                with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
                    zipf.write(log_file, arcname=os.path.basename(log_file))

                # Remove the old log file after archiving
                os.remove(log_file)

        # Clean up old archives
        self.cleanup_old_archives()

    def cleanup_old_archives(self):
        """Deletes archived logs older than retention_days."""
        now = datetime.now()
        cutoff = now - timedelta(days=self.retention_days)

        for file in os.listdir(self.log_dir):
            if file.endswith('.zip'):
                file_path = os.path.join(self.log_dir, file)
                file_time = datetime.fromtimestamp(os.path.getmtime(file_path))
                if file_time < cutoff:
                    os.remove(file_path)

    def schedule_log_rotation(self):
        """Schedules daily log rotation and archiving."""
        from threading import Timer
        now = datetime.now()
        next_midnight = now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
        delay = (next_midnight - now).total_seconds()

        Timer(delay, self.rotate_and_archive_logs).start()

    def rotate_and_archive_logs(self):
        """Rotates and archives logs."""
        self.archive_old_logs()
        self.schedule_log_rotation()  # Schedule the next rotation
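
A minimal usage sketch, assuming this module is log_manager.py and is wired in from the application's startup code (the startup module and the log call below are illustrative, not part of this file):

# Illustrative startup code, not part of log_manager.py
import logging
from log_manager import LogManager

log_manager = LogManager(log_dir='logs', retention_days=30)  # dictConfig is applied in __init__
log_manager.schedule_log_rotation()                          # arm the first midnight archive run

logger = logging.getLogger('telebot')  # routed to app.log, app_error.log and the console
logger.info('Bot started')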