mirror of https://github.com/Monadical-SAS/reflector.git
synced 2025-12-21 04:39:06 +00:00
- Remove encode/databases dependency, use native SQLAlchemy 2.0 async
- Convert all table definitions to Declarative Mapping pattern
- Update all controllers to accept session parameter (dependency injection)
- Convert all queries from Core style to ORM style
- Remove PostgreSQL compatibility checks (PostgreSQL only now)
- Add proper typing for engine and session factories
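
The bullets above describe the migration this file sits on top of. For orientation, here is a minimal sketch of the pattern they name, assuming illustrative columns, DSN, and factory names; reflector's real definitions live in reflector.db and reflector.db.base and will differ:

# Sketch of the SQLAlchemy 2.0 async setup named in the commit message.
# Column set, DSN, and names here are assumptions for illustration only.
from datetime import datetime

from sqlalchemy import select
from sqlalchemy.ext.asyncio import (
    AsyncSession,
    async_sessionmaker,
    create_async_engine,
)
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class TranscriptModel(Base):
    __tablename__ = "transcripts"

    id: Mapped[str] = mapped_column(primary_key=True)
    user_id: Mapped[str | None]
    created_at: Mapped[datetime]


# Typed engine and session factory (PostgreSQL only, matching the commit)
engine = create_async_engine("postgresql+asyncpg://localhost/reflector")
session_factory = async_sessionmaker(engine, expire_on_commit=False)


async def anonymous_transcripts(session: AsyncSession) -> list[TranscriptModel]:
    # ORM-style query against the mapped class rather than a Core Table,
    # with the session injected as a parameter (dependency injection)
    result = await session.execute(
        select(TranscriptModel).where(TranscriptModel.user_id.is_(None))
    )
    return list(result.scalars().all())
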
165 lines
5.4 KiB
Python
"""
|
|
Main task for cleanup old public data.
|
|
|
|
Deletes old anonymous transcripts and their associated meetings/recordings.
|
|
Transcripts are the main entry point - any associated data is also removed.
|
|
"""

import asyncio
from datetime import datetime, timedelta, timezone
from typing import TypedDict

import structlog
from celery import shared_task
from pydantic.types import PositiveInt
from sqlalchemy import delete, select

from reflector.asynctask import asynctask
from reflector.db import get_session_factory
from reflector.db.base import MeetingModel, RecordingModel, TranscriptModel
from reflector.db.transcripts import transcripts_controller
from reflector.settings import settings
from reflector.storage import get_recordings_storage

logger = structlog.get_logger(__name__)


class CleanupStats(TypedDict):
    """Statistics for cleanup operation."""

    transcripts_deleted: int
    meetings_deleted: int
    recordings_deleted: int
    errors: list[str]


async def delete_single_transcript(
    session_factory, transcript: TranscriptModel, stats: CleanupStats
):
    """Delete one transcript together with its associated meeting and recording."""
    transcript_id = transcript.id
    meeting_id = transcript.meeting_id
    recording_id = transcript.recording_id

    try:
        async with session_factory() as session:
            async with session.begin():
                if meeting_id:
                    await session.execute(
                        delete(MeetingModel).where(MeetingModel.id == meeting_id)
                    )
                    stats["meetings_deleted"] += 1
                    logger.info("Deleted associated meeting", meeting_id=meeting_id)

                if recording_id:
                    result = await session.execute(
                        select(RecordingModel).where(RecordingModel.id == recording_id)
                    )
                    # ORM-style select returns model instances, so read them via
                    # scalars() and attribute access, not mapping keys
                    recording = result.scalars().first()
                    if recording:
                        try:
                            await get_recordings_storage().delete_file(
                                recording.object_key
                            )
                        except Exception as storage_error:
                            logger.warning(
                                "Failed to delete recording from storage",
                                recording_id=recording_id,
                                object_key=recording.object_key,
                                error=str(storage_error),
                            )

                        await session.execute(
                            delete(RecordingModel).where(
                                RecordingModel.id == recording_id
                            )
                        )
                        stats["recordings_deleted"] += 1
                        logger.info(
                            "Deleted associated recording", recording_id=recording_id
                        )

        await transcripts_controller.remove_by_id(transcript_id)
        stats["transcripts_deleted"] += 1
        logger.info(
            "Deleted transcript",
            transcript_id=transcript_id,
            created_at=transcript.created_at.isoformat(),
        )
    except Exception as e:
        error_msg = f"Failed to delete transcript {transcript_id}: {str(e)}"
        logger.error(error_msg, exc_info=e)
        stats["errors"].append(error_msg)


async def cleanup_old_transcripts(
    session_factory, cutoff_date: datetime, stats: CleanupStats
):
    """Delete old anonymous transcripts and their associated recordings/meetings."""
    query = select(TranscriptModel).where(
        (TranscriptModel.created_at < cutoff_date)
        & (TranscriptModel.user_id.is_(None))
    )

    async with session_factory() as session:
        result = await session.execute(query)
        old_transcripts = result.scalars().all()

    logger.info(f"Found {len(old_transcripts)} old transcripts to delete")

    for transcript in old_transcripts:
        await delete_single_transcript(session_factory, transcript, stats)


def log_cleanup_results(stats: CleanupStats):
    logger.info(
        "Cleanup completed",
        transcripts_deleted=stats["transcripts_deleted"],
        meetings_deleted=stats["meetings_deleted"],
        recordings_deleted=stats["recordings_deleted"],
        errors_count=len(stats["errors"]),
    )

    if stats["errors"]:
        logger.warning(
            "Cleanup completed with errors",
            errors=stats["errors"][:10],
        )


async def cleanup_old_public_data(
    days: PositiveInt | None = None,
) -> CleanupStats | None:
    """Delete public data older than the retention window; no-op unless PUBLIC_MODE."""
    if days is None:
        days = settings.PUBLIC_DATA_RETENTION_DAYS

    if not settings.PUBLIC_MODE:
        logger.info("Skipping cleanup - not a public instance")
        return None

    cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)
    logger.info(
        "Starting cleanup of old public data",
        cutoff_date=cutoff_date.isoformat(),
    )

    stats: CleanupStats = {
        "transcripts_deleted": 0,
        "meetings_deleted": 0,
        "recordings_deleted": 0,
        "errors": [],
    }

    session_factory = get_session_factory()
    await cleanup_old_transcripts(session_factory, cutoff_date, stats)

    log_cleanup_results(stats)
    return stats


@shared_task(
    autoretry_for=(Exception,),
    retry_kwargs={"max_retries": 3, "countdown": 300},
)
@asynctask
def cleanup_old_public_data_task(days: int | None = None):
    asyncio.run(cleanup_old_public_data(days=days))
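
Since cleanup_old_public_data_task is a plain @shared_task with retries, a deployment would typically trigger it from Celery beat. A hypothetical schedule entry follows; the task's module path and the daily cadence are assumptions, not taken from reflector's configuration:

# Hypothetical beat entry; task path and cadence are illustrative assumptions.
from celery.schedules import crontab

beat_schedule = {
    "cleanup-old-public-data": {
        "task": "reflector.worker.cleanup.cleanup_old_public_data_task",  # assumed path
        "schedule": crontab(hour=4, minute=0),  # once a day at 04:00 UTC
        "kwargs": {"days": None},  # None falls back to PUBLIC_DATA_RETENTION_DAYS
    },
}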