reflector/server/reflector/worker/cleanup.py
Mathieu Virbel 617a1c8b32 refactor: improve session management across worker tasks and pipelines
- Remove "if session" anti-pattern from all functions
- Functions now require explicit AsyncSession parameters instead of optional session_factory
- Worker tasks (Celery) create sessions at top level using session_factory
- Add proper AsyncSession type annotations to all session parameters
- Update cleanup.py: delete_single_transcript, cleanup_old_transcripts, cleanup_old_public_data
- Update process.py: process_recording, process_meetings, reprocess_failed_recordings
- Update ics_sync.py: sync_room_ics, sync_all_ics_calendars, create_upcoming_meetings
- Update pipeline classes: get_transcript methods now require session
- Fix tests to pass sessions correctly

Benefits:
- Better type safety and IDE support with explicit AsyncSession typing
- Clear transaction boundaries with sessions created at task level
- Consistent session management pattern across codebase
- No ambiguity about session vs session_factory usage
2025-09-23 08:39:50 -06:00
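
The shape of the change, as a minimal sketch (the decorators and session factory are the ones this file uses; `do_work` and `do_work_task` are illustrative names, not part of the diff):

from celery import shared_task
from sqlalchemy.ext.asyncio import AsyncSession

from reflector.asynctask import asynctask
from reflector.db import get_session_factory


async def do_work(session: AsyncSession, item_id: str) -> None:
    """Business logic takes an explicit session; no `if session` branching."""
    ...


@shared_task
@asynctask
async def do_work_task(item_id: str):
    # The Celery task creates the session and owns the transaction boundary.
    session_factory = get_session_factory()
    async with session_factory() as session:
        async with session.begin():
            await do_work(session, item_id)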


"""
Main task for cleanup old public data.
Deletes old anonymous transcripts and their associated meetings/recordings.
Transcripts are the main entry point - any associated data is also removed.
"""
from datetime import datetime, timedelta, timezone
from typing import TypedDict

import structlog
from celery import shared_task
from pydantic.types import PositiveInt
from sqlalchemy import delete, select
from sqlalchemy.ext.asyncio import AsyncSession

from reflector.asynctask import asynctask
from reflector.db import get_session_factory
from reflector.db.base import MeetingModel, RecordingModel, TranscriptModel
from reflector.db.transcripts import transcripts_controller
from reflector.settings import settings
from reflector.storage import get_recordings_storage

logger = structlog.get_logger(__name__)


class CleanupStats(TypedDict):
    """Statistics for cleanup operation."""

    transcripts_deleted: int
    meetings_deleted: int
    recordings_deleted: int
    errors: list[str]


async def delete_single_transcript(
    session: AsyncSession, transcript_data: dict, stats: CleanupStats
):
    """Delete one transcript plus its associated meeting and recording."""
    transcript_id = transcript_data["id"]
    meeting_id = transcript_data["meeting_id"]
    recording_id = transcript_data["recording_id"]

    try:
        if meeting_id:
            await session.execute(
                delete(MeetingModel).where(MeetingModel.id == meeting_id)
            )
            stats["meetings_deleted"] += 1
            logger.info("Deleted associated meeting", meeting_id=meeting_id)

        if recording_id:
            result = await session.execute(
                select(RecordingModel).where(RecordingModel.id == recording_id)
            )
            recording = result.mappings().first()
            if recording:
                # Best effort: a storage failure should not block the DB cleanup.
                try:
                    await get_recordings_storage().delete_file(recording["object_key"])
                except Exception as storage_error:
                    logger.warning(
                        "Failed to delete recording from storage",
                        recording_id=recording_id,
                        object_key=recording["object_key"],
                        error=str(storage_error),
                    )
                await session.execute(
                    delete(RecordingModel).where(RecordingModel.id == recording_id)
                )
                stats["recordings_deleted"] += 1
                logger.info("Deleted associated recording", recording_id=recording_id)

        await transcripts_controller.remove_by_id(session, transcript_id)
        stats["transcripts_deleted"] += 1
        logger.info(
            "Deleted transcript",
            transcript_id=transcript_id,
            created_at=transcript_data["created_at"].isoformat(),
        )
    except Exception as e:
        error_msg = f"Failed to delete transcript {transcript_id}: {str(e)}"
        logger.error(error_msg, exc_info=e)
        stats["errors"].append(error_msg)


async def cleanup_old_transcripts(
    session: AsyncSession, cutoff_date: datetime, stats: CleanupStats
):
    """Delete old anonymous transcripts and their associated recordings/meetings."""
    query = select(
        TranscriptModel.id,
        TranscriptModel.meeting_id,
        TranscriptModel.recording_id,
        TranscriptModel.created_at,
    ).where(
        (TranscriptModel.created_at < cutoff_date) & (TranscriptModel.user_id.is_(None))
    )
    result = await session.execute(query)
    old_transcripts = result.mappings().all()
    logger.info(f"Found {len(old_transcripts)} old transcripts to delete")

    for transcript_data in old_transcripts:
        try:
            await delete_single_transcript(session, transcript_data, stats)
        except Exception as e:
            error_msg = f"Failed to delete transcript {transcript_data['id']}: {str(e)}"
            logger.error(error_msg, exc_info=e)
            stats["errors"].append(error_msg)


def log_cleanup_results(stats: CleanupStats):
    logger.info(
        "Cleanup completed",
        transcripts_deleted=stats["transcripts_deleted"],
        meetings_deleted=stats["meetings_deleted"],
        recordings_deleted=stats["recordings_deleted"],
        errors_count=len(stats["errors"]),
    )
    if stats["errors"]:
        logger.warning(
            "Cleanup completed with errors",
            errors=stats["errors"][:10],  # cap the log payload at the first 10 errors
        )


async def cleanup_old_public_data(
    session: AsyncSession,
    days: PositiveInt | None = None,
) -> CleanupStats | None:
    if days is None:
        days = settings.PUBLIC_DATA_RETENTION_DAYS

    if not settings.PUBLIC_MODE:
        logger.info("Skipping cleanup - not a public instance")
        return None

    cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)
    logger.info(
        "Starting cleanup of old public data",
        cutoff_date=cutoff_date.isoformat(),
    )

    stats: CleanupStats = {
        "transcripts_deleted": 0,
        "meetings_deleted": 0,
        "recordings_deleted": 0,
        "errors": [],
    }
    await cleanup_old_transcripts(session, cutoff_date, stats)
    log_cleanup_results(stats)
    return stats


@shared_task(
    autoretry_for=(Exception,),
    retry_kwargs={"max_retries": 3, "countdown": 300},
)
@asynctask
async def cleanup_old_public_data_task(days: int | None = None):
    # The worker task creates the session and owns the transaction boundary.
    session_factory = get_session_factory()
    async with session_factory() as session:
        async with session.begin():
            await cleanup_old_public_data(session, days=days)
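
For reference, a hedged example of triggering the task from application code; `.delay` is the standard Celery enqueue API, and the 30-day override is an arbitrary illustration:

# Enqueue on a Celery worker; omitting days falls back to
# settings.PUBLIC_DATA_RETENTION_DAYS inside cleanup_old_public_data.
cleanup_old_public_data_task.delay(days=30)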