update audio-deleted flow

This commit is contained in:
Igor Loskutov
2025-06-18 15:43:34 -04:00
parent 6cb6d90b9a
commit c23e0e07ef
15 changed files with 282 additions and 76 deletions

View File

@@ -0,0 +1,25 @@
"""add audio_deleted field to transcript
Revision ID: 20250618140000
Revises: 20250617140003
Create Date: 2025-06-18 14:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "20250618140000"
down_revision: Union[str, None] = "20250617140003"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``audio_deleted`` flag to the ``transcript`` table."""
    audio_deleted = sa.Column("audio_deleted", sa.Boolean(), nullable=True)
    op.add_column("transcript", audio_deleted)
def downgrade() -> None:
    """Remove the ``audio_deleted`` column added by :func:`upgrade`."""
    op.drop_column("transcript", "audio_deleted")

View File

@@ -70,6 +70,7 @@ transcripts = sqlalchemy.Table(
Enum(SourceKind, values_callable=lambda obj: [e.value for e in obj]), Enum(SourceKind, values_callable=lambda obj: [e.value for e in obj]),
nullable=False, nullable=False,
), ),
sqlalchemy.Column("audio_deleted", sqlalchemy.Boolean, nullable=True),
) )
def generate_transcript_name() -> str: def generate_transcript_name() -> str:
@@ -157,6 +158,7 @@ class Transcript(BaseModel):
recording_id: str | None = None recording_id: str | None = None
zulip_message_id: int | None = None zulip_message_id: int | None = None
source_kind: SourceKind source_kind: SourceKind
audio_deleted: bool | None = None
def add_event(self, event: str, data: BaseModel) -> TranscriptEvent: def add_event(self, event: str, data: BaseModel) -> TranscriptEvent:
ev = TranscriptEvent(event=event, data=data.model_dump()) ev = TranscriptEvent(event=event, data=data.model_dump())
@@ -544,8 +546,14 @@ class TranscriptController:
Move mp3 file to storage Move mp3 file to storage
""" """
if transcript.audio_deleted:
raise FileNotFoundError(f"Invalid state of transcript {transcript.id}: audio_deleted mark is set true")
if transcript.audio_location == "local": if transcript.audio_location == "local":
# store the audio on external storage if it's not already there # store the audio on external storage if it's not already there
if not transcript.audio_mp3_filename.exists():
raise FileNotFoundError(f"Audio file not found: {transcript.audio_mp3_filename}")
await get_transcripts_storage().put_file( await get_transcripts_storage().put_file(
transcript.storage_audio_path, transcript.storage_audio_path,
transcript.audio_mp3_filename.read_bytes(), transcript.audio_mp3_filename.read_bytes(),

View File

@@ -59,6 +59,13 @@ from reflector.zulip import (
send_message_to_zulip, send_message_to_zulip,
update_zulip_message, update_zulip_message,
) )
from reflector.db.meetings import meetings_controller, meeting_consent_controller
from reflector.db.recordings import recordings_controller
from reflector.storage import get_transcripts_storage
import boto3
from structlog import BoundLogger as Logger from structlog import BoundLogger as Logger
@@ -470,6 +477,7 @@ class PipelineMainWaveform(PipelineMainFromTopics):
@get_transcript @get_transcript
async def pipeline_remove_upload(transcript: Transcript, logger: Logger): async def pipeline_remove_upload(transcript: Transcript, logger: Logger):
# for future changes: note that a consent process also happens beforehand, and users may not consent to keeping files. Currently we delete regardless, so there is no need to handle that here.
logger.info("Starting remove upload") logger.info("Starting remove upload")
uploads = transcript.data_path.glob("upload.*") uploads = transcript.data_path.glob("upload.*")
for upload in uploads: for upload in uploads:
@@ -520,6 +528,10 @@ async def pipeline_upload_mp3(transcript: Transcript, logger: Logger):
logger.info("No storage backend configured, skipping mp3 upload") logger.info("No storage backend configured, skipping mp3 upload")
return return
if transcript.audio_deleted:
logger.info("Skipping MP3 upload - audio marked as deleted")
return
logger.info("Starting upload mp3") logger.info("Starting upload mp3")
# If the audio mp3 is not available, just skip # If the audio mp3 is not available, just skip
@@ -558,6 +570,67 @@ async def pipeline_summaries(transcript: Transcript, logger: Logger):
logger.info("Summaries done") logger.info("Summaries done")
@get_transcript
async def cleanup_consent(transcript: Transcript, logger: Logger):
    """Delete every audio artifact for *transcript* if recording consent was denied.

    Consent is resolved through the transcript's recording -> meeting chain.
    If the lookup itself fails we fail closed (treat consent as denied) so
    audio is never retained by accident.  Deletions are best-effort and
    non-transactional: each location (Whereby S3 bucket, transcript storage,
    local files) is attempted independently and failures are logged, never
    raised.  Transcript text and metadata are always kept.
    """
    logger.info("Starting consent cleanup")

    consent_denied = False
    recording = None
    try:
        if transcript.recording_id:
            recording = await recordings_controller.get_by_id(transcript.recording_id)
            if recording and recording.meeting_id:
                meeting = await meetings_controller.get_by_id(recording.meeting_id)
                if meeting:
                    consent_denied = await meeting_consent_controller.has_any_denial(meeting.id)
    except Exception as e:
        # Fail closed: if consent cannot be determined, assume denial and clean up.
        logger.error(f"Failed to fetch consent: {e}")
        consent_denied = True

    if not consent_denied:
        logger.info("Consent approved, keeping all files")
        return

    logger.info("Consent denied, cleaning up all related audio files")

    # 1. Delete original Whereby recording from S3
    if recording and recording.s3_bucket and recording.s3_key:
        s3_whereby = boto3.client(
            "s3",
            aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
        )
        try:
            s3_whereby.delete_object(Bucket=recording.s3_bucket, Key=recording.s3_key)
            logger.info(f"Deleted original Whereby recording: {recording.s3_bucket}/{recording.s3_key}")
        except Exception as e:
            logger.error(f"Failed to delete Whereby recording: {e}")

    # Mark the transcript first so readers stop serving audio even if one of
    # the deletions below fails.
    # non-transactional, files marked for deletion not actually deleted is possible
    await transcripts_controller.update(transcript, {"audio_deleted": True})

    # 2. Delete processed audio from transcript storage S3 bucket
    if transcript.audio_location == "storage":
        storage = get_transcripts_storage()
        try:
            await storage.delete_file(transcript.storage_audio_path)
            logger.info(f"Deleted processed audio from storage: {transcript.storage_audio_path}")
        except Exception as e:
            logger.error(f"Failed to delete processed audio: {e}")

    # 3. Delete local audio files (getattr guards against absent attributes,
    # matching the defensive intent of the original hasattr checks)
    try:
        if getattr(transcript, "audio_mp3_filename", None):
            transcript.audio_mp3_filename.unlink(missing_ok=True)
        if getattr(transcript, "audio_wav_filename", None):
            transcript.audio_wav_filename.unlink(missing_ok=True)
    except Exception as e:
        logger.error(f"Failed to delete local audio files: {e}")

    logger.info("Consent cleanup done")
@get_transcript @get_transcript
async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger): async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger):
logger.info("Starting post to zulip") logger.info("Starting post to zulip")
@@ -659,6 +732,12 @@ async def task_pipeline_final_summaries(*, transcript_id: str):
await pipeline_summaries(transcript_id=transcript_id) await pipeline_summaries(transcript_id=transcript_id)
@shared_task
@asynctask
async def task_cleanup_consent(*, transcript_id: str):
    """Celery entry point: run the consent-driven audio cleanup for one transcript."""
    await cleanup_consent(transcript_id=transcript_id)
@shared_task @shared_task
@asynctask @asynctask
async def task_pipeline_post_to_zulip(*, transcript_id: str): async def task_pipeline_post_to_zulip(*, transcript_id: str):
@@ -675,6 +754,7 @@ def pipeline_post(*, transcript_id: str):
| task_pipeline_upload_mp3.si(transcript_id=transcript_id) | task_pipeline_upload_mp3.si(transcript_id=transcript_id)
| task_pipeline_remove_upload.si(transcript_id=transcript_id) | task_pipeline_remove_upload.si(transcript_id=transcript_id)
| task_pipeline_diarization.si(transcript_id=transcript_id) | task_pipeline_diarization.si(transcript_id=transcript_id)
| task_cleanup_consent.si(transcript_id=transcript_id)
) )
chain_title_preview = task_pipeline_title.si(transcript_id=transcript_id) chain_title_preview = task_pipeline_title.si(transcript_id=transcript_id)
chain_final_summaries = task_pipeline_final_summaries.si( chain_final_summaries = task_pipeline_final_summaries.si(

View File

@@ -43,6 +43,10 @@ def range_requests_response(
): ):
"""Returns StreamingResponse using Range Requests of a given file""" """Returns StreamingResponse using Range Requests of a given file"""
if not os.path.exists(file_path):
from fastapi import HTTPException
raise HTTPException(status_code=404, detail="File not found")
file_size = os.stat(file_path).st_size file_size = os.stat(file_path).st_size
range_header = request.headers.get("range") range_header = request.headers.get("range")

View File

@@ -65,6 +65,7 @@ class GetTranscript(BaseModel):
source_kind: SourceKind source_kind: SourceKind
room_id: str | None = None room_id: str | None = None
room_name: str | None = None room_name: str | None = None
audio_deleted: bool | None = None
class CreateTranscript(BaseModel): class CreateTranscript(BaseModel):
@@ -82,6 +83,7 @@ class UpdateTranscript(BaseModel):
share_mode: Optional[Literal["public", "semi-private", "private"]] = Field(None) share_mode: Optional[Literal["public", "semi-private", "private"]] = Field(None)
participants: Optional[list[TranscriptParticipant]] = Field(None) participants: Optional[list[TranscriptParticipant]] = Field(None)
reviewed: Optional[bool] = Field(None) reviewed: Optional[bool] = Field(None)
audio_deleted: Optional[bool] = Field(None)
class DeletionStatus(BaseModel): class DeletionStatus(BaseModel):

View File

@@ -86,8 +86,11 @@ async def transcript_get_audio_mp3(
headers=resp.headers, headers=resp.headers,
) )
if transcript.audio_deleted:
raise HTTPException(status_code=404, detail="Audio unavailable due to privacy settings")
if not transcript.audio_mp3_filename.exists(): if not transcript.audio_mp3_filename.exists():
raise HTTPException(status_code=500, detail="Audio not found") raise HTTPException(status_code=404, detail="Audio file not found")
truncated_id = str(transcript.id).split("-")[0] truncated_id = str(transcript.id).split("-")[0]
filename = f"recording_{truncated_id}.mp3" filename = f"recording_{truncated_id}.mp3"

View File

@@ -9,7 +9,7 @@ import structlog
from celery import shared_task from celery import shared_task
from celery.utils.log import get_task_logger from celery.utils.log import get_task_logger
from pydantic import ValidationError from pydantic import ValidationError
from reflector.db.meetings import meeting_consent_controller, meetings_controller from reflector.db.meetings import meetings_controller
from reflector.db.recordings import Recording, recordings_controller from reflector.db.recordings import Recording, recordings_controller
from reflector.db.rooms import rooms_controller from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import SourceKind, transcripts_controller from reflector.db.transcripts import SourceKind, transcripts_controller
@@ -132,52 +132,6 @@ async def process_recording(bucket_name: str, object_key: str):
task_pipeline_process.delay(transcript_id=transcript.id) task_pipeline_process.delay(transcript_id=transcript.id)
# Check if any participant denied consent after transcript processing is complete
should_delete = await meeting_consent_controller.has_any_denial(meeting.id)
if should_delete:
logger.info(f"Deleting audio files for {object_key} due to consent denial")
await delete_audio_files(transcript, bucket_name, object_key)
async def delete_audio_files(transcript, bucket_name: str, object_key: str):
"""Delete ONLY audio files from all locations, keep transcript data"""
try:
# 1. Delete original Whereby recording from S3
s3_whereby = boto3.client(
"s3",
aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
)
s3_whereby.delete_object(Bucket=bucket_name, Key=object_key)
logger.info(f"Deleted original Whereby recording: {bucket_name}/{object_key}")
# 2. Delete processed audio from transcript storage S3 bucket
if transcript.audio_location == "storage":
storage = get_transcripts_storage()
await storage.delete_file(transcript.storage_audio_path)
logger.info(f"Deleted processed audio from storage: {transcript.storage_audio_path}")
# 3. Delete local audio files (if any remain)
if hasattr(transcript, 'audio_mp3_filename') and transcript.audio_mp3_filename:
transcript.audio_mp3_filename.unlink(missing_ok=True)
if hasattr(transcript, 'audio_wav_filename') and transcript.audio_wav_filename:
transcript.audio_wav_filename.unlink(missing_ok=True)
upload_path = transcript.data_path / f"upload{os.path.splitext(object_key)[1]}"
upload_path.unlink(missing_ok=True)
# 4. Update transcript to reflect audio deletion (keep all other data)
await transcripts_controller.update(transcript, {
'audio_location_deleted': True
})
logger.info(f"Deleted all audio files for transcript {transcript.id}, kept transcript data")
except Exception as e:
logger.error(f"Failed to delete audio files for {object_key}: {str(e)}")
@shared_task @shared_task
@asynctask @asynctask
async def process_meetings(): async def process_meetings():

View File

@@ -183,7 +183,18 @@ const TopicPlayer = ({
setIsPlaying(false); setIsPlaying(false);
}; };
const isLoaded = !!(mp3.media && topicTime); const isLoaded = !!(mp3.loading && topicTime);
const error = mp3.error;
if (error !== null) {
return <Text fontSize="sm" pt="1" pl="2">
Loading error: {error}
</Text>
}
if (mp3.audioDeleted) {
return <Text fontSize="sm" pt="1" pl="2">
This topic file has been deleted.
</Text>
}
return ( return (
<Skeleton <Skeleton
isLoaded={isLoaded} isLoaded={isLoaded}

View File

@@ -54,10 +54,30 @@ export default function TranscriptDetails(details: TranscriptDetails) {
); );
} }
if (mp3.audioDeleted) {
return (
<Modal
title="Can't find transcription: Transcription file is deleted"
text={`The recording is deleted.`}
/>
);
}
if (transcript?.loading || topics?.loading) { if (transcript?.loading || topics?.loading) {
return <Modal title="Loading" text={"Loading transcript..."} />; return <Modal title="Loading" text={"Loading transcript..."} />;
} }
if (mp3.error) {
return (
<Modal
title="Transcription error"
text={`There was an error loading the recording. Error: ${mp3.error}`}
/>
);
}
return ( return (
<> <>
<Grid <Grid

View File

@@ -27,7 +27,7 @@ const TranscriptRecord = (details: TranscriptDetails) => {
const webSockets = useWebSockets(details.params.transcriptId); const webSockets = useWebSockets(details.params.transcriptId);
let mp3 = useMp3(details.params.transcriptId, true); const mp3 = useMp3(details.params.transcriptId, true);
const router = useRouter(); const router = useRouter();

View File

@@ -21,7 +21,7 @@ const TranscriptUpload = (details: TranscriptUpload) => {
const webSockets = useWebSockets(details.params.transcriptId); const webSockets = useWebSockets(details.params.transcriptId);
let mp3 = useMp3(details.params.transcriptId, true); const mp3 = useMp3(details.params.transcriptId, true);
const router = useRouter(); const router = useRouter();

View File

@@ -5,13 +5,19 @@ import getApi from "../../lib/useApi";
export type Mp3Response = { export type Mp3Response = {
media: HTMLMediaElement | null; media: HTMLMediaElement | null;
loading: boolean; loading: boolean;
error: string | null;
getNow: () => void; getNow: () => void;
audioDeleted: boolean | null;
}; };
const useMp3 = (id: string, waiting?: boolean): Mp3Response => { const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => {
const [media, setMedia] = useState<HTMLMediaElement | null>(null); const [media, setMedia] = useState<HTMLMediaElement | null>(null);
const [later, setLater] = useState(waiting); const [later, setLater] = useState(waiting);
const [loading, setLoading] = useState<boolean>(false); const [audioLoading, setAudioLoading] = useState<boolean>(true);
const [audioLoadingError, setAudioLoadingError] = useState<null | string>(null);
const [transcriptMetadataLoading, setTranscriptMetadataLoading] = useState<boolean>(true);
const [transcriptMetadataLoadingError, setTranscriptMetadataLoadingError] = useState<string | null>(null);
const [audioDeleted, setAudioDeleted] = useState<boolean | null>(null);
const api = getApi(); const api = getApi();
const { api_url } = useContext(DomainContext); const { api_url } = useContext(DomainContext);
const accessTokenInfo = api?.httpRequest?.config?.TOKEN; const accessTokenInfo = api?.httpRequest?.config?.TOKEN;
@@ -42,23 +48,69 @@ const useMp3 = (id: string, waiting?: boolean): Mp3Response => {
}, [navigator.serviceWorker, !serviceWorker, accessTokenInfo]); }, [navigator.serviceWorker, !serviceWorker, accessTokenInfo]);
useEffect(() => { useEffect(() => {
if (!id || !api || later) return; if (!transcriptId || !api || later) return;
setTranscriptMetadataLoading(true);
// create an audio element and set the source
setLoading(true);
const audioElement = document.createElement("audio"); const audioElement = document.createElement("audio");
audioElement.src = `${api_url}/v1/transcripts/${id}/audio/mp3`; audioElement.src = `${api_url}/v1/transcripts/${transcriptId}/audio/mp3`;
audioElement.crossOrigin = "anonymous"; audioElement.crossOrigin = "anonymous";
audioElement.preload = "auto"; audioElement.preload = "auto";
const handleCanPlay = () => {
setAudioLoading(false);
setAudioLoadingError(null);
};
const handleError = () => {
setAudioLoading(false);
setAudioLoadingError("Failed to load audio");
};
audioElement.addEventListener('canplay', handleCanPlay);
audioElement.addEventListener('error', handleError);
setMedia(audioElement); setMedia(audioElement);
setLoading(false);
}, [id, !api, later]);
setAudioLoading(true);
let stopped = false;
// Fetch transcript info in parallel
api.v1TranscriptGet({ transcriptId })
.then((transcript) => {
if (stopped) return;
setAudioDeleted(transcript.audio_deleted || false);
setTranscriptMetadataLoadingError(null);
})
.catch((error) => {
if (stopped) return;
console.error("Failed to fetch transcript:", error);
setAudioDeleted(null);
setTranscriptMetadataLoadingError(error.message);
})
.finally(() => {
if (stopped) return;
setTranscriptMetadataLoading(false);
})
// Cleanup
return () => {
stopped = true;
audioElement.removeEventListener('canplay', handleCanPlay);
audioElement.removeEventListener('error', handleError);
};
}, [transcriptId, !api, later, api_url]);
const getNow = () => { const getNow = () => {
setLater(false); setLater(false);
}; };
return { media, loading, getNow }; const loading = audioLoading || transcriptMetadataLoading;
const error = audioLoadingError || transcriptMetadataLoadingError;
return { media, loading, error, getNow, audioDeleted };
}; };
export default useMp3; export default useMp3;

View File

@@ -2,12 +2,11 @@
import "@whereby.com/browser-sdk/embed"; import "@whereby.com/browser-sdk/embed";
import { useCallback, useEffect, useRef, useState, useContext } from "react"; import { useCallback, useEffect, useRef, useState, useContext } from "react";
import { Box, Button, Text, VStack, HStack, Spinner } from "@chakra-ui/react"; import { Box, Button, Text, VStack, HStack, Spinner, useToast } from "@chakra-ui/react";
import useRoomMeeting from "./useRoomMeeting"; import useRoomMeeting from "./useRoomMeeting";
import { useRouter } from "next/navigation"; import { useRouter } from "next/navigation";
import { notFound } from "next/navigation"; import { notFound } from "next/navigation";
import useSessionStatus from "../lib/useSessionStatus"; import useSessionStatus from "../lib/useSessionStatus";
import AudioConsentDialog from "../(app)/rooms/audioConsentDialog";
import { DomainContext } from "../domainContext"; import { DomainContext } from "../domainContext";
import { useRecordingConsent } from "../recordingConsentContext"; import { useRecordingConsent } from "../recordingConsentContext";
import useSessionAccessToken from "../lib/useSessionAccessToken"; import useSessionAccessToken from "../lib/useSessionAccessToken";
@@ -26,13 +25,13 @@ export default function Room(details: RoomDetails) {
const meeting = useRoomMeeting(roomName); const meeting = useRoomMeeting(roomName);
const router = useRouter(); const router = useRouter();
const { isLoading, isAuthenticated } = useSessionStatus(); const { isLoading, isAuthenticated } = useSessionStatus();
const [showConsentDialog, setShowConsentDialog] = useState(false);
const [consentLoading, setConsentLoading] = useState(false); const [consentLoading, setConsentLoading] = useState(false);
const { state: consentState, touch, hasConsent } = useRecordingConsent(); const { state: consentState, touch, hasConsent } = useRecordingConsent();
const { api_url } = useContext(DomainContext); const { api_url } = useContext(DomainContext);
const { accessToken } = useSessionAccessToken(); const { accessToken } = useSessionAccessToken();
const { id: userId } = useSessionUser(); const { id: userId } = useSessionUser();
const api = useApi(); const api = useApi();
const toast = useToast();
const roomUrl = meeting?.response?.host_room_url const roomUrl = meeting?.response?.host_room_url
@@ -45,10 +44,10 @@ export default function Room(details: RoomDetails) {
router.push("/browse"); router.push("/browse");
}, [router]); }, [router]);
const handleConsent = useCallback(async (meetingId: string, given: boolean) => { const handleConsent = useCallback(async (meetingId: string, given: boolean, onClose?: () => void) => {
if (!api) return; if (!api) return;
setShowConsentDialog(false); if (onClose) onClose();
setConsentLoading(true); setConsentLoading(true);
try { try {
@@ -77,18 +76,49 @@ export default function Room(details: RoomDetails) {
} }
}, [isLoading, meeting?.error]); }, [isLoading, meeting?.error]);
// Show consent dialog when meeting is loaded and consent hasn't been answered yet // Show consent toast when meeting is loaded and consent hasn't been answered yet
useEffect(() => { useEffect(() => {
if ( if (
consentState.ready && consentState.ready &&
meetingId && meetingId &&
!hasConsent(meetingId) && !hasConsent(meetingId) &&
!showConsentDialog &&
!consentLoading !consentLoading
) { ) {
setShowConsentDialog(true); const toastId = toast({
position: "top",
duration: null,
render: ({ onClose }) => (
<Box p={4} bg="white" borderRadius="md" boxShadow="md">
<VStack spacing={3} align="stretch">
<Text>
Can we have your permission to store this meeting's audio recording on our servers?
</Text>
<HStack spacing={4} justify="center">
<Button
colorScheme="green"
size="sm"
onClick={() => handleConsent(meetingId, true, onClose)}
>
Yes, store the audio
</Button>
<Button
colorScheme="red"
size="sm"
onClick={() => handleConsent(meetingId, false, onClose)}
>
No, delete after transcription
</Button>
</HStack>
</VStack>
</Box>
),
});
return () => {
toast.close(toastId);
};
} }
}, [consentState.ready, meetingId, hasConsent, showConsentDialog, consentLoading]); }, [consentState.ready, meetingId, hasConsent, consentLoading, toast, handleConsent]);
useEffect(() => { useEffect(() => {
if (isLoading || !isAuthenticated || !roomUrl) return; if (isLoading || !isAuthenticated || !roomUrl) return;
@@ -131,13 +161,6 @@ export default function Room(details: RoomDetails) {
style={{ width: "100vw", height: "100vh" }} style={{ width: "100vw", height: "100vh" }}
/> />
)} )}
{meetingId && consentState.ready && !hasConsent(meetingId) && !consentLoading && (
<AudioConsentDialog
isOpen={showConsentDialog}
onClose={() => {}} // No-op: ESC should not close without consent
onConsent={b => handleConsent(meetingId, b)}
/>
)}
</> </>
); );
} }

View File

@@ -293,6 +293,17 @@ export const $GetTranscript = {
], ],
title: "Room Name", title: "Room Name",
}, },
audio_deleted: {
anyOf: [
{
type: "boolean",
},
{
type: "null",
},
],
title: "Audio Deleted",
},
}, },
type: "object", type: "object",
required: [ required: [
@@ -1109,6 +1120,17 @@ export const $UpdateTranscript = {
], ],
title: "Reviewed", title: "Reviewed",
}, },
audio_deleted: {
anyOf: [
{
type: "boolean",
},
{
type: "null",
},
],
title: "Audio Deleted",
},
}, },
type: "object", type: "object",
title: "UpdateTranscript", title: "UpdateTranscript",

View File

@@ -56,6 +56,7 @@ export type GetTranscript = {
source_kind: SourceKind; source_kind: SourceKind;
room_id?: string | null; room_id?: string | null;
room_name?: string | null; room_name?: string | null;
audio_deleted?: boolean | null;
}; };
export type GetTranscriptSegmentTopic = { export type GetTranscriptSegmentTopic = {
@@ -219,6 +220,7 @@ export type UpdateTranscript = {
share_mode?: "public" | "semi-private" | "private" | null; share_mode?: "public" | "semi-private" | "private" | null;
participants?: Array<TranscriptParticipant> | null; participants?: Array<TranscriptParticipant> | null;
reviewed?: boolean | null; reviewed?: boolean | null;
audio_deleted?: boolean | null;
}; };
export type UserInfo = { export type UserInfo = {