server: started updating documentation

Committed by Mathieu Virbel on 2023-10-27 20:08:00 +02:00
parent d8a842f099
commit e405ccb8f3
4 changed files with 46 additions and 16 deletions

View File

@@ -6,7 +6,7 @@ The project architecture consists of three primary components:
* **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
* **Back-End**: Python server that offers an API and data persistence, found in `server/`.
* **AI Models**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.
* **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.
It also uses https://github.com/fief-dev for authentication, and Vercel for deployment and configuration of the front-end.
@@ -120,6 +120,9 @@ TRANSCRIPT_MODAL_API_KEY=<omitted>
LLM_BACKEND=modal
LLM_URL=https://monadical-sas--reflector-llm-web.modal.run
LLM_MODAL_API_KEY=<omitted>
TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
ZEPHYR_LLM_URL=https://monadical-sas--reflector-llm-zephyr-web.modal.run
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
AUTH_BACKEND=fief
AUTH_FIEF_URL=https://auth.reflector.media/reflector-local
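As a quick sanity check that the newly configured Modal endpoints are reachable, something like the sketch below can be used. The bare GETs against the root paths are an assumption; the actual apps may expose different routes or require the API key header.
```
# Probe each Modal-hosted service and print the HTTP status it returns.
for url in \
  "https://monadical-sas--reflector-translator-web.modal.run" \
  "https://monadical-sas--reflector-llm-zephyr-web.modal.run" \
  "https://monadical-sas--reflector-diarizer-web.modal.run"; do
  curl -s -o /dev/null -w "%{http_code}  $url\n" "$url"
done
```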
@@ -138,6 +141,10 @@ Use:
```
poetry run python3 -m reflector.app
```
And start the background worker:
```
celery -A reflector.worker.app worker --loglevel=info
```
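In practice the API server and the Celery worker run side by side. A sketch of a local session (the `poetry run` prefixes mirror the server command above; Redis is assumed to already be reachable as the broker):
```
# Terminal 1: the API server
poetry run python3 -m reflector.app

# Terminal 2: the background worker
poetry run celery -A reflector.worker.app worker --loglevel=info

# Optional: confirm the worker is up and its tasks are registered
poetry run celery -A reflector.worker.app inspect registered
```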
#### Using docker
Use:

View File

@@ -5,10 +5,19 @@ services:
      context: server
    ports:
      - 1250:1250
    environment:
      LLM_URL: "${LLM_URL}"
    volumes:
      - model-cache:/root/.cache
    environment:
      ENTRYPOINT: server
  worker:
    build:
      context: server
    volumes:
      - model-cache:/root/.cache
    environment:
      ENTRYPOINT: worker
  redis:
    image: redis:7.2
    ports:
      - 6379:6379
  web:
    build:
      context: www
@@ -17,4 +26,3 @@ services:
volumes:
  model-cache:
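A minimal session against this compose file might look like the following; the service names come from the diff above, and the flags are standard docker compose CLI:
```
docker compose up --build -d    # build images and start everything detached
docker compose logs -f worker   # tail the worker to confirm it connected to the broker
docker compose down             # stop; the named model-cache volume survives for faster restarts
```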

View File

@@ -1,15 +1,23 @@
version: "3.9"
services:
  # server:
  #   build:
  #     context: .
  #   ports:
  #     - 1250:1250
  #   environment:
  #     LLM_URL: "${LLM_URL}"
  #     MIN_TRANSCRIPT_LENGTH: "${MIN_TRANSCRIPT_LENGTH}"
  #   volumes:
  #     - model-cache:/root/.cache
  server:
    build:
      context: .
    ports:
      - 1250:1250
    volumes:
      - model-cache:/root/.cache
    environment:
      ENTRYPOINT: server
      REDIS_HOST: redis
  worker:
    build:
      context: .
    volumes:
      - model-cache:/root/.cache
    environment:
      ENTRYPOINT: worker
      REDIS_HOST: redis
  redis:
    image: redis:7.2
    ports:

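Here `REDIS_HOST: redis` lets the server and worker containers reach the broker by its compose service name. From the host, a quick way to confirm Redis is answering (assuming redis-cli is installed and the redis service publishes 6379:6379 as in the other compose file):
```
redis-cli -h 127.0.0.1 -p 6379 ping   # expected reply: PONG
```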
View File

@@ -4,4 +4,11 @@ if [ -f "/venv/bin/activate" ]; then
    source /venv/bin/activate
fi
alembic upgrade head
if [ "${ENTRYPOINT}" = "server" ]; then
    python -m reflector.app
elif [ "${ENTRYPOINT}" = "worker" ]; then
    celery -A reflector.worker.app worker --loglevel=info
else
    echo "Unknown command"
fi
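Because the script dispatches purely on ENTRYPOINT, it can also be smoke-tested outside Docker. A sketch, assuming the file is saved as entrypoint.sh (the filename is not shown in this diff) and the Python environment is already set up:
```
ENTRYPOINT=server ./entrypoint.sh   # runs migrations, then the API server
ENTRYPOINT=worker ./entrypoint.sh   # runs migrations, then the Celery worker
ENTRYPOINT=oops ./entrypoint.sh     # falls through to "Unknown command"
```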