diff --git a/server/README.md b/server/README.md
index c3004bcb..a70fb7e7 100644
--- a/server/README.md
+++ b/server/README.md
@@ -33,15 +33,21 @@ Then run the server:
 
 ```
 # With a config.ini
-$ poetry run python -m reflector.server
+$ poetry run python -m reflector.app
 
 # Within a poetry env
 $ poetry shell
-$ LLM_URL=http://.../api/v1/generate python -m reflector.server
+$ LLM_URL=http://.../api/v1/generate python -m reflector.app
 ```
 
 
 
+### Using local GPT4All
+
+- Start GPT4All with any model you want
+- Ensure the API server is activated in GPT4All
+- Run with: `LLM_BACKEND=openai LLM_URL=http://localhost:4891/v1/completions LLM_OPENAI_MODEL="GPT4All Falcon" python -m reflector.app`
+
 # Old documentation
 
 This is the code base for the Reflector demo (formerly called agenda-talk-diff) for the leads : Troy Web Consulting
diff --git a/server/reflector/app.py b/server/reflector/app.py
index f40af489..4a10b685 100644
--- a/server/reflector/app.py
+++ b/server/reflector/app.py
@@ -26,3 +26,7 @@ app.add_middleware(
 
 # register views
 app.include_router(rtc_offer_router)
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run("reflector.app:app", host="0.0.0.0", port=1250, reload=True)