Mirror of https://github.com/Monadical-SAS/cubbi.git (synced 2025-12-21 12:49:07 +00:00)

Compare commits: doc-mcp-fi ... v0.1.0-rc. (52 commits)
| SHA1 |
|---|
| ea0d44f4db |
| 3850bc3212 |
| 12d77d0128 |
| 51fb79baa3 |
| 3799f04c13 |
| 7fc9cfd8e1 |
| 979b43846a |
| 4f54c0fbe7 |
| 3a182fd265 |
| 2f9fd68cad |
| e25e30e749 |
| b1aa415dde |
| 5b9713dc2f |
| 9e742b439b |
| cfa7dd647d |
| deff036406 |
| 5678438661 |
| 30c6b995cb |
| f32b3dd269 |
| a74251b119 |
| 9c21611a7f |
| 6b2c1ebf1c |
| 33d90d0531 |
| a51115a45d |
| 0d75bfc3d8 |
| 7805aa720e |
| 16f59b1c40 |
| 4b0461a6fa |
| 5d674f7508 |
| 3ee8ce6338 |
| d098f268cd |
| 0892b6c8c4 |
| 212f271268 |
| 20916c5713 |
| 7c46d66b53 |
| 2caeb42551 |
| deb5945e40 |
| 7736573b84 |
| 133583b941 |
| 028bd26cf1 |
| 7649173d6c |
| 307eee4fce |
| 6f08e2b274 |
| b72f1eef9a |
| 092f497ecc |
| dab783b01d |
| d42af870ff |
| e36eef4ef7 |
| f83c49c0f3 |
| e36f4540bf |
| 1c538f8a59 |
| a4591ddbd8 |
17  .github/workflows/conventional_commit_pr.yml  (vendored, new file)

@@ -0,0 +1,17 @@
```yaml
name: Conventional commit PR

on: [pull_request]

jobs:
  cog_check_job:
    runs-on: ubuntu-latest
    name: check conventional commit compliance
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          # pick the pr HEAD instead of the merge commit
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Conventional commit check
        uses: cocogitto/cocogitto-action@v3
```
21  .github/workflows/conventional_commit_pr_title.yml  (vendored, new file)

@@ -0,0 +1,21 @@
```yaml
name: "Lint PR"

on:
  pull_request_target:
    types:
      - opened
      - edited
      - synchronize
      - reopened

permissions:
  pull-requests: read

jobs:
  main:
    name: Validate PR title
    runs-on: ubuntu-latest
    steps:
      - uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
14  .github/workflows/pre_commit.yml  (vendored, new file)

@@ -0,0 +1,14 @@
```yaml
name: pre-commit

on:
  pull_request:
  push:
    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.1
```
40  .github/workflows/pytests.yml  (vendored, new file)

@@ -0,0 +1,40 @@
```yaml
name: Pytests

on:
  pull_request:
  push:

permissions:
  contents: write
  checks: write
  pull-requests: write

jobs:
  pytest:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: true
      matrix:
        python-version: ["3.12"]

    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          python-version: ${{ matrix.python-version }}

      - name: Install all dependencies
        run: uv sync --frozen --all-extras --all-groups

      - name: Build goose image
        run: |
          uv tool install --with-editable . .
          cubbi image build goose

      - name: Tests
        run: |
          uv run --frozen -m pytest -v
```
127  .github/workflows/release.yml  (vendored, new file)

@@ -0,0 +1,127 @@
```yaml
name: Release

on:
  workflow_dispatch:
    inputs:
      release_force:
        # see https://python-semantic-release.readthedocs.io/en/latest/github-action.html#command-line-options
        description: |
          Force release be one of: [major | minor | patch]
          Leave empty for auto-detect based on commit messages.
        type: choice
        options:
          - "" # auto - no force
          - major # force major
          - minor # force minor
          - patch # force patch
        default: ""
        required: false
      prerelease_token:
        description: 'The "prerelease identifier" to use as a prefix for the "prerelease" part of a semver. Like the rc in `1.2.0-rc.8`.'
        type: choice
        options:
          - rc
          - beta
          - alpha
        default: rc
        required: false
      prerelease:
        description: "Is a pre-release"
        type: boolean
        default: false
        required: false

concurrency:
  group: deploy
  cancel-in-progress: false # prevent hickups with semantic-release

env:
  PYTHON_VERSION_DEFAULT: "3.12"

jobs:
  release:
    runs-on: ubuntu-latest
    concurrency: release

    permissions:
      id-token: write
      contents: write

    steps:
      # Note: we need to checkout the repository at the workflow sha in case during the workflow
      # the branch was updated. To keep PSR working with the configured release branches,
      # we force a checkout of the desired release branch but at the workflow sha HEAD.
      - name: Setup | Checkout Repository at workflow sha
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.sha }}
          ssh-key: ${{ secrets.DEPLOY_KEY }}

      - name: Setup | Force correct release branch on workflow sha
        run: |
          git checkout -B ${{ github.ref_name }} ${{ github.sha }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          python-version: ${{ env.PYTHON_VERSION_DEFAULT }}

      - name: Install all dependencies
        run: uv sync --frozen --all-extras --all-groups

      # 2 steps to prevent uv.lock out of sync
      # CF https://github.com/python-semantic-release/python-semantic-release/issues/1125
      - name: Action | Semantic Version Release (stamp only)
        uses: python-semantic-release/python-semantic-release@v9.12.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          git_committer_name: "github-actions"
          git_committer_email: "actions@users.noreply.github.com"
          force: ${{ github.event.inputs.release_force }}
          prerelease: ${{ github.event.inputs.prerelease }}
          prerelease_token: ${{ github.event.inputs.prerelease_token }}
          ssh_public_signing_key: ${{ secrets.DEPLOY_KEY_PUB }}
          ssh_private_signing_key: ${{ secrets.DEPLOY_KEY }}
          push: false
          commit: false
          tag: false
          changelog: false

      - name: Push and tags
        run: |
          uv lock
          git add uv.lock

      - name: Action | Semantic Version Release (fully to create release)
        id: release
        uses: python-semantic-release/python-semantic-release@v9.12.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          git_committer_name: "github-actions"
          git_committer_email: "actions@users.noreply.github.com"
          force: ${{ github.event.inputs.release_force }}
          prerelease: ${{ github.event.inputs.prerelease }}
          prerelease_token: ${{ github.event.inputs.prerelease_token }}
          ssh_public_signing_key: ${{ secrets.DEPLOY_KEY_PUB }}
          ssh_private_signing_key: ${{ secrets.DEPLOY_KEY }}
          push: false

      - name: Push and tags
        run: |
          git push --set-upstream --follow-tags origin ${{ github.ref_name }}

      - name: Build package
        run: uv build

      - name: Publish | Upload package to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: steps.release.outputs.released == 'true'

      - name: Publish | Upload to GitHub Release Assets
        uses: python-semantic-release/publish-action@v9.8.9
        if: steps.release.outputs.released == 'true'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          tag: ${{ steps.release.outputs.tag }}
```
4  .gitignore  (vendored)

@@ -8,3 +8,7 @@ wheels/
```
# Virtual environments
.venv

# Aider
.aider*
.goose
```
159  CHANGELOG.md  (new file)

@@ -0,0 +1,159 @@

# CHANGELOG

## v0.1.0-rc.1 (2025-04-18)

### Bug Fixes

* fix: mcp tests ([`3799f04`](https://github.com/Monadical-SAS/cubbi/commit/3799f04c1395d3b018f371db0c0cb8714e6fb8b3))
* fix: osx tests on volume ([`7fc9cfd`](https://github.com/Monadical-SAS/cubbi/commit/7fc9cfd8e1babfa069691d3b7997449535069674))
* fix: remove the "mc stop" meant to be in the container, but not implemented ([`4f54c0f`](https://github.com/Monadical-SAS/cubbi/commit/4f54c0fbe7886c8551368b4b35be3ad8c7ae49ab))
* fix(uid): correctly pass uid/gid to project ([`e25e30e`](https://github.com/Monadical-SAS/cubbi/commit/e25e30e7492c6b0a03017440a18bb2708927fc19))
* fix(goose): always update the file ([`b1aa415`](https://github.com/Monadical-SAS/cubbi/commit/b1aa415ddee981dc1278cd24f7509363b9c54a54))
* fix(goose): ensure configuration is run as user ([`cfa7dd6`](https://github.com/Monadical-SAS/cubbi/commit/cfa7dd647d1e4055bf9159be2ee9c2280f2d908e))
* fix(mcp): fix UnboundLocalError: cannot access local variable 'container_name' where it is not associated with a value ([`deff036`](https://github.com/Monadical-SAS/cubbi/commit/deff036406d72d55659da40520a3a09599d65f07))
* fix(ssh): do not enable ssh automatically ([`f32b3dd`](https://github.com/Monadical-SAS/cubbi/commit/f32b3dd269d1a3d6ebaa2e7b2893f267b5175b20))
* fix(uid): use symlink instead of volume for persistent volume in the container ([`a74251b`](https://github.com/Monadical-SAS/cubbi/commit/a74251b119d24714c7cc1eaadeea851008006137))
* fix(session): ensure a session connect only to the mcp server passed in --mcp ([`5d674f7`](https://github.com/Monadical-SAS/cubbi/commit/5d674f750878f0895dc1544620e8b1da4da29752))
* fix(goose): add ping, nano and vim to the default image ([`028bd26`](https://github.com/Monadical-SAS/cubbi/commit/028bd26cf12e181541e006650b58d97e1d568a45))
* fix(goose): install latest goose version, do not use pip ([`7649173`](https://github.com/Monadical-SAS/cubbi/commit/7649173d6c8a82ac236d0f89263591eaa6e21a20))
* fix(mc): fix runtime issue when starting mc ([`6f08e2b`](https://github.com/Monadical-SAS/cubbi/commit/6f08e2b274b67001694123b5bb977401df0810c6))
* fix(session): fix session status display ([`092f497`](https://github.com/Monadical-SAS/cubbi/commit/092f497ecc19938d4917a18441995170d1f68704))
* fix(goose): remove MCP_HOST and such, this is not how mcp works ([`d42af87`](https://github.com/Monadical-SAS/cubbi/commit/d42af870ff56112b4503f2568b8a5b0f385c435c))
* fix(langfuse): fix goose langfuse integration (wrong env variables) ([`e36eef4`](https://github.com/Monadical-SAS/cubbi/commit/e36eef4ef7c2d0cbdef31704afb45c50c4293986))
* fix: remove double connecting to message ([`e36f454`](https://github.com/Monadical-SAS/cubbi/commit/e36f4540bfe3794ab2d065f552cfb9528489de71))
* fix(cli): rename MAI->MC ([`354834f`](https://github.com/Monadical-SAS/cubbi/commit/354834fff733c37202b01a6fc49ebdf5003390c1))
* fix(goose): rename mai to mc, add initialization status ([`74c723d`](https://github.com/Monadical-SAS/cubbi/commit/74c723db7b6b7dd57c4ca32a804436a990e5260c))

### Chores

* chore: remove unnecessary output ([`30c6b99`](https://github.com/Monadical-SAS/cubbi/commit/30c6b995cbb5bdf3dc7adf2e79d8836660d4f295))
* chore: update doc and add pre-commit ([`958d87b`](https://github.com/Monadical-SAS/cubbi/commit/958d87bcaeed16210a7c22574b5e63f2422af098))

### Continuous Integration

* ci: add ci files (#11)

  * ci: add ci files

  * fix: add goose image build ([`3850bc3`](https://github.com/Monadical-SAS/cubbi/commit/3850bc32129da539f53b69427ddca85f8c5f390a))

### Documentation

* docs: Prefer mcx alias in README examples ([`9c21611`](https://github.com/Monadical-SAS/cubbi/commit/9c21611a7fa1497f7cbddb1f1b4cd22b4ebc8a19))
* docs: Add --run option examples to README ([`6b2c1eb`](https://github.com/Monadical-SAS/cubbi/commit/6b2c1ebf1cd7a5d9970234112f32fe7a231303f9))
* docs(mcp): add specification for MCP server support ([`20916c5`](https://github.com/Monadical-SAS/cubbi/commit/20916c5713b3a047f4a8a33194f751f36e3c8a7a))
* docs(readme): remove license part ([`1c538f8`](https://github.com/Monadical-SAS/cubbi/commit/1c538f8a59e28888309c181ae8f8034b9e70a631))
* docs(readme): update README to update tool call ([`a4591dd`](https://github.com/Monadical-SAS/cubbi/commit/a4591ddbd863bc6658a7643d3f33d06c82816cae))

### Features

* feat(project): explicitely add --project to save information in /mc-config across run.

  Containers are now isolated by default. ([`3a182fd`](https://github.com/Monadical-SAS/cubbi/commit/3a182fd2658c0eb361ce5ed88938686e2bd19e59))

* feat(gemini): support for gemini model ([`2f9fd68`](https://github.com/Monadical-SAS/cubbi/commit/2f9fd68cada9b5aaba652efb67368c2641046da5))
* feat(llm): add default model/provider to auto configure the driver (#7) ([`5b9713d`](https://github.com/Monadical-SAS/cubbi/commit/5b9713dc2f7d7c25808ad37094838c697c056fec))
* feat(goose): update config using uv script with pyyaml (#6) ([`9e742b4`](https://github.com/Monadical-SAS/cubbi/commit/9e742b439b7b852efa4219850f8b67c143274045))
* feat(ssh): make SSH server optional with --ssh flag

  - Added --ssh flag to session create command
  - Modified mc-init.sh to check MC_SSH_ENABLED environment variable
  - SSH server is now disabled by default
  - Updated README.md with new flag example
  - Fixed UnboundLocalError with container_name in exception handler ([`5678438`](https://github.com/Monadical-SAS/cubbi/commit/56784386614fcd0a52be8a2eb89d2deef9323ca1))

* feat(run): add --run command ([`33d90d0`](https://github.com/Monadical-SAS/cubbi/commit/33d90d05311ad872b7a7d4cd303ff6f7b7726038))
* feat(mc): support for uid/gid, and use default current user ([`a51115a`](https://github.com/Monadical-SAS/cubbi/commit/a51115a45d88bf703fb5380171042276873b7207))
* feat(mcp): ensure inner mcp environemnt variables are passed ([`0d75bfc`](https://github.com/Monadical-SAS/cubbi/commit/0d75bfc3d8e130fb05048c2bc8a674f6b7e5de83))
* feat(goose): auto add mcp server to goose configuration when starting a session ([`7805aa7`](https://github.com/Monadical-SAS/cubbi/commit/7805aa720eba78d47f2ad565f6944e84a21c4b1c))
* feat(goose): optimize init status ([`16f59b1`](https://github.com/Monadical-SAS/cubbi/commit/16f59b1c408dbff4781ad7ccfa70e81d6d98f7bd))
* feat(mcp): add the possibility to have default mcp to connect to ([`4b0461a`](https://github.com/Monadical-SAS/cubbi/commit/4b0461a6faf81de1e1b54d1fe78fea7977cde9dd))
* feat(mcp): improve inspector reliability over re-run ([`3ee8ce6`](https://github.com/Monadical-SAS/cubbi/commit/3ee8ce6338c35b7e48d788d2dddfa9b6a70381cb))
* feat(mcp): add inspector ([`d098f26`](https://github.com/Monadical-SAS/cubbi/commit/d098f268cd164e9d708089c9f9525a940653c010))
* feat(mcp): first docker proxy working ([`0892b6c`](https://github.com/Monadical-SAS/cubbi/commit/0892b6c8c472063c639cc78cf29b322bb39f998f))
* feat(mcp): initial version of mcp ([`212f271`](https://github.com/Monadical-SAS/cubbi/commit/212f271268c5724775beceae119f97aec2748dcb))
* feat(volume): add mc config volume command ([`2caeb42`](https://github.com/Monadical-SAS/cubbi/commit/2caeb425518242fbe1c921b9678e6e7571b9b0a6))
* feat(config): ensure config is correctly saved ([`deb5945`](https://github.com/Monadical-SAS/cubbi/commit/deb5945e40d55643dca4e1aa4201dfa8da1bfd70))
* feat(cli): separate session state into its own session.yaml file ([`7736573`](https://github.com/Monadical-SAS/cubbi/commit/7736573b84c7a51eaa60b932f835726b411ca742))
* feat(cli): support to join external network ([`133583b`](https://github.com/Monadical-SAS/cubbi/commit/133583b941ed56d1b0636277bb847c45eee7f3b8))
* feat(volume): add the possibilty to mount local directory into the container (like docker volume) ([`b72f1ee`](https://github.com/Monadical-SAS/cubbi/commit/b72f1eef9af598f2090a0edae8921c16814b3cda))
* feat(config): add global user configuration for the tool

  - langfuse
  - default driver
  - and api keys ([`dab783b`](https://github.com/Monadical-SAS/cubbi/commit/dab783b01d82bcb210b5e01ac3b93ba64c7bc023))

* feat(keys): pass local keys to the session by default ([`f83c49c`](https://github.com/Monadical-SAS/cubbi/commit/f83c49c0f340d1a3accba1fe1317994b492755c0))
* feat(cli): more information when closing session ([`08ba1ab`](https://github.com/Monadical-SAS/cubbi/commit/08ba1ab2da3c24237c0f0bc411924d8ffbe71765))
* feat(cli): auto mount current directory as /app ([`e6e3c20`](https://github.com/Monadical-SAS/cubbi/commit/e6e3c207bcee531b135824688adf1a56ae427a01))
* feat(cli): auto connect to a session ([`4a63606`](https://github.com/Monadical-SAS/cubbi/commit/4a63606d58cc3e331a349974e9b3bf2d856a72a1))
* feat(cli): phase 1 - local cli with docker integration ([`6443083`](https://github.com/Monadical-SAS/cubbi/commit/64430830d883308e4d52e17b25c260a0d5385141))
* feat: first commit ([`fde6529`](https://github.com/Monadical-SAS/cubbi/commit/fde6529d545b5625484c5c1236254d2e0c6f0f4d))

### Refactoring

* refactor: rename project to cubbi ([`12d77d0`](https://github.com/Monadical-SAS/cubbi/commit/12d77d0128e4d82e5ddc1a4ab7e873ddaa22e130))
* refactor: rename driver to image, first pass ([`51fb79b`](https://github.com/Monadical-SAS/cubbi/commit/51fb79baa30ff479ac5479ba5ea0cad70bbb4c20))
* refactor: reduce amount of data in session.yaml ([`979b438`](https://github.com/Monadical-SAS/cubbi/commit/979b43846a798f1fb25ff05e6dc1fc27fa16f590))
* refactor: move drivers directory into mcontainer package

  - Relocate goose driver to mcontainer/drivers/
  - Update ConfigManager to dynamically scan for driver YAML files
  - Add support for mc-driver.yaml instead of mai-driver.yaml
  - Update Driver model to support init commands and other YAML fields
  - Auto-discover drivers at runtime instead of hardcoding them
  - Update documentation to reflect new directory structure ([`307eee4`](https://github.com/Monadical-SAS/cubbi/commit/307eee4fcef47189a98a76187d6080a36423ad6e))

### Testing

* test: add unit tests ([`7c46d66`](https://github.com/Monadical-SAS/cubbi/commit/7c46d66b53ac49c08458bc5d72e636e7d296e74f))
@@ -1,15 +1,12 @@
# Monadical Container Development Guide
# Cubbi Container Development Guide

## Build Commands
```bash
# Install dependencies using uv (Astral)
uv sync

# Run MC service
uv run -m mcontainer.service

# Run MC CLI
uv run -m mcontainer.cli
# Run Cubbi CLI
uv run -m cubbi.cli
```

## Lint/Test Commands
341  README.md

@@ -1,75 +1,136 @@
# MC - Monadical Container Tool
<div align="center">

MC (Monadical Container) is a command-line tool for managing ephemeral
containers that run AI tools and development environments. It works with both
local Docker and a dedicated remote web service that manages containers in a
Docker-in-Docker (DinD) environment.
# Cubbi - Container Tool

## Requirements
Cubbi is a command-line tool for managing ephemeral containers that run AI tools and development environments. It works with both local Docker and a dedicated remote web service that manages containers in a Docker-in-Docker (DinD) environment. Cubbi also supports connecting to MCP (Model Control Protocol) servers to extend AI tools with additional capabilities.

[Pytests](https://github.com/monadical-sas/cubbi/actions/workflows/pytests.yml)

</div>

## 🚀 Quick Reference

- `cubbi session create` - Create a new session
- `cubbix` - Shortcut for `cubbi session create`
- `cubbix .` - Mount the current directory
- `cubbix /path/to/dir` - Mount a specific directory
- `cubbix https://github.com/user/repo` - Clone a repository

## 📋 Requirements

- [uv](https://docs.astral.sh/uv/)

## Installation
## 📥 Installation

```bash
# Clone the repository
git clone https://github.com/monadical/mcontainer.git
cd mc
git clone https://github.com/monadical/cubbi.git

# Install with uv
uv sync
# Install the tool locally
# (with editable, so you can update the code and work with it)
cd cubbi
uv tool install --with-editable . .

# Then you could use the tool as `cubbi`
cubbi --help
```

## Basic Usage
Important: compile your first image

```bash
# Create a new session with the default driver
mc session create
cubbi image build goose
```

## 📚 Basic Usage

```bash
# Show help message (displays available commands)
cubbi

# Create a new session with the default image (using cubbix alias)
cubbix

# Create a session and run an initial command before the shell starts
cubbix --run "echo 'Setup complete'; ls -l"

# List all active sessions
mc session list
cubbi session list

# Connect to a specific session
mc session connect SESSION_ID
cubbi session connect SESSION_ID

# Close a session when done
mc session close SESSION_ID
cubbi session close SESSION_ID

# Create a session with a specific driver
mc session create --driver goose
# Create a session with a specific image
cubbix --image goose

# Create a session with environment variables
mc session create -e VAR1=value1 -e VAR2=value2
cubbix -e VAR1=value1 -e VAR2=value2

# Shorthand for creating a session with a project repository
mc github.com/username/repo
# Mount custom volumes (similar to Docker's -v flag)
cubbix -v /local/path:/container/path
cubbix -v ~/data:/data -v ./configs:/etc/app/config

# Mount a local directory (current directory or specific path)
cubbix .
cubbix /path/to/project

# Connect to external Docker networks
cubbix --network teamnet --network dbnet

# Connect to MCP servers for extended capabilities
cubbix --mcp github --mcp jira

# Clone a Git repository
cubbix https://github.com/username/repo

# Using the cubbix shortcut (equivalent to cubbi session create)
cubbix                                   # Creates a session without mounting anything
cubbix .                                 # Mounts the current directory
cubbix /path/to/project                  # Mounts the specified directory
cubbix https://github.com/username/repo  # Clones the repository

# Shorthand with MCP servers
cubbix https://github.com/username/repo --mcp github

# Shorthand with an initial command
cubbix . --run "apt-get update && apt-get install -y my-package"

# Enable SSH server in the container
cubbix --ssh
```

## Driver Management
## 🖼️ Image Management

MC includes a driver management system that allows you to build, manage, and use Docker images for different AI tools:
Cubbi includes an image management system that allows you to build, manage, and use Docker images for different AI tools:

```bash
# List available drivers
mc driver list
# List available images
cubbi image list

# Get detailed information about a driver
mc driver info goose
# Get detailed information about an image
cubbi image info goose

# Build a driver image
mc driver build goose
# Build an image
cubbi image build goose

# Build and push a driver image
mc driver build goose --push
# Build and push an image
cubbi image build goose --push
```

Drivers are defined in the `drivers/` directory, with each subdirectory containing:
Images are defined in the `cubbi/images/` directory, with each subdirectory containing:

- `Dockerfile`: Docker image definition
- `entrypoint.sh`: Container entrypoint script
- `mai-init.sh`: Standardized initialization script
- `mai-driver.yaml`: Driver metadata and configuration
- `README.md`: Driver documentation
- `cubbi-init.sh`: Standardized initialization script
- `cubbi-image.yaml`: Image metadata and configuration
- `README.md`: Image documentation

Cubbi automatically discovers and loads image definitions from the YAML files.
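For orientation, a minimal `cubbi-image.yaml` might look like the sketch below. This is illustrative, not a file shipped in the repository: the loader in `cubbi/config.py` requires only `name`, `description`, `version`, and `maintainer`, and fills in `image` as `monadical/cubbi-<name>:latest` when it is omitted; the full schema is whatever the `Image` model in `cubbi/models.py` accepts.

```yaml
# Illustrative sketch of a cubbi-image.yaml; field values are examples, not the shipped file.
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com
# Optional; defaults to monadical/cubbi-goose:latest when omitted.
image: monadical/cubbi-goose:latest
```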
## Development

@@ -87,6 +148,210 @@ uvx mypy .
```bash
uvx ruff format .
```

## License
## ⚙️ Configuration

See LICENSE file for details.
Cubbi supports user-specific configuration via a YAML file located at `~/.config/cubbi/config.yaml`. This allows you to set default values and configure service credentials.
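The settings managed by the commands below are dotted paths. Assuming they map one-to-one onto nested YAML keys (an assumption made for illustration, not confirmed by this diff), the file would look roughly like:

```yaml
# ~/.config/cubbi/config.yaml -- illustrative sketch; the real layout is defined by Cubbi's config manager
langfuse:
  url: https://cloud.langfuse.com
  public_key: pk-lf-...
  secret_key: sk-lf-...
openai:
  api_key: sk-...
anthropic:
  api_key: sk-ant-...
```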
### Managing Configuration

```bash
# View all configuration
cubbi config list

# Get a specific configuration value
cubbi config get langfuse.url

# Set configuration values
cubbi config set langfuse.url "https://cloud.langfuse.com"
cubbi config set langfuse.public_key "pk-lf-..."
cubbi config set langfuse.secret_key "sk-lf-..."

# Set API keys for various services
cubbi config set openai.api_key "sk-..."
cubbi config set anthropic.api_key "sk-ant-..."

# Reset configuration to defaults
cubbi config reset
```

### Default Networks Configuration

You can configure default networks that will be applied to every new session:

```bash
# List default networks
cubbi config network list

# Add a network to defaults
cubbi config network add teamnet

# Remove a network from defaults
cubbi config network remove teamnet
```

### Default Volumes Configuration

You can configure default volumes that will be automatically mounted in every new session:

```bash
# List default volumes
cubbi config volume list

# Add a volume to defaults
cubbi config volume add /local/path:/container/path

# Remove a volume from defaults (will prompt if multiple matches found)
cubbi config volume remove /local/path
```

Default volumes will be combined with any volumes specified using the `-v` flag when creating a session.

### Default MCP Servers Configuration

You can configure default MCP servers that sessions will automatically connect to:

```bash
# List default MCP servers
cubbi config mcp list

# Add an MCP server to defaults
cubbi config mcp add github

# Remove an MCP server from defaults
cubbi config mcp remove github
```

When adding new MCP servers, they are added to defaults by default. Use the `--no-default` flag to prevent this:

```bash
# Add an MCP server without adding it to defaults
cubbi mcp add github ghcr.io/mcp/github:latest --no-default
cubbi mcp add-remote jira https://jira-mcp.example.com/sse --no-default
```

When creating sessions, if no MCP server is specified with `--mcp`, the default MCP servers will be used automatically.

### External Network Connectivity

Cubbi containers can connect to external Docker networks, allowing them to communicate with other services in those networks:

```bash
# Create a session connected to external networks
cubbi session create --network teamnet --network dbnet
```

**Important**: Networks must be "attachable" to be joined by Cubbi containers. Here's how to create attachable networks:

```bash
# Create an attachable network with Docker
docker network create --driver bridge --attachable teamnet

# Example docker-compose.yml with attachable network
# docker-compose.yml
version: '3'
services:
  web:
    image: nginx
    networks:
      - teamnet

networks:
  teamnet:
    driver: bridge
    attachable: true  # This is required for Cubbi containers to connect
```

### Service Credentials

Service credentials like API keys configured in `~/.config/cubbi/config.yaml` are automatically passed to containers as environment variables:

| Config Setting | Environment Variable |
|----------------|----------------------|
| `langfuse.url` | `LANGFUSE_URL` |
| `langfuse.public_key` | `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` |
| `langfuse.secret_key` | `LANGFUSE_INIT_PROJECT_SECRET_KEY` |
| `openai.api_key` | `OPENAI_API_KEY` |
| `anthropic.api_key` | `ANTHROPIC_API_KEY` |
| `openrouter.api_key` | `OPENROUTER_API_KEY` |
| `google.api_key` | `GOOGLE_API_KEY` |
## 🌐 MCP Server Management

MCP (Model Control Protocol) servers provide tool-calling capabilities to AI models, enhancing their ability to interact with external services, databases, and systems. Cubbi supports multiple types of MCP servers:

1. **Remote HTTP SSE servers** - External MCP servers accessed over HTTP
2. **Docker-based MCP servers** - Local MCP servers running in Docker containers
3. **Proxy-based MCP servers** - Local MCP servers with an SSE proxy for stdio-to-SSE conversion

### Managing MCP Servers

```bash
# List all configured MCP servers and their status
cubbi mcp list

# View detailed status of an MCP server
cubbi mcp status github

# Start/stop/restart individual MCP servers
cubbi mcp start github
cubbi mcp stop github
cubbi mcp restart github

# Start all MCP servers at once
cubbi mcp start --all

# Stop and remove all MCP servers at once
cubbi mcp stop --all

# Run the MCP Inspector to visualize and interact with MCP servers
# It automatically joins all MCP networks for seamless DNS resolution
# Uses two ports: frontend UI (default: 5173) and backend API (default: 3000)
cubbi mcp inspector

# Run the MCP Inspector with custom ports
cubbi mcp inspector --client-port 6173 --server-port 6174

# Run the MCP Inspector in detached mode
cubbi mcp inspector --detach

# Stop the MCP Inspector
cubbi mcp inspector --stop

# View MCP server logs
cubbi mcp logs github

# Remove an MCP server configuration
cubbi mcp remove github
```

### Adding MCP Servers

Cubbi supports different types of MCP servers:

```bash
# Add a remote HTTP SSE MCP server
cubbi mcp remote add github http://my-mcp-server.example.com/sse --header "Authorization=Bearer token123"

# Add a Docker-based MCP server
cubbi mcp docker add github mcp/github:latest --command "github-mcp" --env GITHUB_TOKEN=ghp_123456

# Add a proxy-based MCP server (for stdio-to-SSE conversion)
cubbi mcp add github ghcr.io/mcp/github:latest --proxy-image ghcr.io/sparfenyuk/mcp-proxy:latest --command "github-mcp" --sse-port 8080 --no-default
```

### Using MCP Servers with Sessions

MCP servers can be attached to sessions when they are created:

```bash
# Create a session with a single MCP server
cubbi session create --mcp github

# Create a session with multiple MCP servers
cubbi session create --mcp github --mcp jira

# Using MCP with a project repository
cubbi github.com/username/repo --mcp github
```

MCP servers are persistent and can be shared between sessions. They continue running even when sessions are closed, allowing for efficient reuse across multiple sessions.
@@ -1,510 +0,0 @@
# MC - Monadical AI Container Tool

## Overview

MC (Monadical Container) is a command-line tool for managing ephemeral
containers that run AI tools and development environments. It works with both
local Docker and a dedicated remote web service that manages containers in a
Docker-in-Docker (DinD) environment.

## Technology Stack

### MC Service
- **Web Framework**: FastAPI for high-performance, async API endpoints
- **Package Management**: uv (Astral) for dependency management
- **Database**: SQLite for development, PostgreSQL for production
- **Container Management**: Docker SDK for Python
- **Authentication**: OAuth 2.0 integration with Authentik

### MC CLI
- **Language**: Python
- **Package Management**: uv for dependency management
- **Distribution**: Standalone binary via PyInstaller or similar
- **Configuration**: YAML for configuration files

## System Architecture

### Components

1. **CLI Tool (`mc`)**: The command-line interface users interact with
2. **MC Service**: A web service that handles remote container execution
3. **Container Drivers**: Predefined container templates for various AI tools

### Architecture Diagram

```
┌─────────────┐           ┌─────────────────────────┐
│             │           │                         │
│   MC CLI    │◄─────────►│   Local Docker Daemon   │
│    (mc)     │           │                         │
│             │           └─────────────────────────┘
└──────┬──────┘
       │
       │ REST API
       │
┌──────▼──────┐           ┌─────────────────────────┐
│             │           │                         │
│  MC Service │◄─────────►│    Docker-in-Docker     │
│  (Web API)  │           │                         │
│             │           └─────────────────────────┘
└─────────────┘
       │
       ├──────────────┬──────────────┐
       │              │              │
┌──────▼──────┐ ┌─────▼─────┐ ┌──────▼──────┐
│             │ │           │ │             │
│   Fluentd   │ │  Langfuse │ │    Other    │
│   Logging   │ │  Logging  │ │   Services  │
│             │ │           │ │             │
└─────────────┘ └───────────┘ └─────────────┘
```

## Core Concepts

- **Session**: An active container instance with a specific driver
- **Driver**: A predefined container template with specific AI tools installed
- **Remote**: A configured MC service instance

## CLI Tool Commands

### Basic Commands

```bash
# Create a new session locally (shorthand)
mc

# List active sessions on local system
mc session list

# Create a new session locally
mc session create [OPTIONS]

# Create a session with a specific driver
mc session create --driver goose

# Create a session with a specific project repository
mc session create --driver goose --project github.com/hello/private

# Create a session with a project (shorthand)
mc git@github.com:hello/private

# Close a specific session
mc session close <id>

# Connect to an existing session
mc session connect <id>

# Stop the current session (from inside the container)
mc stop
```

### Remote Management

```bash
# Add a remote MC service
mc remote add <name> <url>

# List configured remote services
mc remote list

# Remove a remote service
mc remote remove <name>

# Authenticate with a remote service
mc -r <remote_name> auth

# Create a session on a remote service
mc -r <remote_name> [session create]

# List sessions on a remote service
mc -r <remote_name> session list
```

### Environment Variables

```bash
# Set environment variables for a session
mc session create -e VAR1=value1 -e VAR2=value2

# Set environment variables for a remote session
mc -r <remote_name> session create -e VAR1=value1
```

### Logging

```bash
# Stream logs from a session
mc session logs <id>

# Stream logs with follow option
mc session logs <id> -f
```
## MC Service Specification

### Overview

The MC Service is a web service that manages ephemeral containers in a Docker-in-Docker environment. It provides a REST API for container lifecycle management, authentication, and real-time log streaming.

### API Endpoints

#### Authentication

```
POST /auth/login     - Initiate Authentik authentication flow
POST /auth/callback  - Handle Authentik OAuth callback
POST /auth/refresh   - Refresh an existing token
POST /auth/logout    - Invalidate current token
```

### Authentik Integration

The MC Service integrates with Authentik at https://authentik.monadical.io using OAuth 2.0:

1. **Application Registration**:
   - MC Service is registered as an OAuth application in Authentik
   - Configured with redirect URI to `/auth/callback`
   - Assigned appropriate scopes for user identification

2. **Authentication Flow**:
   - User initiates authentication via CLI
   - MC CLI opens browser to Authentik authorization URL
   - User logs in through Authentik's interface
   - Authentik redirects to callback URL with authorization code
   - MC Service exchanges code for access and refresh tokens
   - CLI receives and securely stores tokens

3. **Token Management**:
   - Access tokens used for API authorization
   - Refresh tokens used to obtain new access tokens
   - Tokens are encrypted at rest in CLI configuration

#### Sessions

```
GET    /sessions              - List all sessions
POST   /sessions              - Create a new session
GET    /sessions/{id}         - Get session details
DELETE /sessions/{id}         - Terminate a session
POST   /sessions/{id}/connect - Establish connection to session
GET    /sessions/{id}/logs    - Stream session logs
```

#### Drivers

```
GET /drivers        - List available drivers
GET /drivers/{name} - Get driver details
```

#### Projects

```
GET    /projects      - List all projects
POST   /projects      - Add a new project
GET    /projects/{id} - Get project details
PUT    /projects/{id} - Update project details
DELETE /projects/{id} - Remove a project
```

### Service Configuration

```yaml
# mc-service.yaml
server:
  port: 3000
  host: 0.0.0.0

docker:
  socket: /var/run/docker.sock
  network: mc-network

auth:
  provider: authentik
  url: https://authentik.monadical.io
  clientId: mc-service

logging:
  providers:
    - type: fluentd
      url: http://fluentd.example.com:24224
    - type: langfuse
      url: https://api.langfuse.com
      apiKey: ${LANGFUSE_API_KEY}

drivers:
  - name: goose
    image: monadical/mc-goose:latest
    env:
      MCP_HOST: http://mcp:8000
  - name: aider
    image: monadical/mc-aider:latest
  - name: claude-code
    image: monadical/mc-claude-code:latest

projects:
  storage:
    type: encrypted
    key: ${PROJECT_ENCRYPTION_KEY}
  default_ssh_scan:
    - github.com
    - gitlab.com
    - bitbucket.org
```

### Docker-in-Docker Implementation

The MC Service runs in a container with access to the host's Docker socket, allowing it to create and manage sibling containers. This approach provides:

1. Isolation between containers
2. Simple lifecycle management
3. Resource constraints for security

### Connection Handling

For remote connections to containers, the service provides two methods:

1. **WebSocket Terminal**: Browser-based terminal access
2. **SSH Server**: Each container runs an SSH server for CLI access

### Logging Implementation

The MC Service implements log collection and forwarding:

1. Container logs are captured using Docker's logging drivers
2. Logs are forwarded to configured providers (Fluentd, Langfuse)
3. Real-time log streaming is available via WebSockets

## Project Management

### Adding Projects

Users can add projects with associated credentials:

```bash
# Add a project with SSH key
mc project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519

# Add a project with token authentication
mc project add github.com/hello/private --token ghp_123456789

# List all projects
mc project list

# Remove a project
mc project remove github.com/hello/private
```

### Project Configuration

Projects are stored in the MC service and referenced by their repository URL. The configuration includes:

```yaml
# Project configuration
id: github.com/hello/private
url: git@github.com:hello/private.git
type: git
auth:
  type: ssh
  key: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...encrypted key data...
    -----END OPENSSH PRIVATE KEY-----
  public_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI...
```
## Driver Implementation

### Driver Structure

Each driver is a Docker image with a standardized structure:

```
/
├── entrypoint.sh     # Container initialization
├── mc-init.sh        # Standardized initialization script
├── mc-driver.yaml    # Driver metadata and configuration
├── tool/             # AI tool installation
└── ssh/              # SSH server configuration
```

### Standardized Initialization Script

All drivers include a standardized `mc-init.sh` script that handles common initialization tasks:

```bash
#!/bin/bash

# Project initialization
if [ -n "$MC_PROJECT_URL" ]; then
    echo "Initializing project: $MC_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$MC_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$MC_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$MC_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$MC_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone $MC_PROJECT_URL /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.mc/init.sh" ]; then
        bash /app/.mc/init.sh
    fi
fi

# Driver-specific initialization continues...
```

### Driver Configuration (mc-driver.yaml)

```yaml
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com

init:
  pre_command: /mc-init.sh
  command: /entrypoint.sh

environment:
  - name: MCP_HOST
    description: MCP server host
    required: true
    default: http://localhost:8000

  - name: GOOSE_ID
    description: Goose instance ID
    required: false

  # Project environment variables
  - name: MC_PROJECT_URL
    description: Project repository URL
    required: false

  - name: MC_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: MC_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: MC_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000  # Main application
  - 22    # SSH server

volumes:
  - mountPath: /app
    description: Application directory
```

### Example Built-in Drivers

1. **goose**: Goose with MCP servers
2. **aider**: Aider coding assistant
3. **claude-code**: Claude Code environment
4. **custom**: Custom Dockerfile support

## Security Considerations

1. **Container Isolation**: Each session runs in an isolated container
2. **Authentication**: Integration with Authentik for secure authentication
3. **Resource Limits**: Configurable CPU, memory, and storage limits
4. **Network Isolation**: Internal Docker network for container-to-container communication
5. **Encrypted Connections**: TLS for API connections and SSH for terminal access

## Deployment

### MC Service Deployment

```yaml
# docker-compose.yml for MC Service
version: '3.8'

services:
  mc-service:
    image: monadical/mc-service:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./config:/app/config
    ports:
      - "3000:3000"
    environment:
      - AUTH_URL=https://authentik.monadical.io
      - LANGFUSE_API_KEY=your_api_key
    networks:
      - mc-network

networks:
  mc-network:
    driver: bridge
```

## Project Repository Integration Workflow

### Adding a Project Repository

1. User adds project repository with authentication:
   ```bash
   mc project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519
   ```

2. MC CLI reads the SSH key, encrypts it, and sends to MC Service

3. MC Service stores the project configuration securely

### Using a Project in a Session

1. User creates a session with a project:
   ```bash
   mc -r monadical git@github.com:hello/private
   ```

2. MC Service:
   - Identifies the project from the URL
   - Retrieves project authentication details
   - Sets up environment variables:
     ```
     MC_PROJECT_URL=git@github.com:hello/private
     MC_PROJECT_TYPE=git
     MC_GIT_SSH_KEY=<contents of the SSH key>
     ```
   - Creates container with these environment variables

3. Container initialization:
   - The standardized `mc-init.sh` script detects the project environment variables
   - Sets up SSH key or token authentication
   - Clones the repository to `/app`
   - Runs any project-specific initialization scripts

4. User can immediately begin working with the repository

## Implementation Roadmap

1. **Phase 1**: Local CLI tool with Docker integration
2. **Phase 2**: MC Service REST API with basic container management
3. **Phase 3**: Authentication and secure connections
4. **Phase 4**: Project management functionality
5. **Phase 5**: Driver implementation (Goose, Aider, Claude Code)
6. **Phase 6**: Logging integration with Fluentd and Langfuse
7. **Phase 7**: CLI remote connectivity improvements
8. **Phase 8**: Additional drivers and extensibility features
@@ -1,5 +1,5 @@
```python
"""
MC - Monadical Container Tool
Cubbi - Cubbi Container Tool
"""

__version__ = "0.1.0"
```
1916  cubbi/cli.py  (new file)

File diff suppressed because it is too large.
174  cubbi/config.py  (new file)

@@ -0,0 +1,174 @@
```python
from pathlib import Path
from typing import Dict, Optional

import yaml

from .models import Config, Image

DEFAULT_CONFIG_DIR = Path.home() / ".config" / "cubbi"
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.yaml"
DEFAULT_IMAGES_DIR = Path.home() / ".config" / "cubbi" / "images"
PROJECT_ROOT = Path(__file__).parent.parent
BUILTIN_IMAGES_DIR = Path(__file__).parent / "images"

# Dynamically loaded from images directory at runtime
DEFAULT_IMAGES = {}


class ConfigManager:
    def __init__(self, config_path: Optional[Path] = None):
        self.config_path = config_path or DEFAULT_CONFIG_FILE
        self.config_dir = self.config_path.parent
        self.images_dir = DEFAULT_IMAGES_DIR
        self.config = self._load_or_create_config()

        # Always load package images on initialization
        # These are separate from the user config
        self.builtin_images = self._load_package_images()

    def _load_or_create_config(self) -> Config:
        """Load existing config or create a new one with defaults"""
        if self.config_path.exists():
            try:
                with open(self.config_path, "r") as f:
                    config_data = yaml.safe_load(f) or {}

                # Create a new config from scratch, then update with data from file
                config = Config(
                    docker=config_data.get("docker", {}),
                    defaults=config_data.get("defaults", {}),
                )

                # Add images
                if "images" in config_data:
                    for image_name, image_data in config_data["images"].items():
                        config.images[image_name] = Image.model_validate(image_data)

                return config
            except Exception as e:
                print(f"Error loading config: {e}")
                return self._create_default_config()
        else:
            return self._create_default_config()

    def _create_default_config(self) -> Config:
        """Create a default configuration"""
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.images_dir.mkdir(parents=True, exist_ok=True)

        # Initial config without images
        config = Config(
            docker={
                "socket": "/var/run/docker.sock",
                "network": "cubbi-network",
            },
            defaults={
                "image": "goose",
            },
        )

        self.save_config(config)
        return config

    def save_config(self, config: Optional[Config] = None) -> None:
        """Save the current config to disk"""
        if config:
            self.config = config

        self.config_dir.mkdir(parents=True, exist_ok=True)

        # Use model_dump with mode="json" for proper serialization of enums
        config_dict = self.config.model_dump(mode="json")

        # Write to file
        with open(self.config_path, "w") as f:
            yaml.dump(config_dict, f)

    def get_image(self, name: str) -> Optional[Image]:
        """Get an image by name, checking builtin images first, then user-configured ones"""
        # Check builtin images first (package images take precedence)
        if name in self.builtin_images:
            return self.builtin_images[name]
        # If not found, check user-configured images
        return self.config.images.get(name)

    def list_images(self) -> Dict[str, Image]:
        """List all available images (both builtin and user-configured)"""
        # Start with user config images
        all_images = dict(self.config.images)

        # Add builtin images, overriding any user images with the same name
        # This ensures that package-provided images always take precedence
        all_images.update(self.builtin_images)

        return all_images

    # Session management has been moved to SessionManager in session.py

    def load_image_from_dir(self, image_dir: Path) -> Optional[Image]:
        """Load an image configuration from a directory"""
        # Check for image config file
        yaml_path = image_dir / "cubbi-image.yaml"
        if not yaml_path.exists():
            return None

        try:
            with open(yaml_path, "r") as f:
                image_data = yaml.safe_load(f)

            # Extract required fields
            if not all(
                k in image_data
                for k in ["name", "description", "version", "maintainer"]
            ):
                print(f"Image config {yaml_path} missing required fields")
                return None

            # Use Image.model_validate to handle all fields from YAML
            # This will map all fields according to the Image model structure
            try:
                # Ensure image field is set if not in YAML
                if "image" not in image_data:
                    image_data["image"] = f"monadical/cubbi-{image_data['name']}:latest"

                image = Image.model_validate(image_data)
                return image
            except Exception as validation_error:
                print(
                    f"Error validating image data from {yaml_path}: {validation_error}"
                )
                return None

        except Exception as e:
            print(f"Error loading image from {yaml_path}: {e}")
            return None

    def _load_package_images(self) -> Dict[str, Image]:
        """Load all package images from the cubbi/images directory"""
        images = {}

        if not BUILTIN_IMAGES_DIR.exists():
            return images

        # Search for cubbi-image.yaml files in each subdirectory
        for image_dir in BUILTIN_IMAGES_DIR.iterdir():
            if image_dir.is_dir():
                image = self.load_image_from_dir(image_dir)
                if image:
                    images[image.name] = image

        return images

    def get_image_path(self, image_name: str) -> Optional[Path]:
        """Get the directory path for an image"""
        # Check package images first (these are the bundled ones)
        package_path = BUILTIN_IMAGES_DIR / image_name
        if package_path.exists() and package_path.is_dir():
            return package_path

        # Then check user images
        user_path = self.images_dir / image_name
        if user_path.exists() and user_path.is_dir():
            return user_path

        return None
```
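A minimal usage sketch of the `ConfigManager` above, for orientation only. It assumes the package is importable as `cubbi` and that `cubbi.models` provides the `Config` and `Image` models referenced in the code; the calls themselves are limited to methods defined in this file.

```python
# Illustrative only: exercise ConfigManager against the default config location.
from cubbi.config import ConfigManager

manager = ConfigManager()  # loads or creates ~/.config/cubbi/config.yaml

# Built-in images bundled with the package take precedence over user-defined ones.
for name, image in manager.list_images().items():
    print(name, image.image)

goose = manager.get_image("goose")
if goose is not None:
    print("goose image directory:", manager.get_image_path("goose"))
```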
882
cubbi/container.py
Normal file
882
cubbi/container.py
Normal file
@@ -0,0 +1,882 @@
import concurrent.futures
import hashlib
import logging
import os
import pathlib
import sys
import uuid
from typing import Dict, List, Optional, Tuple

import docker
from docker.errors import DockerException, ImageNotFound

from .config import ConfigManager
from .mcp import MCPManager
from .models import Session, SessionStatus
from .session import SessionManager
from .user_config import UserConfigManager

# Configure logging
logger = logging.getLogger(__name__)


class ContainerManager:
    def __init__(
        self,
        config_manager: Optional[ConfigManager] = None,
        session_manager: Optional[SessionManager] = None,
        user_config_manager: Optional[UserConfigManager] = None,
    ):
        self.config_manager = config_manager or ConfigManager()
        self.session_manager = session_manager or SessionManager()
        self.user_config_manager = user_config_manager or UserConfigManager()
        self.mcp_manager = MCPManager(config_manager=self.user_config_manager)

        try:
            self.client = docker.from_env()
            # Test connection
            self.client.ping()
        except DockerException as e:
            logger.error(f"Error connecting to Docker: {e}")
            print(f"Error connecting to Docker: {e}")
            sys.exit(1)

    def _ensure_network(self) -> None:
        """Ensure the Cubbi network exists"""
        network_name = self.config_manager.config.docker.get("network", "cubbi-network")
        networks = self.client.networks.list(names=[network_name])
        if not networks:
            self.client.networks.create(network_name, driver="bridge")

    def _generate_session_id(self) -> str:
        """Generate a unique session ID"""
        return str(uuid.uuid4())[:8]

    def _get_project_config_path(
        self, project: Optional[str] = None, project_name: Optional[str] = None
    ) -> Optional[pathlib.Path]:
        """Get the path to the project configuration directory

        Args:
            project: Optional project repository URL or path (only used for mounting).
            project_name: Optional explicit project name. Only used if specified.

        Returns:
            Path to the project configuration directory, or None if no project_name is provided
        """
        # Get home directory for the Cubbi config
        cubbi_home = pathlib.Path.home() / ".cubbi"

        # Only use project_name if explicitly provided
        if project_name:
            # Create a hash of the project name to use as directory name
            project_hash = hashlib.md5(project_name.encode()).hexdigest()

            # Create the project config directory path
            config_path = cubbi_home / "projects" / project_hash / "config"

            # Create the directory if it doesn't exist
            config_path.parent.mkdir(parents=True, exist_ok=True)
            config_path.mkdir(exist_ok=True)

            return config_path
        else:
            # If no project_name is provided, don't create any config directory
            # This ensures we don't mount the /cubbi-config volume for project-less sessions
            return None
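    # Example (illustrative, not executed here): with project_name="my-api",
    # the configuration directory resolves to
    #   ~/.cubbi/projects/<md5("my-api")>/config
    # and is later bind-mounted into the session container at /cubbi-config.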
    def list_sessions(self) -> List[Session]:
        """List all active Cubbi sessions"""
        sessions = []
        try:
            containers = self.client.containers.list(
                all=True, filters={"label": "cubbi.session"}
            )

            for container in containers:
                container_id = container.id
                labels = container.labels

                session_id = labels.get("cubbi.session.id")
                if not session_id:
                    continue

                status = SessionStatus.RUNNING
                if container.status == "exited":
                    status = SessionStatus.STOPPED
                elif container.status == "created":
                    status = SessionStatus.CREATING

                session = Session(
                    id=session_id,
                    name=labels.get("cubbi.session.name", f"cubbi-{session_id}"),
                    image=labels.get("cubbi.image", "unknown"),
                    status=status,
                    container_id=container_id,
                )

                # Get port mappings
                if container.attrs.get("NetworkSettings", {}).get("Ports"):
                    ports = {}
                    for container_port, host_ports in container.attrs[
                        "NetworkSettings"
                    ]["Ports"].items():
                        if host_ports:
                            # Strip /tcp or /udp suffix and convert to int
                            container_port_num = int(container_port.split("/")[0])
                            host_port = int(host_ports[0]["HostPort"])
                            ports[container_port_num] = host_port
                    session.ports = ports

                sessions.append(session)

        except DockerException as e:
            print(f"Error listing sessions: {e}")

        return sessions
    def create_session(
        self,
        image_name: str,
        project: Optional[str] = None,
        project_name: Optional[str] = None,
        environment: Optional[Dict[str, str]] = None,
        session_name: Optional[str] = None,
        mount_local: bool = False,
        volumes: Optional[Dict[str, Dict[str, str]]] = None,
        networks: Optional[List[str]] = None,
        mcp: Optional[List[str]] = None,
        run_command: Optional[str] = None,
        uid: Optional[int] = None,
        gid: Optional[int] = None,
        model: Optional[str] = None,
        provider: Optional[str] = None,
        ssh: bool = False,
    ) -> Optional[Session]:
        """Create a new Cubbi session

        Args:
            image_name: The name of the image to use
            project: Optional project repository URL or local directory path
            project_name: Optional explicit project name for configuration persistence
            environment: Optional environment variables
            session_name: Optional session name
            mount_local: Whether to mount the specified local directory to /app (ignored if project is None)
            volumes: Optional additional volumes to mount (dict of {host_path: {"bind": container_path, "mode": mode}})
            run_command: Optional command to execute before starting the shell
            networks: Optional list of additional Docker networks to connect to
            mcp: Optional list of MCP server names to attach to the session
            uid: Optional user ID for the container process
            gid: Optional group ID for the container process
            ssh: Whether to start the SSH server in the container (default: False)
        """
        try:
            # Validate image exists
            image = self.config_manager.get_image(image_name)
            if not image:
                print(f"Image '{image_name}' not found")
                return None

            # Generate session ID and name
            session_id = self._generate_session_id()
            if not session_name:
                session_name = f"cubbi-{session_id}"

            # Ensure network exists
            self._ensure_network()

            # Prepare environment variables
            env_vars = environment or {}

            # Add CUBBI_USER_ID and CUBBI_GROUP_ID for entrypoint script
            env_vars["CUBBI_USER_ID"] = str(uid) if uid is not None else "1000"
            env_vars["CUBBI_GROUP_ID"] = str(gid) if gid is not None else "1000"

            # Set SSH environment variable
            env_vars["CUBBI_SSH_ENABLED"] = "true" if ssh else "false"

            # Pass API keys from host environment to container for local development
            api_keys = [
                "OPENAI_API_KEY",
                "ANTHROPIC_API_KEY",
                "OPENROUTER_API_KEY",
                "GOOGLE_API_KEY",
                "LANGFUSE_INIT_PROJECT_PUBLIC_KEY",
                "LANGFUSE_INIT_PROJECT_SECRET_KEY",
                "LANGFUSE_URL",
            ]
            for key in api_keys:
                if key in os.environ and key not in env_vars:
                    env_vars[key] = os.environ[key]

            # Pull image if needed
            try:
                self.client.images.get(image.image)
            except ImageNotFound:
                print(f"Pulling image {image.image}...")
                self.client.images.pull(image.image)

            # Set up volume mounts
            session_volumes = {}

            # Determine if project is a local directory or a Git repository
            is_local_directory = False
            is_git_repo = False

            if project:
                # Check if project is a local directory
                if os.path.isdir(os.path.expanduser(project)):
                    is_local_directory = True
                else:
                    # If not a local directory, assume it's a Git repo URL
                    is_git_repo = True

            # Handle mounting based on project type
            if is_local_directory and mount_local:
                # Mount the specified local directory to /app in the container
                local_dir = os.path.abspath(os.path.expanduser(project))
                session_volumes[local_dir] = {"bind": "/app", "mode": "rw"}
                print(f"Mounting local directory {local_dir} to /app")
                # Clear project for container environment since we're mounting
                project = None
            elif is_git_repo:
                env_vars["CUBBI_PROJECT_URL"] = project
                print(
                    f"Git repository URL provided - container will clone {project} into /app during initialization"
                )

            # Add user-specified volumes
            if volumes:
                for host_path, mount_spec in volumes.items():
                    container_path = mount_spec["bind"]
                    # Check for conflicts with /app mount
                    if container_path == "/app" and is_local_directory and mount_local:
                        print(
                            "[yellow]Warning: Volume mount to /app conflicts with local directory mount. User-specified mount takes precedence.[/yellow]"
                        )
                        # Remove the local directory mount if there's a conflict
                        if local_dir in session_volumes:
                            del session_volumes[local_dir]

                    # Add the volume
                    session_volumes[host_path] = mount_spec
                    print(f"Mounting volume: {host_path} -> {container_path}")

            # Set up persistent project configuration if project_name is provided
            project_config_path = self._get_project_config_path(project, project_name)
            if project_config_path:
                print(f"Using project configuration directory: {project_config_path}")

                # Mount the project configuration directory
                session_volumes[str(project_config_path)] = {
                    "bind": "/cubbi-config",
                    "mode": "rw",
                }

                # Add environment variables for config path
                env_vars["CUBBI_CONFIG_DIR"] = "/cubbi-config"
                env_vars["CUBBI_IMAGE_CONFIG_DIR"] = f"/cubbi-config/{image_name}"

                # Create image-specific config directories and set up direct volume mounts
                if image.persistent_configs:
                    persistent_links_data = []  # To store "source:target" pairs for symlinks
                    print("Setting up persistent configuration directories:")
                    for config in image.persistent_configs:
                        # Get target directory path on host
                        target_dir = project_config_path / config.target.removeprefix(
                            "/cubbi-config/"
                        )

                        # Create directory if it's a directory type config
                        if config.type == "directory":
                            dir_existed = target_dir.exists()
                            target_dir.mkdir(parents=True, exist_ok=True)
                            if not dir_existed:
                                print(f" - Created directory: {target_dir}")
                        # For files, make sure parent directory exists
                        elif config.type == "file":
                            target_dir.parent.mkdir(parents=True, exist_ok=True)
                            # File will be created by the container if needed

                        # Store the source and target paths for the init script
                        # Note: config.target is the path *within* /cubbi-config
                        persistent_links_data.append(f"{config.source}:{config.target}")

                        print(
                            f" - Prepared host path {target_dir} for symlink target {config.target}"
                        )

                    # Set up persistent links
                    if persistent_links_data:
                        env_vars["CUBBI_PERSISTENT_LINKS"] = ";".join(
                            persistent_links_data
                        )
                        print(
                            f"Setting CUBBI_PERSISTENT_LINKS={env_vars['CUBBI_PERSISTENT_LINKS']}"
                        )
            else:
                print(
                    "No project_name provided - skipping configuration directory setup."
                )

            # Default Cubbi network
            default_network = self.config_manager.config.docker.get(
                "network", "cubbi-network"
            )

            # Get network list
            network_list = [default_network]

            # Process MCPs if provided
            mcp_configs = []
            mcp_names = []
            mcp_container_names = []

            # Ensure MCP is a list
            mcps_to_process = mcp if isinstance(mcp, list) else []

            # Process each MCP
            for mcp_name in mcps_to_process:
                # Get the MCP configuration
                mcp_config = self.mcp_manager.get_mcp(mcp_name)
                if not mcp_config:
                    print(f"Warning: MCP server '{mcp_name}' not found, skipping")
                    continue

                # Add to the list of processed MCPs
                mcp_configs.append(mcp_config)
                mcp_names.append(mcp_name)

                # Check if the MCP server is running (for Docker-based MCPs)
                if mcp_config.get("type") in ["docker", "proxy"]:
                    # Ensure the MCP is running
                    try:
                        print(f"Ensuring MCP server '{mcp_name}' is running...")
                        self.mcp_manager.start_mcp(mcp_name)

                        # Store container name for later network connection
                        container_name = self.mcp_manager.get_mcp_container_name(
                            mcp_name
                        )
                        mcp_container_names.append(container_name)

                        # Get MCP status to extract endpoint information
                        mcp_status = self.mcp_manager.get_mcp_status(mcp_name)

                        # Add MCP environment variables with index
                        idx = len(mcp_names) - 1  # 0-based index for the current MCP

                        if mcp_config.get("type") == "remote":
                            # For remote MCP, set the URL and headers
                            env_vars[f"MCP_{idx}_URL"] = mcp_config.get("url")
                            if mcp_config.get("headers"):
                                # Serialize headers as JSON
                                import json

                                env_vars[f"MCP_{idx}_HEADERS"] = json.dumps(
                                    mcp_config.get("headers")
                                )
                        else:
                            # For Docker/proxy MCP, set the connection details
                            # Use both the container name and the short name for internal Docker DNS resolution
                            container_name = self.mcp_manager.get_mcp_container_name(
                                mcp_name
                            )
                            # Use the short name (mcp_name) as the primary hostname
                            env_vars[f"MCP_{idx}_HOST"] = mcp_name
                            # Default port is 8080 unless specified in status
                            port = next(
                                iter(mcp_status.get("ports", {}).values()), 8080
                            )
                            env_vars[f"MCP_{idx}_PORT"] = str(port)
                            # Use the short name in the URL to take advantage of the network alias
                            env_vars[f"MCP_{idx}_URL"] = f"http://{mcp_name}:{port}/sse"
                            # For backward compatibility, also set the full container name URL
                            env_vars[f"MCP_{idx}_CONTAINER_URL"] = (
                                f"http://{container_name}:{port}/sse"
                            )

                        # Set type-specific information
                        env_vars[f"MCP_{idx}_TYPE"] = mcp_config.get("type")
                        env_vars[f"MCP_{idx}_NAME"] = mcp_name

                    except Exception as e:
                        print(f"Warning: Failed to start MCP server '{mcp_name}': {e}")
                        # Get the container name before trying to remove it from the list
                        try:
                            container_name = self.mcp_manager.get_mcp_container_name(
                                mcp_name
                            )
                            if container_name in mcp_container_names:
                                mcp_container_names.remove(container_name)
                        except Exception:
                            # If we can't get the container name, just continue
                            pass

                elif mcp_config.get("type") == "remote":
                    # For remote MCP, just set environment variables
                    idx = len(mcp_names) - 1  # 0-based index for the current MCP

                    env_vars[f"MCP_{idx}_URL"] = mcp_config.get("url")
                    if mcp_config.get("headers"):
                        # Serialize headers as JSON
                        import json

                        env_vars[f"MCP_{idx}_HEADERS"] = json.dumps(
                            mcp_config.get("headers")
                        )

                    # Set type-specific information
                    env_vars[f"MCP_{idx}_TYPE"] = "remote"
                    env_vars[f"MCP_{idx}_NAME"] = mcp_name

            # Set environment variables for MCP count if we have any
            if mcp_names:
                env_vars["MCP_COUNT"] = str(len(mcp_names))
                env_vars["MCP_ENABLED"] = "true"
                # Serialize all MCP names as JSON
                import json

                env_vars["MCP_NAMES"] = json.dumps(mcp_names)

            # Add user-specified networks
            # Default Cubbi network
            default_network = self.config_manager.config.docker.get(
                "network", "cubbi-network"
            )

            # Get network list, ensuring default is first and no duplicates
            network_list_set = {default_network}
            if networks:
                network_list_set.update(networks)
            network_list = (
                [default_network] + [n for n in networks if n != default_network]
                if networks
                else [default_network]
            )

            if networks:
                for network in networks:
                    if network not in network_list:
                        # This check is slightly redundant now but harmless
                        network_list.append(network)
                        print(f"Adding network {network} to session")

            # Determine container command and entrypoint
            container_command = None
            entrypoint = None
            target_shell = "/bin/bash"

            if run_command:
                # Set environment variable for cubbi-init.sh to pick up
                env_vars["CUBBI_RUN_COMMAND"] = run_command
                # Set the container's command to be the final shell
                container_command = [target_shell]
                logger.info(
                    f"Setting CUBBI_RUN_COMMAND and targeting shell {target_shell}"
                )
            else:
                # Use default behavior (often defined by image's ENTRYPOINT/CMD)
                # Set the container's command to be the final shell if none specified by Dockerfile CMD
                # Note: Dockerfile CMD is ["tail", "-f", "/dev/null"], so this might need adjustment
                # if we want interactive shell by default without --run. Let's default to bash for now.
                container_command = [target_shell]
                logger.info(
                    "Using default container entrypoint/command for interactive shell."
                )

            # Set default model/provider from user config if not explicitly provided
            env_vars["CUBBI_MODEL"] = model or self.user_config_manager.get(
                "defaults.model", ""
            )
            env_vars["CUBBI_PROVIDER"] = provider or self.user_config_manager.get(
                "defaults.provider", ""
            )

            # Create container
            container = self.client.containers.create(
                image=image.image,
                name=session_name,
                hostname=session_name,
                detach=True,
                tty=True,
                stdin_open=True,
                environment=env_vars,
                volumes=session_volumes,
                labels={
                    "cubbi.session": "true",
                    "cubbi.session.id": session_id,
                    "cubbi.session.name": session_name,
                    "cubbi.image": image_name,
                    "cubbi.project": project or "",
                    "cubbi.project_name": project_name or "",
                    "cubbi.mcps": ",".join(mcp_names) if mcp_names else "",
                },
                network=network_list[0],  # Connect to the first network initially
                command=container_command,  # Set the command
                entrypoint=entrypoint,  # Set the entrypoint (might be None)
                ports={f"{port}/tcp": None for port in image.ports},
            )

            # Start container
            container.start()

            # Connect to additional networks (after the first one in network_list)
            if len(network_list) > 1:
                for network_name in network_list[1:]:
                    try:
                        # Get or create the network
                        try:
                            network = self.client.networks.get(network_name)
                        except DockerException:
                            print(f"Network '{network_name}' not found, creating it...")
                            network = self.client.networks.create(
                                network_name, driver="bridge"
                            )

                        # Connect the container to the network with session name as an alias
                        network.connect(container, aliases=[session_name])
                        print(
                            f"Connected to network: {network_name} with alias: {session_name}"
                        )
                    except DockerException as e:
                        print(f"Error connecting to network {network_name}: {e}")

            # Reload the container to get updated network information
            container.reload()

            # Connect directly to each MCP's dedicated network
            for mcp_name in mcp_names:
                try:
                    # Get the dedicated network for this MCP
                    dedicated_network_name = f"cubbi-mcp-{mcp_name}-network"

                    try:
                        network = self.client.networks.get(dedicated_network_name)

                        # Connect the session container to the MCP's dedicated network
                        network.connect(container, aliases=[session_name])
                        print(
                            f"Connected session to MCP '{mcp_name}' via dedicated network: {dedicated_network_name}"
                        )
                    except DockerException as e:
                        print(
                            f"Error connecting to MCP dedicated network '{dedicated_network_name}': {e}"
                        )

                except Exception as e:
                    print(f"Error connecting session to MCP '{mcp_name}': {e}")

            # Connect to additional user-specified networks
            if networks:
                for network_name in networks:
                    # Check if already connected to this network
                    # NetworkSettings.Networks contains a dict where keys are network names
                    existing_networks = (
                        container.attrs.get("NetworkSettings", {})
                        .get("Networks", {})
                        .keys()
                    )
                    if network_name not in existing_networks:
                        try:
                            # Get or create the network
                            try:
                                network = self.client.networks.get(network_name)
                            except DockerException:
                                print(
                                    f"Network '{network_name}' not found, creating it..."
                                )
                                network = self.client.networks.create(
                                    network_name, driver="bridge"
                                )

                            # Connect the container to the network with session name as an alias
                            network.connect(container, aliases=[session_name])
                            print(
                                f"Connected to network: {network_name} with alias: {session_name}"
                            )
                        except DockerException as e:
                            print(f"Error connecting to network {network_name}: {e}")

            # Get updated port information
            container.reload()
            ports = {}
            if container.attrs.get("NetworkSettings", {}).get("Ports"):
                for container_port, host_ports in container.attrs["NetworkSettings"][
                    "Ports"
                ].items():
                    if host_ports:
                        container_port_num = int(container_port.split("/")[0])
                        host_port = int(host_ports[0]["HostPort"])
                        ports[container_port_num] = host_port

            # Create session object
            session = Session(
                id=session_id,
                name=session_name,
                image=image_name,
                status=SessionStatus.RUNNING,
                container_id=container.id,
                ports=ports,
            )

            # Save session to the session manager
            # Assuming Session model has uid and gid fields added to its definition
            session_data_to_save = session.model_dump(mode="json")
            # uid and gid are already part of the model dump now
            self.session_manager.add_session(session_id, session_data_to_save)

            return session

        except DockerException as e:
            print(f"Error creating session: {e}")
            return None
    def close_session(self, session_id: str) -> bool:
        """Close a Cubbi session"""
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id:
                    return self._close_single_session(session)

            print(f"Session '{session_id}' not found")
            return False

        except DockerException as e:
            print(f"Error closing session: {e}")
            return False

    def connect_session(self, session_id: str) -> bool:
        """Connect to a running Cubbi session"""
        # Retrieve full session data which should include uid/gid
        session_data = self.session_manager.get_session(session_id)

        if not session_data:
            print(f"Session '{session_id}' not found in session manager.")
            # Fallback: try listing via Docker labels if session data is missing
            sessions = self.list_sessions()
            session_obj = next((s for s in sessions if s.id == session_id), None)
            if not session_obj or not session_obj.container_id:
                print(f"Session '{session_id}' not found via Docker either.")
                return False
            container_id = session_obj.container_id
            print(
                f"[yellow]Warning: Session data missing for {session_id}. Connecting as default container user.[/yellow]"
            )
        else:
            container_id = session_data.get("container_id")
            if not container_id:
                print(f"Container ID not found for session {session_id}.")
                return False

        # Check status from Docker directly
        try:
            container = self.client.containers.get(container_id)
            if container.status != "running":
                print(
                    f"Session '{session_id}' container is not running (status: {container.status})."
                )
                return False
        except docker.errors.NotFound:
            print(f"Container {container_id} for session {session_id} not found.")
            # Clean up potentially stale session data
            self.session_manager.remove_session(session_id)
            return False
        except DockerException as e:
            print(f"Error checking container status for session {session_id}: {e}")
            return False

        try:
            # Use exec instead of attach to avoid container exit on Ctrl+C
            print(
                f"Connecting to session {session_id} (container: {container_id[:12]})..."
            )
            print("Type 'exit' to detach from the session.")

            # Use docker exec to start a new bash process in the container
            # This leverages the init-status.sh script in bash.bashrc
            # which will check initialization status
            cmd = ["docker", "exec", "-it", container_id, "bash", "-l"]

            # Use execvp to replace the current process with docker exec
            # This provides a seamless shell experience
            os.execvp("docker", cmd)
            # execvp does not return if successful
            return True  # Should not be reached if execvp succeeds

        except FileNotFoundError:
            print(
                "[red]Error: 'docker' command not found. Is Docker installed and in your PATH?[/red]"
            )
            return False

        except DockerException as e:
            print(f"Error connecting to session: {e}")
            return False

    def _close_single_session(self, session: Session) -> bool:
        """Close a single session (helper for parallel processing)

        Args:
            session: The session to close

        Returns:
            bool: Whether the session was successfully closed
        """
        if not session.container_id:
            return False

        try:
            container = self.client.containers.get(session.container_id)
            container.stop()
            container.remove()
            self.session_manager.remove_session(session.id)
            return True
        except DockerException as e:
            print(f"Error closing session {session.id}: {e}")
            return False

    def close_all_sessions(self, progress_callback=None) -> Tuple[int, bool]:
        """Close all Cubbi sessions with parallel processing and progress reporting

        Args:
            progress_callback: Optional callback function to report progress
                The callback should accept (session_id, status, message)

        Returns:
            tuple: (number of sessions closed, success)
        """
        try:
            sessions = self.list_sessions()
            if not sessions:
                return 0, True

            # No need for session status as we receive it via callback

            # Define a wrapper to track progress
            def close_with_progress(session):
                if not session.container_id:
                    return False

                try:
                    container = self.client.containers.get(session.container_id)
                    # Stop and remove container
                    container.stop()
                    container.remove()
                    # Remove from session storage
                    self.session_manager.remove_session(session.id)

                    # Notify about completion
                    if progress_callback:
                        progress_callback(
                            session.id,
                            "completed",
                            f"{session.name} closed successfully",
                        )

                    return True
                except DockerException as e:
                    error_msg = f"Error: {str(e)}"
                    if progress_callback:
                        progress_callback(session.id, "failed", error_msg)
                    print(f"Error closing session {session.id}: {e}")
                    return False

            # Use ThreadPoolExecutor to close sessions in parallel
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=min(10, len(sessions))
            ) as executor:
                # Submit all session closing tasks
                future_to_session = {
                    executor.submit(close_with_progress, session): session
                    for session in sessions
                }

                # Collect results
                closed_count = 0
                for future in concurrent.futures.as_completed(future_to_session):
                    session = future_to_session[future]
                    try:
                        success = future.result()
                        if success:
                            closed_count += 1
                    except Exception as e:
                        print(f"Error closing session {session.id}: {e}")

            return closed_count, closed_count > 0

        except DockerException as e:
            print(f"Error closing all sessions: {e}")
            return 0, False

    def get_session_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
        """Get logs from a Cubbi session"""
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id and session.container_id:
                    container = self.client.containers.get(session.container_id)
                    if follow:
                        for line in container.logs(stream=True, follow=True):
                            print(line.decode().strip())
                        return None
                    else:
                        return container.logs().decode()

            print(f"Session '{session_id}' not found")
            return None

        except DockerException as e:
            print(f"Error getting session logs: {e}")
            return None

    def get_init_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
        """Get initialization logs from a Cubbi session

        Args:
            session_id: The session ID
            follow: Whether to follow the logs

        Returns:
            The logs as a string, or None if there was an error
        """
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id and session.container_id:
                    container = self.client.containers.get(session.container_id)

                    # Check if initialization is complete
                    init_complete = False
                    try:
                        exit_code, output = container.exec_run(
                            "grep -q 'INIT_COMPLETE=true' /init.status"
                        )
                        init_complete = exit_code == 0
                    except DockerException:
                        pass

                    if follow and not init_complete:
                        print(
                            f"Following initialization logs for session {session_id}..."
                        )
                        print("Press Ctrl+C to stop following")
                        container.exec_run(
                            "tail -f /init.log", stream=True, demux=True, tty=True
                        )
                        return None
                    else:
                        exit_code, output = container.exec_run("cat /init.log")
                        if exit_code == 0:
                            return output.decode()
                        else:
                            print("No initialization logs found")
                            return None

            print(f"Session '{session_id}' not found")
            return None

        except DockerException as e:
            print(f"Error getting initialization logs: {e}")
            return None
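Taken together, `create_session` wires images, volumes, networks, and MCP servers into a single container, and `close_all_sessions` reports per-session progress through a callback. A minimal usage sketch (the `goose` image name, the `fetch` MCP name, and the repository URL are assumptions for illustration; running it requires a local Docker daemon and a configured MCP of that name):

```python
# Illustrative only: image, MCP, and repository names are not fixtures from this repo.
from cubbi.container import ContainerManager

manager = ContainerManager()

# Attaching a Docker/proxy MCP gives the container MCP_0_HOST, MCP_0_PORT,
# MCP_0_URL (http://<mcp-name>:<port>/sse), MCP_0_TYPE, MCP_0_NAME, plus
# MCP_COUNT, MCP_ENABLED, and MCP_NAMES, which update-goose-config.py reads later.
session = manager.create_session(
    image_name="goose",
    project="https://github.com/example/repo.git",  # cloned into /app by cubbi-init.sh
    project_name="example-repo",                    # enables /cubbi-config persistence
    mcp=["fetch"],
    ssh=False,
)

if session:
    print(session.id, session.ports)

    def on_progress(session_id: str, status: str, message: str) -> None:
        # close_all_sessions calls this with "completed" or "failed" per session
        print(f"[{status}] {session_id}: {message}")

    manager.close_all_sessions(progress_callback=on_progress)
```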
36 cubbi/cubbi_inspector_entrypoint.sh (Executable file)
@@ -0,0 +1,36 @@
#!/bin/sh
# This script modifies the Express server to bind to all interfaces

# Try to find the CLI script
CLI_FILE=$(find /app -name "cli.js" | grep -v node_modules | head -1)

if [ -z "$CLI_FILE" ]; then
    echo "Could not find CLI file. Trying common locations..."
    for path in "/app/client/bin/cli.js" "/app/bin/cli.js" "./client/bin/cli.js" "./bin/cli.js"; do
        if [ -f "$path" ]; then
            CLI_FILE="$path"
            break
        fi
    done
fi

if [ -z "$CLI_FILE" ]; then
    echo "ERROR: Could not find the MCP Inspector CLI file."
    exit 1
fi

echo "Found CLI file at: $CLI_FILE"

# Make a backup of the original file
cp "$CLI_FILE" "$CLI_FILE.bak"

# Modify the file to use 0.0.0.0 as the host
sed -i 's/app.listen(PORT/app.listen(PORT, "0.0.0.0"/g' "$CLI_FILE"
sed -i 's/server.listen(port/server.listen(port, "0.0.0.0"/g' "$CLI_FILE"
sed -i 's/listen(PORT/listen(PORT, "0.0.0.0"/g' "$CLI_FILE"

echo "Modified server to listen on all interfaces (0.0.0.0)"

# Start the MCP Inspector
echo "Starting MCP Inspector on all interfaces..."
exec npm start
3 cubbi/images/__init__.py (Normal file)
@@ -0,0 +1,3 @@
"""
MAI container image management
"""
28 cubbi/images/base.py (Normal file)
@@ -0,0 +1,28 @@
"""
Base image implementation for MAI
"""

from typing import Dict, Optional

from ..models import Image


class ImageManager:
    """Manager for MAI images"""

    @staticmethod
    def get_default_images() -> Dict[str, Image]:
        """Get the default built-in images"""
        from ..config import DEFAULT_IMAGES

        return DEFAULT_IMAGES

    @staticmethod
    def get_image_metadata(image_name: str) -> Optional[Dict]:
        """Get metadata for a specific image"""
        from ..config import DEFAULT_IMAGES

        if image_name in DEFAULT_IMAGES:
            return DEFAULT_IMAGES[image_name].model_dump()

        return None
71 cubbi/images/goose/Dockerfile (Normal file)
@@ -0,0 +1,71 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Goose with MCP servers for Cubbi"

# Install system dependencies including gosu for user switching and shadow for useradd/groupadd
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    passwd \
    git \
    openssh-server \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    vim \
    && rm -rf /var/lib/apt/lists/*

# Set up SSH server directory (configuration will be handled by entrypoint if needed)
RUN mkdir -p /var/run/sshd && chmod 0755 /var/run/sshd
# Do NOT enable root login or set root password here

# Install deps
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh
RUN curl -fsSL https://github.com/block/goose/releases/download/stable/download_cli.sh -o download_cli.sh && \
    chmod +x download_cli.sh && \
    ./download_cli.sh && \
    mv /root/.local/bin/goose /usr/local/bin/goose && \
    rm -rf download_cli.sh /tmp/goose-*

# Create app directory
WORKDIR /app

# Copy initialization scripts
COPY cubbi-init.sh /cubbi-init.sh
COPY entrypoint.sh /entrypoint.sh
COPY cubbi-image.yaml /cubbi-image.yaml
COPY init-status.sh /init-status.sh
COPY update-goose-config.py /usr/local/bin/update-goose-config.py

# Extend env via bashrc

# Make scripts executable
RUN chmod +x /cubbi-init.sh /entrypoint.sh /init-status.sh \
    /usr/local/bin/update-goose-config.py

# Set up initialization status check on login
RUN echo '[ -x /init-status.sh ] && /init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
# Set WORKDIR to /app, common practice and expected by cubbi-init.sh
WORKDIR /app

# Expose ports
EXPOSE 8000 22

# Set entrypoint - container starts as root, entrypoint handles user switching
ENTRYPOINT ["/entrypoint.sh"]
# Default command if none is provided (entrypoint will run this via gosu)
CMD ["tail", "-f", "/dev/null"]
41 cubbi/images/goose/README.md (Normal file)
@@ -0,0 +1,41 @@
# Goose Image for Cubbi

This image provides a containerized environment for running [Goose](https://goose.ai).

## Features

- Pre-configured environment for Goose AI
- Self-hosted instance integration
- SSH access
- Git repository integration
- Langfuse logging support

## Environment Variables

| Variable | Description | Required |
|----------|-------------|----------|
| `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` | Langfuse public key | No |
| `LANGFUSE_INIT_PROJECT_SECRET_KEY` | Langfuse secret key | No |
| `LANGFUSE_URL` | Langfuse API URL | No |
| `CUBBI_PROJECT_URL` | Project repository URL | No |
| `CUBBI_GIT_SSH_KEY` | SSH key for Git authentication | No |
| `CUBBI_GIT_TOKEN` | Token for Git authentication | No |

## Build

To build this image:

```bash
cd cubbi/images/goose
docker build -t monadical/cubbi-goose:latest .
```

## Usage

```bash
# Create a new session with this image
cubbi session create --driver goose

# Create with project repository
cubbi session create --driver goose --project github.com/username/repo
```
63 cubbi/images/goose/cubbi-image.yaml (Normal file)
@@ -0,0 +1,63 @@
name: goose
description: Goose AI environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-goose:latest

init:
  pre_command: /cubbi-init.sh
  command: /entrypoint.sh

environment:
  - name: LANGFUSE_INIT_PROJECT_PUBLIC_KEY
    description: Langfuse public key
    required: false
    sensitive: true

  - name: LANGFUSE_INIT_PROJECT_SECRET_KEY
    description: Langfuse secret key
    required: false
    sensitive: true

  - name: LANGFUSE_URL
    description: Langfuse API URL
    required: false
    default: https://cloud.langfuse.com

  # Project environment variables
  - name: CUBBI_PROJECT_URL
    description: Project repository URL
    required: false

  - name: CUBBI_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: CUBBI_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: CUBBI_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000 # Main application
  - 22 # SSH server

volumes:
  - mountPath: /app
    description: Application directory

persistent_configs:
  - source: "/app/.goose"
    target: "/cubbi-config/goose-app"
    type: "directory"
    description: "Goose memory"
  - source: "/home/cubbi/.config/goose"
    target: "/cubbi-config/goose-config"
    type: "directory"
    description: "Goose configuration"
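The `persistent_configs` entries above are what `ContainerManager.create_session` serializes into `CUBBI_PERSISTENT_LINKS`, and what `cubbi-init.sh` later turns into symlinks inside the container. A small sketch of that serialization (the standalone helper below is illustrative, not part of the codebase):

```python
# Illustrative: mirrors how create_session builds CUBBI_PERSISTENT_LINKS
# from the persistent_configs block above.
persistent_configs = [
    {"source": "/app/.goose", "target": "/cubbi-config/goose-app"},
    {"source": "/home/cubbi/.config/goose", "target": "/cubbi-config/goose-config"},
]

links = ";".join(f"{c['source']}:{c['target']}" for c in persistent_configs)
print(links)
# /app/.goose:/cubbi-config/goose-app;/home/cubbi/.config/goose:/cubbi-config/goose-config
# cubbi-init.sh then runs `ln -sfn <target> <source>` for each pair.
```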
179 cubbi/images/goose/cubbi-init.sh (Executable file)
@@ -0,0 +1,179 @@
#!/bin/bash
# Standardized initialization script for Cubbi images

# Redirect all output to both stdout and the log file
exec > >(tee -a /init.log) 2>&1

# Mark initialization as started
echo "=== Cubbi Initialization started at $(date) ==="

# --- START INSERTED BLOCK ---

# Default UID/GID if not provided (should be passed by cubbi tool)
CUBBI_USER_ID=${CUBBI_USER_ID:-1000}
CUBBI_GROUP_ID=${CUBBI_GROUP_ID:-1000}

echo "Using UID: $CUBBI_USER_ID, GID: $CUBBI_GROUP_ID"

# Create group if it doesn't exist
if ! getent group cubbi > /dev/null; then
    groupadd -g $CUBBI_GROUP_ID cubbi
else
    # If group exists but has different GID, modify it
    EXISTING_GID=$(getent group cubbi | cut -d: -f3)
    if [ "$EXISTING_GID" != "$CUBBI_GROUP_ID" ]; then
        groupmod -g $CUBBI_GROUP_ID cubbi
    fi
fi

# Create user if it doesn't exist
if ! getent passwd cubbi > /dev/null; then
    useradd --shell /bin/bash --uid $CUBBI_USER_ID --gid $CUBBI_GROUP_ID --no-create-home cubbi
else
    # If user exists but has different UID/GID, modify it
    EXISTING_UID=$(getent passwd cubbi | cut -d: -f3)
    EXISTING_GID=$(getent passwd cubbi | cut -d: -f4)
    if [ "$EXISTING_UID" != "$CUBBI_USER_ID" ] || [ "$EXISTING_GID" != "$CUBBI_GROUP_ID" ]; then
        usermod --uid $CUBBI_USER_ID --gid $CUBBI_GROUP_ID cubbi
    fi
fi

# Create home directory and set permissions
mkdir -p /home/cubbi
chown $CUBBI_USER_ID:$CUBBI_GROUP_ID /home/cubbi
mkdir -p /app
chown $CUBBI_USER_ID:$CUBBI_GROUP_ID /app

# Copy /root/.local/bin to the user's home directory
if [ -d /root/.local/bin ]; then
    echo "Copying /root/.local/bin to /home/cubbi/.local/bin..."
    mkdir -p /home/cubbi/.local/bin
    cp -r /root/.local/bin/* /home/cubbi/.local/bin/
    chown -R $CUBBI_USER_ID:$CUBBI_GROUP_ID /home/cubbi/.local
fi

# Start SSH server only if explicitly enabled
if [ "$CUBBI_SSH_ENABLED" = "true" ]; then
    echo "Starting SSH server..."
    /usr/sbin/sshd
else
    echo "SSH server disabled (use --ssh flag to enable)"
fi

# --- END INSERTED BLOCK ---

echo "INIT_COMPLETE=false" > /init.status

# Project initialization
if [ -n "$CUBBI_PROJECT_URL" ]; then
    echo "Initializing project: $CUBBI_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$CUBBI_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$CUBBI_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
        ssh-keyscan gitlab.com >> ~/.ssh/known_hosts 2>/dev/null
        ssh-keyscan bitbucket.org >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$CUBBI_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$CUBBI_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone $CUBBI_PROJECT_URL /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.cubbi/init.sh" ]; then
        bash /app/.cubbi/init.sh
    fi

    # Persistent configs are now directly mounted as volumes
    # No need to create symlinks anymore
    if [ -n "$CUBBI_CONFIG_DIR" ] && [ -d "$CUBBI_CONFIG_DIR" ]; then
        echo "Using persistent configuration volumes (direct mounts)"
    fi
fi

# Goose uses self-hosted instance, no API key required

# Set up Langfuse logging if credentials are provided
if [ -n "$LANGFUSE_INIT_PROJECT_SECRET_KEY" ] && [ -n "$LANGFUSE_INIT_PROJECT_PUBLIC_KEY" ]; then
    echo "Setting up Langfuse logging"
    export LANGFUSE_INIT_PROJECT_SECRET_KEY="$LANGFUSE_INIT_PROJECT_SECRET_KEY"
    export LANGFUSE_INIT_PROJECT_PUBLIC_KEY="$LANGFUSE_INIT_PROJECT_PUBLIC_KEY"
    export LANGFUSE_URL="${LANGFUSE_URL:-https://cloud.langfuse.com}"
fi

# Ensure /cubbi-config directory exists (required for symlinks)
if [ ! -d "/cubbi-config" ]; then
    echo "Creating /cubbi-config directory since it doesn't exist"
    mkdir -p /cubbi-config
    chown $CUBBI_USER_ID:$CUBBI_GROUP_ID /cubbi-config
fi

# Create symlinks for persistent configurations defined in the image
if [ -n "$CUBBI_PERSISTENT_LINKS" ]; then
    echo "Creating persistent configuration symlinks..."
    # Split by semicolon
    IFS=';' read -ra LINKS <<< "$CUBBI_PERSISTENT_LINKS"
    for link_pair in "${LINKS[@]}"; do
        # Split by colon
        IFS=':' read -r source_path target_path <<< "$link_pair"

        if [ -z "$source_path" ] || [ -z "$target_path" ]; then
            echo "Warning: Invalid link pair format '$link_pair', skipping."
            continue
        fi

        echo "Processing link: $source_path -> $target_path"
        parent_dir=$(dirname "$source_path")

        # Ensure parent directory of the link source exists and is owned by cubbi
        if [ ! -d "$parent_dir" ]; then
            echo "Creating parent directory: $parent_dir"
            mkdir -p "$parent_dir"
            echo "Changing ownership of parent $parent_dir to $CUBBI_USER_ID:$CUBBI_GROUP_ID"
            chown "$CUBBI_USER_ID:$CUBBI_GROUP_ID" "$parent_dir" || echo "Warning: Could not chown parent $parent_dir"
        fi

        # Create the symlink (force, no-dereference)
        echo "Creating symlink: ln -sfn $target_path $source_path"
        ln -sfn "$target_path" "$source_path"
        # Optionally, change ownership of the symlink itself
        echo "Changing ownership of symlink $source_path to $CUBBI_USER_ID:$CUBBI_GROUP_ID"
        chown -h "$CUBBI_USER_ID:$CUBBI_GROUP_ID" "$source_path" || echo "Warning: Could not chown symlink $source_path"

    done
    echo "Persistent configuration symlinks created."
fi

# Update Goose configuration with available MCP servers (run as cubbi after symlinks are created)
if [ -f "/usr/local/bin/update-goose-config.py" ]; then
    echo "Updating Goose configuration with MCP servers as cubbi..."
    gosu cubbi /usr/local/bin/update-goose-config.py
elif [ -f "$(dirname "$0")/update-goose-config.py" ]; then
    echo "Updating Goose configuration with MCP servers as cubbi..."
    gosu cubbi "$(dirname "$0")/update-goose-config.py"
else
    echo "Warning: update-goose-config.py script not found. Goose configuration will not be updated."
fi

# Run the user command first, if set, as cubbi
if [ -n "$CUBBI_RUN_COMMAND" ]; then
    echo "--- Executing initial command: $CUBBI_RUN_COMMAND ---";
    gosu cubbi sh -c "$CUBBI_RUN_COMMAND"; # Run user command as cubbi
    COMMAND_EXIT_CODE=$?;
    echo "--- Initial command finished (exit code: $COMMAND_EXIT_CODE) ---";
fi;

# Mark initialization as complete
echo "=== Cubbi Initialization completed at $(date) ==="
echo "INIT_COMPLETE=true" > /init.status

exec gosu cubbi "$@"
7 cubbi/images/goose/entrypoint.sh (Executable file)
@@ -0,0 +1,7 @@
#!/bin/bash
# Entrypoint script for Goose image
# Executes the standard initialization script, which handles user setup,
# service startup (like sshd), and switching to the non-root user
# before running the container's command (CMD).

exec /cubbi-init.sh "$@"
31 cubbi/images/goose/init-status.sh (Normal file)
@@ -0,0 +1,31 @@
#!/bin/bash
# Script to check and display initialization status

# Only proceed if running as root
if [ "$(id -u)" != "0" ]; then
    exit 0
fi

# Quick check instead of full logic
if ! grep -q "INIT_COMPLETE=true" "/init.status" 2>/dev/null; then
    # Only follow logs if initialization is incomplete
    if [ -f "/init.log" ]; then
        echo "----------------------------------------"
        tail -f /init.log &
        tail_pid=$!

        # Check every second if initialization has completed
        while true; do
            if grep -q "INIT_COMPLETE=true" "/init.status" 2>/dev/null; then
                kill $tail_pid 2>/dev/null
                echo "----------------------------------------"
                break
            fi
            sleep 1
        done
    else
        echo "No initialization logs found."
    fi
fi

exec gosu cubbi /bin/bash -il
106 cubbi/images/goose/update-goose-config.py (Normal file)
@@ -0,0 +1,106 @@
#!/usr/bin/env -S uv run --script
# /// script
# dependencies = ["ruamel.yaml"]
# ///
import json
import os
from pathlib import Path

from ruamel.yaml import YAML

# Path to goose config
GOOSE_CONFIG = Path.home() / ".config/goose/config.yaml"
CONFIG_DIR = GOOSE_CONFIG.parent

# Create config directory if it doesn't exist
CONFIG_DIR.mkdir(parents=True, exist_ok=True)


def update_config():
    """Update Goose configuration based on environment variables and config file"""

    yaml = YAML()

    # Load or initialize the YAML configuration
    if not GOOSE_CONFIG.exists():
        config_data = {"extensions": {}}
    else:
        with GOOSE_CONFIG.open("r") as f:
            config_data = yaml.load(f)
        if "extensions" not in config_data:
            config_data["extensions"] = {}

    # Add default developer extension
    config_data["extensions"]["developer"] = {
        "enabled": True,
        "name": "developer",
        "timeout": 300,
        "type": "builtin",
    }

    # Update goose configuration with model and provider from environment variables
    goose_model = os.environ.get("CUBBI_MODEL")
    goose_provider = os.environ.get("CUBBI_PROVIDER")

    if goose_model:
        config_data["GOOSE_MODEL"] = goose_model
        print(f"Set GOOSE_MODEL to {goose_model}")

    if goose_provider:
        config_data["GOOSE_PROVIDER"] = goose_provider
        print(f"Set GOOSE_PROVIDER to {goose_provider}")

    # Get MCP information from environment variables
    mcp_count = int(os.environ.get("MCP_COUNT", "0"))
    mcp_names_str = os.environ.get("MCP_NAMES", "[]")

    try:
        mcp_names = json.loads(mcp_names_str)
        print(f"Found {mcp_count} MCP servers: {', '.join(mcp_names)}")
    except json.JSONDecodeError:
        mcp_names = []
        print("Error parsing MCP_NAMES environment variable")

    # Process each MCP - collect the MCP configs to add or update
    for idx in range(mcp_count):
        mcp_name = os.environ.get(f"MCP_{idx}_NAME")
        mcp_type = os.environ.get(f"MCP_{idx}_TYPE")
        mcp_host = os.environ.get(f"MCP_{idx}_HOST")

        # Always use container's SSE port (8080) not the host-bound port
        if mcp_name and mcp_host:
            # Use standard MCP SSE port (8080)
            mcp_url = f"http://{mcp_host}:8080/sse"
            print(f"Processing MCP extension: {mcp_name} ({mcp_type}) - {mcp_url}")
            config_data["extensions"][mcp_name] = {
                "enabled": True,
                "name": mcp_name,
                "timeout": 60,
                "type": "sse",
                "uri": mcp_url,
                "envs": {},
            }
        elif mcp_name and os.environ.get(f"MCP_{idx}_URL"):
            # For remote MCPs, use the URL provided in environment
            mcp_url = os.environ.get(f"MCP_{idx}_URL")
            print(
                f"Processing remote MCP extension: {mcp_name} ({mcp_type}) - {mcp_url}"
            )
            config_data["extensions"][mcp_name] = {
                "enabled": True,
                "name": mcp_name,
                "timeout": 60,
                "type": "sse",
                "uri": mcp_url,
                "envs": {},
            }

    # Write the updated configuration back to the file
    with GOOSE_CONFIG.open("w") as f:
        yaml.dump(config_data, f)

    print(f"Updated Goose configuration at {GOOSE_CONFIG}")


if __name__ == "__main__":
    update_config()
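With the MCP_* variables set by `create_session`, the script above writes one SSE extension entry per MCP into `~/.config/goose/config.yaml`. A sketch of the entry produced for a single Docker/proxy MCP (the `fetch` name is an assumption used only for illustration):

```python
# Roughly what update-goose-config.py adds when MCP_0_NAME=fetch and MCP_0_HOST=fetch.
expected_extension = {
    "fetch": {
        "enabled": True,
        "name": "fetch",
        "timeout": 60,
        "type": "sse",
        "uri": "http://fetch:8080/sse",  # container-side SSE port, not the host-bound one
        "envs": {},
    }
}
```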
874 cubbi/mcp.py (Normal file)
@@ -0,0 +1,874 @@
|
||||
"""
|
||||
MCP (Model Control Protocol) server management for Cubbi Container.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import docker
|
||||
from docker.errors import DockerException, ImageNotFound, NotFound
|
||||
|
||||
from .models import DockerMCP, MCPContainer, MCPStatus, ProxyMCP, RemoteMCP
|
||||
from .user_config import UserConfigManager
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MCPManager:
|
||||
"""Manager for MCP (Model Control Protocol) servers."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config_manager: Optional[UserConfigManager] = None,
|
||||
):
|
||||
"""Initialize the MCP manager."""
|
||||
self.config_manager = config_manager or UserConfigManager()
|
||||
try:
|
||||
self.client = docker.from_env()
|
||||
# Test connection
|
||||
self.client.ping()
|
||||
except DockerException as e:
|
||||
logger.error(f"Error connecting to Docker: {e}")
|
||||
self.client = None
|
||||
|
||||
def _ensure_mcp_network(self) -> str:
|
||||
"""Ensure the MCP network exists and return its name.
|
||||
Note: This is used only by the inspector, not for session-to-MCP connections.
|
||||
"""
|
||||
network_name = "cubbi-mcp-network"
|
||||
if self.client:
|
||||
networks = self.client.networks.list(names=[network_name])
|
||||
if not networks:
|
||||
self.client.networks.create(network_name, driver="bridge")
|
||||
return network_name
|
||||
|
||||
def _get_mcp_dedicated_network(self, mcp_name: str) -> str:
|
||||
"""Get or create a dedicated network for direct session-to-MCP connections.
|
||||
|
||||
Args:
|
||||
mcp_name: The name of the MCP server
|
||||
|
||||
Returns:
|
||||
The name of the dedicated network
|
||||
"""
|
||||
network_name = f"cubbi-mcp-{mcp_name}-network"
|
||||
if self.client:
|
||||
networks = self.client.networks.list(names=[network_name])
|
||||
if not networks:
|
||||
self.client.networks.create(network_name, driver="bridge")
|
||||
return network_name
|
||||
|
||||
def list_mcps(self) -> List[Dict[str, Any]]:
|
||||
"""List all configured MCP servers."""
|
||||
mcps = self.config_manager.get("mcps", [])
|
||||
return mcps
|
||||
|
||||
def get_mcp(self, name: str) -> Optional[Dict[str, Any]]:
|
||||
"""Get an MCP configuration by name."""
|
||||
mcps = self.list_mcps()
|
||||
for mcp in mcps:
|
||||
if mcp.get("name") == name:
|
||||
return mcp
|
||||
return None
|
||||
|
||||
def add_remote_mcp(
|
||||
self,
|
||||
name: str,
|
||||
url: str,
|
||||
headers: Dict[str, str] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a remote MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
url: URL of the remote MCP server
|
||||
headers: HTTP headers to use when connecting
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# Create the remote MCP configuration
|
||||
remote_mcp = RemoteMCP(
|
||||
name=name,
|
||||
url=url,
|
||||
headers=headers or {},
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = remote_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def add_docker_mcp(
|
||||
self,
|
||||
name: str,
|
||||
image: str,
|
||||
command: str,
|
||||
env: Dict[str, str] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a Docker-based MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
image: Docker image for the MCP server
|
||||
command: Command to run in the container
|
||||
env: Environment variables to set in the container
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# Create the Docker MCP configuration
|
||||
docker_mcp = DockerMCP(
|
||||
name=name,
|
||||
image=image,
|
||||
command=command,
|
||||
env=env or {},
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = docker_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def add_proxy_mcp(
|
||||
self,
|
||||
name: str,
|
||||
base_image: str,
|
||||
proxy_image: str,
|
||||
command: str,
|
||||
proxy_options: Optional[Dict[str, Any]] = None,
env: Optional[Dict[str, str]] = None,
|
||||
host_port: Optional[int] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a proxy-based MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
base_image: Base Docker image running the actual MCP server
|
||||
proxy_image: Docker image for the MCP proxy
|
||||
command: Command to run in the container
|
||||
proxy_options: Options for the MCP proxy
|
||||
env: Environment variables to set in the container
|
||||
host_port: Host port to bind the MCP server to (auto-assigned if not specified)
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# If no host port specified, find the next available port starting from 5101
|
||||
if host_port is None:
|
||||
# Get current MCPs and find highest assigned port
|
||||
mcps = self.list_mcps()
|
||||
highest_port = 5100 # Start at 5100, so next will be 5101
|
||||
|
||||
for mcp in mcps:
|
||||
if mcp.get("type") == "proxy" and mcp.get("host_port"):
|
||||
try:
|
||||
port = int(mcp.get("host_port"))
|
||||
if port > highest_port:
|
||||
highest_port = port
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Next port will be highest + 1
|
||||
host_port = highest_port + 1
|
||||
|
||||
# Create the Proxy MCP configuration
|
||||
proxy_mcp = ProxyMCP(
|
||||
name=name,
|
||||
base_image=base_image,
|
||||
proxy_image=proxy_image,
|
||||
command=command,
|
||||
proxy_options=proxy_options or {},
|
||||
env=env or {},
|
||||
host_port=host_port,
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = proxy_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def remove_mcp(self, name: str) -> bool:
|
||||
"""Remove an MCP server configuration.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server to remove
|
||||
|
||||
Returns:
|
||||
True if the MCP was successfully removed, False otherwise
|
||||
"""
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Filter out the MCP with the specified name
|
||||
updated_mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# If the length hasn't changed, the MCP wasn't found
|
||||
if len(mcps) == len(updated_mcps):
|
||||
return False
|
||||
|
||||
# Save the updated configuration
|
||||
self.config_manager.set("mcps", updated_mcps)
|
||||
|
||||
# Also remove from default MCPs if it's there
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name in default_mcps:
|
||||
default_mcps.remove(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
# Stop and remove the container if it exists
|
||||
self.stop_mcp(name)
|
||||
|
||||
return True
|
||||
|
||||
def get_mcp_container_name(self, mcp_name: str) -> str:
|
||||
"""Get the Docker container name for an MCP server."""
|
||||
return f"cubbi_mcp_{mcp_name}"
|
||||
|
||||
def start_mcp(self, name: str) -> Dict[str, Any]:
|
||||
"""Start an MCP server container."""
|
||||
if not self.client:
|
||||
raise Exception("Docker client is not available")
|
||||
|
||||
# Get the MCP configuration
|
||||
mcp_config = self.get_mcp(name)
|
||||
if not mcp_config:
|
||||
raise ValueError(f"MCP server '{name}' not found")
|
||||
|
||||
# Get the container name
|
||||
container_name = self.get_mcp_container_name(name)
|
||||
|
||||
# Check if the container already exists
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
# Check if we need to recreate the container due to port binding changes
|
||||
needs_recreate = False
|
||||
|
||||
if mcp_config.get("type") == "proxy" and mcp_config.get("host_port"):
|
||||
# Get the current container port bindings
|
||||
port_bindings = container.attrs.get("HostConfig", {}).get(
|
||||
"PortBindings", {}
|
||||
)
|
||||
sse_port = f"{mcp_config['proxy_options'].get('sse_port', 8080)}/tcp"
|
||||
|
||||
# Check if the port binding matches the configured host port
|
||||
current_binding = port_bindings.get(sse_port, [])
|
||||
if not current_binding or int(
|
||||
current_binding[0].get("HostPort", 0)
|
||||
) != mcp_config.get("host_port"):
|
||||
logger.info(
|
||||
f"Port binding changed for MCP '{name}', recreating container"
|
||||
)
|
||||
needs_recreate = True
|
||||
|
||||
# If we don't need to recreate, just start it if it's not running
|
||||
if not needs_recreate:
|
||||
if container.status != "running":
|
||||
container.start()
|
||||
|
||||
# Return the container status
|
||||
return {
|
||||
"container_id": container.id,
|
||||
"status": "running",
|
||||
"name": name,
|
||||
}
|
||||
else:
|
||||
# We need to recreate the container with new port bindings
|
||||
logger.info(
|
||||
f"Recreating container for MCP '{name}' with updated port bindings"
|
||||
)
|
||||
container.remove(force=True)
|
||||
# Fall through below to recreate the container with the new port bindings
pass
|
||||
except NotFound:
|
||||
# Container doesn't exist, we need to create it
|
||||
pass
|
||||
|
||||
# Ensure the MCP network exists
|
||||
network_name = self._ensure_mcp_network()
|
||||
|
||||
# Handle different MCP types
|
||||
mcp_type = mcp_config.get("type")
|
||||
|
||||
if mcp_type == "remote":
|
||||
# Remote MCP servers don't need containers
|
||||
return {
|
||||
"status": "not_applicable",
|
||||
"name": name,
|
||||
"type": "remote",
|
||||
}
|
||||
|
||||
elif mcp_type == "docker":
|
||||
# Pull the image if needed
|
||||
try:
|
||||
self.client.images.get(mcp_config["image"])
|
||||
except ImageNotFound:
|
||||
logger.info(f"Pulling image {mcp_config['image']}")
|
||||
self.client.images.pull(mcp_config["image"])
|
||||
|
||||
# Create and start the container
|
||||
container = self.client.containers.run(
|
||||
image=mcp_config["image"],
|
||||
command=mcp_config.get("command"),
|
||||
name=container_name,
|
||||
detach=True,
|
||||
network=None, # Start without network, we'll add it with aliases
|
||||
environment=mcp_config.get("env", {}),
|
||||
labels={
|
||||
"cubbi.mcp": "true",
|
||||
"cubbi.mcp.name": name,
|
||||
"cubbi.mcp.type": "docker",
|
||||
},
|
||||
)
|
||||
|
||||
# Connect to the inspector network
|
||||
network = self.client.networks.get(network_name)
|
||||
network.connect(container, aliases=[name])
|
||||
logger.info(
|
||||
f"Connected MCP server '{name}' to inspector network {network_name} with alias '{name}'"
|
||||
)
|
||||
|
||||
# Create and connect to a dedicated network for session connections
|
||||
dedicated_network_name = self._get_mcp_dedicated_network(name)
|
||||
try:
|
||||
dedicated_network = self.client.networks.get(dedicated_network_name)
|
||||
except DockerException:
|
||||
dedicated_network = self.client.networks.create(
|
||||
dedicated_network_name, driver="bridge"
|
||||
)
|
||||
|
||||
dedicated_network.connect(container, aliases=[name])
|
||||
logger.info(
|
||||
f"Connected MCP server '{name}' to dedicated network {dedicated_network_name} with alias '{name}'"
|
||||
)
|
||||
|
||||
return {
|
||||
"container_id": container.id,
|
||||
"status": "running",
|
||||
"name": name,
|
||||
}
|
||||
|
||||
elif mcp_type == "proxy":
|
||||
# For proxy, we need to create a custom Dockerfile and build an image
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
# Create entrypoint script for mcp-proxy that runs the base MCP image
|
||||
entrypoint_script = """#!/bin/sh
|
||||
set -x
|
||||
echo "Starting MCP proxy with base image $MCP_BASE_IMAGE (command: $MCP_COMMAND) on port $SSE_PORT"
|
||||
|
||||
# Verify if Docker socket is available
|
||||
if [ ! -S /var/run/docker.sock ]; then
|
||||
echo "ERROR: Docker socket not available. Cannot run base MCP image."
|
||||
echo "Make sure the Docker socket is mounted from the host."
|
||||
|
||||
# Create a minimal fallback server for testing
|
||||
cat > /tmp/fallback_server.py << 'EOF'
|
||||
import json, sys, time
|
||||
print(json.dumps({"type": "ready", "message": "Fallback server - Docker socket not available"}))
|
||||
sys.stdout.flush()
|
||||
while True:
|
||||
line = sys.stdin.readline().strip()
|
||||
if line:
|
||||
try:
|
||||
data = json.loads(line)
|
||||
if data.get("type") == "ping":
|
||||
print(json.dumps({"type": "pong", "id": data.get("id")}))
|
||||
else:
|
||||
print(json.dumps({"type": "error", "message": "Docker socket not available"}))
|
||||
except:
|
||||
print(json.dumps({"type": "error"}))
|
||||
sys.stdout.flush()
|
||||
time.sleep(1)
|
||||
EOF
|
||||
|
||||
exec mcp-proxy \
|
||||
--sse-port "$SSE_PORT" \
|
||||
--sse-host "$SSE_HOST" \
|
||||
--allow-origin "$ALLOW_ORIGIN" \
|
||||
--pass-environment \
|
||||
-- \
|
||||
python /tmp/fallback_server.py
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Pull the base MCP image
|
||||
echo "Pulling base MCP image: $MCP_BASE_IMAGE"
|
||||
docker pull "$MCP_BASE_IMAGE" || true
|
||||
|
||||
# Prepare the command to run the MCP server
|
||||
if [ -n "$MCP_COMMAND" ]; then
|
||||
CMD="$MCP_COMMAND"
|
||||
else
|
||||
# Default to empty if no command specified
|
||||
CMD=""
|
||||
fi
|
||||
|
||||
echo "Running MCP server from image $MCP_BASE_IMAGE with command: $CMD"
|
||||
|
||||
# Run the actual MCP server in the base image and pipe its I/O to mcp-proxy
|
||||
# Using docker run without -d to keep stdio connected
|
||||
|
||||
# Build env vars string to pass through to the inner container
|
||||
ENV_ARGS=""
|
||||
|
||||
# Check if the environment variable names file exists
|
||||
if [ -f "/mcp-envs.txt" ]; then
|
||||
# Read env var names from file and pass them to docker
|
||||
while read -r var_name; do
|
||||
# Skip empty lines
|
||||
if [ -n "$var_name" ]; then
|
||||
# Simply add the env var - Docker will only pass it if it exists
|
||||
ENV_ARGS="$ENV_ARGS -e $var_name"
|
||||
fi
|
||||
done < "/mcp-envs.txt"
|
||||
|
||||
echo "Passing environment variables from mcp-envs.txt: $ENV_ARGS"
|
||||
fi
|
||||
|
||||
exec mcp-proxy \
|
||||
--sse-port "$SSE_PORT" \
|
||||
--sse-host "$SSE_HOST" \
|
||||
--allow-origin "$ALLOW_ORIGIN" \
|
||||
--pass-environment \
|
||||
-- \
|
||||
docker run --rm -i $ENV_ARGS "$MCP_BASE_IMAGE" $CMD
|
||||
"""
|
||||
# Write the entrypoint script
|
||||
entrypoint_path = os.path.join(tmp_dir, "entrypoint.sh")
|
||||
with open(entrypoint_path, "w") as f:
|
||||
f.write(entrypoint_script)
|
||||
|
||||
# Create a file with environment variable names (no values)
|
||||
env_names_path = os.path.join(tmp_dir, "mcp-envs.txt")
|
||||
with open(env_names_path, "w") as f:
|
||||
# Write one env var name per line
|
||||
for env_name in mcp_config.get("env", {}).keys():
|
||||
f.write(f"{env_name}\n")
|
||||
|
||||
# Create a Dockerfile for the proxy
|
||||
dockerfile_content = f"""
|
||||
FROM {mcp_config["proxy_image"]}
|
||||
|
||||
# Install Docker CLI (trying multiple package managers to handle different base images)
|
||||
USER root
|
||||
RUN (apt-get update && apt-get install -y docker.io) || \\
|
||||
(apt-get update && apt-get install -y docker-ce-cli) || \\
|
||||
(apk add --no-cache docker-cli) || \\
|
||||
(yum install -y docker) || \\
|
||||
echo "WARNING: Could not install Docker CLI - will fall back to minimal MCP server"
|
||||
|
||||
# Set environment variables for the proxy
|
||||
ENV MCP_BASE_IMAGE={mcp_config["base_image"]}
|
||||
ENV MCP_COMMAND="{mcp_config.get("command", "")}"
|
||||
ENV SSE_PORT={mcp_config["proxy_options"].get("sse_port", 8080)}
|
||||
ENV SSE_HOST={mcp_config["proxy_options"].get("sse_host", "0.0.0.0")}
|
||||
ENV ALLOW_ORIGIN={mcp_config["proxy_options"].get("allow_origin", "*")}
|
||||
ENV DEBUG=1
|
||||
|
||||
# Add environment variables from the configuration
|
||||
{chr(10).join([f'ENV {k}="{v}"' for k, v in mcp_config.get("env", {}).items()])}
|
||||
|
||||
# Add env names file and entrypoint script
|
||||
COPY mcp-envs.txt /mcp-envs.txt
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
"""
|
||||
|
||||
# Write the Dockerfile
|
||||
dockerfile_path = os.path.join(tmp_dir, "Dockerfile")
|
||||
with open(dockerfile_path, "w") as f:
|
||||
f.write(dockerfile_content)
|
||||
|
||||
# Build the image
|
||||
custom_image_name = f"cubbi_mcp_proxy_{name}"
|
||||
logger.info(f"Building custom proxy image: {custom_image_name}")
|
||||
self.client.images.build(
|
||||
path=tmp_dir,
|
||||
tag=custom_image_name,
|
||||
rm=True,
|
||||
)
|
||||
|
||||
# Format command for the Docker entrypoint arguments
|
||||
# The MCP proxy container will handle this internally based on
|
||||
# the MCP_BASE_IMAGE and MCP_COMMAND env vars we set
|
||||
logger.info(
|
||||
f"Starting MCP proxy with base_image={mcp_config['base_image']}, command={mcp_config.get('command', '')}"
|
||||
)
|
||||
|
||||
# Get the SSE port from the proxy options
|
||||
sse_port = mcp_config["proxy_options"].get("sse_port", 8080)
|
||||
|
||||
# Check if we need to bind to a host port
|
||||
port_bindings = {}
|
||||
if mcp_config.get("host_port"):
|
||||
host_port = mcp_config.get("host_port")
|
||||
port_bindings = {f"{sse_port}/tcp": host_port}
|
||||
|
||||
# Create and start the container
|
||||
container = self.client.containers.run(
|
||||
image=custom_image_name,
|
||||
name=container_name,
|
||||
detach=True,
|
||||
network=None, # Start without network, we'll add it with aliases
|
||||
volumes={
|
||||
"/var/run/docker.sock": {
|
||||
"bind": "/var/run/docker.sock",
|
||||
"mode": "rw",
|
||||
}
|
||||
},
|
||||
labels={
|
||||
"cubbi.mcp": "true",
|
||||
"cubbi.mcp.name": name,
|
||||
"cubbi.mcp.type": "proxy",
|
||||
},
|
||||
ports=port_bindings, # Bind the SSE port to the host if configured
|
||||
)
|
||||
|
||||
# Connect to the inspector network
|
||||
network = self.client.networks.get(network_name)
|
||||
network.connect(container, aliases=[name])
|
||||
logger.info(
|
||||
f"Connected MCP server '{name}' to inspector network {network_name} with alias '{name}'"
|
||||
)
|
||||
|
||||
# Create and connect to a dedicated network for session connections
|
||||
dedicated_network_name = self._get_mcp_dedicated_network(name)
|
||||
try:
|
||||
dedicated_network = self.client.networks.get(dedicated_network_name)
|
||||
except DockerException:
|
||||
dedicated_network = self.client.networks.create(
|
||||
dedicated_network_name, driver="bridge"
|
||||
)
|
||||
|
||||
dedicated_network.connect(container, aliases=[name])
|
||||
logger.info(
|
||||
f"Connected MCP server '{name}' to dedicated network {dedicated_network_name} with alias '{name}'"
|
||||
)
|
||||
|
||||
return {
|
||||
"container_id": container.id,
|
||||
"status": "running",
|
||||
"name": name,
|
||||
}
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unsupported MCP type: {mcp_type}")
|
||||
|
||||
def stop_mcp(self, name: str) -> bool:
|
||||
"""Stop an MCP server container.
|
||||
|
||||
Args:
|
||||
name: The name of the MCP server to stop
|
||||
|
||||
Returns:
|
||||
True if the operation was successful (including cases where the container doesn't exist)
|
||||
"""
|
||||
if not self.client:
|
||||
logger.warning("Docker client is not available")
|
||||
return False
|
||||
|
||||
# Get the MCP configuration - don't raise an exception if not found
|
||||
mcp_config = self.get_mcp(name)
|
||||
if not mcp_config:
|
||||
logger.warning(
|
||||
f"MCP server '{name}' not found, but continuing with removal"
|
||||
)
|
||||
return True
|
||||
|
||||
# Remote MCPs don't have containers to stop
|
||||
if mcp_config.get("type") == "remote":
|
||||
return True
|
||||
|
||||
# Get the container name
|
||||
container_name = self.get_mcp_container_name(name)
|
||||
|
||||
# Try to get, stop, and remove the container
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
|
||||
# Stop the container if it's running
|
||||
if container.status == "running":
|
||||
logger.info(f"Stopping MCP container '{name}'...")
|
||||
container.stop(timeout=10)
|
||||
|
||||
# Remove the container regardless of its status
|
||||
logger.info(f"Removing MCP container '{name}'...")
|
||||
container.remove(force=True)
|
||||
return True
|
||||
|
||||
except NotFound:
|
||||
# Container doesn't exist - this is fine when removing
|
||||
logger.info(f"MCP container '{name}' not found, nothing to stop or remove")
|
||||
return True
|
||||
except Exception as e:
|
||||
# Log the error but don't fail the removal operation
|
||||
logger.error(f"Error stopping/removing MCP container: {e}")
|
||||
return True # Return true anyway to continue with removal
|
||||
|
||||
def restart_mcp(self, name: str) -> Dict[str, Any]:
|
||||
"""Restart an MCP server container."""
|
||||
if not self.client:
|
||||
raise Exception("Docker client is not available")
|
||||
|
||||
# Get the MCP configuration
|
||||
mcp_config = self.get_mcp(name)
|
||||
if not mcp_config:
|
||||
raise ValueError(f"MCP server '{name}' not found")
|
||||
|
||||
# Remote MCPs don't have containers to restart
|
||||
if mcp_config.get("type") == "remote":
|
||||
return {
|
||||
"status": "not_applicable",
|
||||
"name": name,
|
||||
"type": "remote",
|
||||
}
|
||||
|
||||
# Get the container name
|
||||
container_name = self.get_mcp_container_name(name)
|
||||
|
||||
# Try to get and restart the container
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
container.restart(timeout=10)
|
||||
return {
|
||||
"container_id": container.id,
|
||||
"status": "running",
|
||||
"name": name,
|
||||
}
|
||||
except NotFound:
|
||||
# Container doesn't exist, start it
|
||||
return self.start_mcp(name)
|
||||
except Exception as e:
|
||||
logger.error(f"Error restarting MCP container: {e}")
|
||||
raise
|
||||
|
||||
def get_mcp_status(self, name: str) -> Dict[str, Any]:
|
||||
"""Get the status of an MCP server."""
|
||||
if not self.client:
|
||||
raise Exception("Docker client is not available")
|
||||
|
||||
# Get the MCP configuration
|
||||
mcp_config = self.get_mcp(name)
|
||||
if not mcp_config:
|
||||
raise ValueError(f"MCP server '{name}' not found")
|
||||
|
||||
# Remote MCPs don't have containers
|
||||
if mcp_config.get("type") == "remote":
|
||||
return {
|
||||
"status": "not_applicable",
|
||||
"name": name,
|
||||
"type": "remote",
|
||||
"url": mcp_config.get("url"),
|
||||
}
|
||||
|
||||
# Get the container name
|
||||
container_name = self.get_mcp_container_name(name)
|
||||
|
||||
# Try to get the container status
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
status = (
|
||||
MCPStatus.RUNNING
|
||||
if container.status == "running"
|
||||
else MCPStatus.STOPPED
|
||||
)
|
||||
|
||||
# Get container details
|
||||
container_info = container.attrs
|
||||
|
||||
# Extract exposed ports from config
|
||||
ports = {}
|
||||
if (
|
||||
"Config" in container_info
|
||||
and "ExposedPorts" in container_info["Config"]
|
||||
):
|
||||
# Add all exposed ports
|
||||
for port in container_info["Config"]["ExposedPorts"].keys():
|
||||
ports[port] = None
|
||||
|
||||
# Add any ports that might be published
|
||||
if (
|
||||
"NetworkSettings" in container_info
|
||||
and "Ports" in container_info["NetworkSettings"]
|
||||
):
|
||||
for port, mappings in container_info["NetworkSettings"][
|
||||
"Ports"
|
||||
].items():
|
||||
if mappings:
|
||||
# Port is bound to host
|
||||
ports[port] = int(mappings[0]["HostPort"])
|
||||
|
||||
return {
|
||||
"status": status.value,
|
||||
"container_id": container.id,
|
||||
"name": name,
|
||||
"type": mcp_config.get("type"),
|
||||
"image": container_info["Config"]["Image"],
|
||||
"ports": ports,
|
||||
"created": container_info["Created"],
|
||||
}
|
||||
except NotFound:
|
||||
# Container doesn't exist
|
||||
return {
|
||||
"status": MCPStatus.NOT_FOUND.value,
|
||||
"name": name,
|
||||
"type": mcp_config.get("type"),
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting MCP container status: {e}")
|
||||
return {
|
||||
"status": MCPStatus.FAILED.value,
|
||||
"name": name,
|
||||
"error": str(e),
|
||||
}
|
||||
|
||||
def get_mcp_logs(self, name: str, tail: int = 100) -> str:
|
||||
"""Get logs from an MCP server container."""
|
||||
if not self.client:
|
||||
raise Exception("Docker client is not available")
|
||||
|
||||
# Get the MCP configuration
|
||||
mcp_config = self.get_mcp(name)
|
||||
if not mcp_config:
|
||||
raise ValueError(f"MCP server '{name}' not found")
|
||||
|
||||
# Remote MCPs don't have logs
|
||||
if mcp_config.get("type") == "remote":
|
||||
return "Remote MCPs don't have local logs"
|
||||
|
||||
# Get the container name
|
||||
container_name = self.get_mcp_container_name(name)
|
||||
|
||||
# Try to get the container logs
|
||||
try:
|
||||
container = self.client.containers.get(container_name)
|
||||
logs = container.logs(tail=tail, timestamps=True).decode("utf-8")
|
||||
return logs
|
||||
except NotFound:
|
||||
# Container doesn't exist
|
||||
return f"MCP container '{name}' not found"
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting MCP container logs: {e}")
|
||||
return f"Error getting logs: {str(e)}"
|
||||
|
||||
def list_mcp_containers(self) -> List[MCPContainer]:
|
||||
"""List all MCP containers."""
|
||||
if not self.client:
|
||||
raise Exception("Docker client is not available")
|
||||
|
||||
# Get all containers with the cubbi.mcp label
|
||||
containers = self.client.containers.list(
|
||||
all=True, filters={"label": "cubbi.mcp"}
|
||||
)
|
||||
|
||||
result = []
|
||||
for container in containers:
|
||||
# Get container details
|
||||
container_info = container.attrs
|
||||
|
||||
# Extract labels
|
||||
labels = container_info["Config"]["Labels"]
|
||||
|
||||
# Extract exposed ports from config
|
||||
ports = {}
|
||||
if (
|
||||
"Config" in container_info
|
||||
and "ExposedPorts" in container_info["Config"]
|
||||
):
|
||||
# Add all exposed ports
|
||||
for port in container_info["Config"]["ExposedPorts"].keys():
|
||||
ports[port] = None
|
||||
|
||||
# Add any ports that might be published
|
||||
if (
|
||||
"NetworkSettings" in container_info
|
||||
and "Ports" in container_info["NetworkSettings"]
|
||||
):
|
||||
for port, mappings in container_info["NetworkSettings"][
|
||||
"Ports"
|
||||
].items():
|
||||
if mappings:
|
||||
# Port is bound to host
|
||||
ports[port] = int(mappings[0]["HostPort"])
|
||||
|
||||
# Determine status
|
||||
status = (
|
||||
MCPStatus.RUNNING
|
||||
if container.status == "running"
|
||||
else MCPStatus.STOPPED
|
||||
)
|
||||
|
||||
# Create MCPContainer object
|
||||
mcp_container = MCPContainer(
|
||||
name=labels.get("cubbi.mcp.name", "unknown"),
|
||||
container_id=container.id,
|
||||
status=status,
|
||||
image=container_info["Config"]["Image"],
|
||||
ports=ports,
|
||||
created_at=container_info["Created"],
|
||||
type=labels.get("cubbi.mcp.type", "unknown"),
|
||||
)
|
||||
|
||||
result.append(mcp_container)
|
||||
|
||||
return result
|
||||
cubbi/models.py (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class SessionStatus(str, Enum):
|
||||
CREATING = "creating"
|
||||
RUNNING = "running"
|
||||
STOPPED = "stopped"
|
||||
FAILED = "failed"
|
||||
|
||||
|
||||
class MCPStatus(str, Enum):
|
||||
RUNNING = "running"
|
||||
STOPPED = "stopped"
|
||||
NOT_FOUND = "not_found"
|
||||
FAILED = "failed"
|
||||
|
||||
|
||||
class ImageEnvironmentVariable(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
required: bool = False
|
||||
default: Optional[str] = None
|
||||
sensitive: bool = False
|
||||
|
||||
|
||||
class PersistentConfig(BaseModel):
|
||||
source: str
|
||||
target: str
|
||||
type: str # "directory" or "file"
|
||||
description: str = ""
|
||||
|
||||
|
||||
class VolumeMount(BaseModel):
|
||||
mountPath: str
|
||||
description: str = ""
|
||||
|
||||
|
||||
class ImageInit(BaseModel):
|
||||
pre_command: Optional[str] = None
|
||||
command: str
|
||||
|
||||
|
||||
class Image(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
version: str
|
||||
maintainer: str
|
||||
image: str
|
||||
init: Optional[ImageInit] = None
|
||||
environment: List[ImageEnvironmentVariable] = []
|
||||
ports: List[int] = []
|
||||
volumes: List[VolumeMount] = []
|
||||
persistent_configs: List[PersistentConfig] = []
|
||||
|
||||
|
||||
class RemoteMCP(BaseModel):
|
||||
name: str
|
||||
type: str = "remote"
|
||||
url: str
|
||||
headers: Dict[str, str] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class DockerMCP(BaseModel):
|
||||
name: str
|
||||
type: str = "docker"
|
||||
image: str
|
||||
command: str
|
||||
env: Dict[str, str] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class ProxyMCP(BaseModel):
|
||||
name: str
|
||||
type: str = "proxy"
|
||||
base_image: str
|
||||
proxy_image: str
|
||||
command: str
|
||||
proxy_options: Dict[str, Any] = Field(default_factory=dict)
|
||||
env: Dict[str, str] = Field(default_factory=dict)
|
||||
host_port: Optional[int] = None # External port to bind the SSE port to on the host
|
||||
|
||||
|
||||
MCP = Union[RemoteMCP, DockerMCP, ProxyMCP]
|
||||
|
||||
|
||||
class MCPContainer(BaseModel):
|
||||
name: str
|
||||
container_id: str
|
||||
status: MCPStatus
|
||||
image: str
|
||||
ports: Dict[str, Optional[int]] = Field(default_factory=dict)
|
||||
created_at: str
|
||||
type: str
|
||||
|
||||
|
||||
class Session(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
image: str
|
||||
status: SessionStatus
|
||||
container_id: Optional[str] = None
|
||||
ports: Dict[int, int] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class Config(BaseModel):
|
||||
docker: Dict[str, str] = Field(default_factory=dict)
|
||||
images: Dict[str, Image] = Field(default_factory=dict)
|
||||
defaults: Dict[str, object] = Field(
|
||||
default_factory=dict
|
||||
) # Can store strings, booleans, or other values
|
||||
mcps: List[Dict[str, Any]] = Field(default_factory=list)
|
||||
cubbi/session.py (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
Session storage management for Cubbi Container Tool.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
DEFAULT_SESSIONS_FILE = Path.home() / ".config" / "cubbi" / "sessions.yaml"
|
||||
|
||||
|
||||
class SessionManager:
|
||||
"""Manager for container sessions."""
|
||||
|
||||
def __init__(self, sessions_path: Optional[Path] = None):
|
||||
"""Initialize the session manager.
|
||||
|
||||
Args:
|
||||
sessions_path: Optional path to the sessions file.
|
||||
Defaults to ~/.config/cubbi/sessions.yaml.
|
||||
"""
|
||||
self.sessions_path = sessions_path or DEFAULT_SESSIONS_FILE
|
||||
self.sessions = self._load_sessions()
|
||||
|
||||
def _load_sessions(self) -> Dict[str, dict]:
|
||||
"""Load sessions from file or create an empty sessions file if it doesn't exist."""
|
||||
if not self.sessions_path.exists():
|
||||
# Create directory if it doesn't exist
|
||||
self.sessions_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
# Create empty sessions file
|
||||
with open(self.sessions_path, "w") as f:
|
||||
yaml.safe_dump({}, f)
|
||||
# Set secure permissions
|
||||
os.chmod(self.sessions_path, 0o600)
|
||||
return {}
|
||||
|
||||
# Load existing sessions
|
||||
with open(self.sessions_path, "r") as f:
|
||||
sessions = yaml.safe_load(f) or {}
|
||||
return sessions
|
||||
|
||||
def save(self) -> None:
|
||||
"""Save the sessions to file."""
|
||||
with open(self.sessions_path, "w") as f:
|
||||
yaml.safe_dump(self.sessions, f)
|
||||
|
||||
def add_session(self, session_id: str, session_data: dict) -> None:
|
||||
"""Add a session to storage.
|
||||
|
||||
Args:
|
||||
session_id: The unique session ID
|
||||
session_data: The session data (Session model dump as dict)
|
||||
"""
|
||||
self.sessions[session_id] = session_data
|
||||
self.save()
|
||||
|
||||
def get_session(self, session_id: str) -> Optional[dict]:
|
||||
"""Get a session by ID.
|
||||
|
||||
Args:
|
||||
session_id: The session ID
|
||||
|
||||
Returns:
|
||||
The session data or None if not found
|
||||
"""
|
||||
return self.sessions.get(session_id)
|
||||
|
||||
def list_sessions(self) -> Dict[str, dict]:
|
||||
"""List all sessions.
|
||||
|
||||
Returns:
|
||||
Dict of session ID to session data
|
||||
"""
|
||||
return self.sessions
|
||||
|
||||
def remove_session(self, session_id: str) -> None:
|
||||
"""Remove a session from storage.
|
||||
|
||||
Args:
|
||||
session_id: The session ID to remove
|
||||
"""
|
||||
if session_id in self.sessions:
|
||||
del self.sessions[session_id]
|
||||
self.save()
|
||||
cubbi/user_config.py (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
"""
|
||||
User configuration manager for Cubbi Container Tool.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import yaml
|
||||
|
||||
# Define the environment variable mappings
|
||||
ENV_MAPPINGS = {
|
||||
"services.langfuse.url": "LANGFUSE_URL",
|
||||
"services.langfuse.public_key": "LANGFUSE_INIT_PROJECT_PUBLIC_KEY",
|
||||
"services.langfuse.secret_key": "LANGFUSE_INIT_PROJECT_SECRET_KEY",
|
||||
"services.openai.api_key": "OPENAI_API_KEY",
|
||||
"services.anthropic.api_key": "ANTHROPIC_API_KEY",
|
||||
"services.openrouter.api_key": "OPENROUTER_API_KEY",
|
||||
"services.google.api_key": "GOOGLE_API_KEY",
|
||||
}
|
||||
|
||||
|
||||
class UserConfigManager:
|
||||
"""Manager for user-specific configuration."""
|
||||
|
||||
def __init__(self, config_path: Optional[str] = None):
|
||||
"""Initialize the user configuration manager.
|
||||
|
||||
Args:
|
||||
config_path: Optional path to the configuration file.
|
||||
Defaults to ~/.config/cubbi/config.yaml.
|
||||
"""
|
||||
# Default to ~/.config/cubbi/config.yaml
|
||||
self.config_path = Path(
|
||||
config_path or os.path.expanduser("~/.config/cubbi/config.yaml")
|
||||
)
|
||||
self.config = self._load_config()
|
||||
|
||||
def _load_config(self) -> Dict[str, Any]:
|
||||
"""Load configuration from file or create with defaults if it doesn't exist."""
|
||||
if not self.config_path.exists():
|
||||
# Create directory if it doesn't exist
|
||||
self.config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
# Create default config
|
||||
default_config = self._get_default_config()
|
||||
# Save to file
|
||||
with open(self.config_path, "w") as f:
|
||||
yaml.safe_dump(default_config, f)
|
||||
# Set secure permissions
|
||||
os.chmod(self.config_path, 0o600)
|
||||
return default_config
|
||||
|
||||
# Load existing config with error handling
|
||||
try:
|
||||
with open(self.config_path, "r") as f:
|
||||
config = yaml.safe_load(f) or {}
|
||||
|
||||
# Check for backup file that might be newer
|
||||
backup_path = self.config_path.with_suffix(".yaml.bak")
|
||||
if backup_path.exists():
|
||||
# Check if backup is newer than main config
|
||||
if backup_path.stat().st_mtime > self.config_path.stat().st_mtime:
|
||||
try:
|
||||
with open(backup_path, "r") as f:
|
||||
backup_config = yaml.safe_load(f) or {}
|
||||
print("Found newer backup config, using that instead")
|
||||
config = backup_config
|
||||
except Exception as e:
|
||||
print(f"Failed to load backup config: {e}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error loading configuration: {e}")
|
||||
# Try to load from backup if main config is corrupted
|
||||
backup_path = self.config_path.with_suffix(".yaml.bak")
|
||||
if backup_path.exists():
|
||||
try:
|
||||
with open(backup_path, "r") as f:
|
||||
config = yaml.safe_load(f) or {}
|
||||
print("Loaded configuration from backup file")
|
||||
except Exception as backup_e:
|
||||
print(f"Failed to load backup configuration: {backup_e}")
|
||||
config = {}
|
||||
else:
|
||||
config = {}
|
||||
|
||||
# Merge with defaults for any missing fields
|
||||
return self._merge_with_defaults(config)
|
||||
|
||||
def _get_default_config(self) -> Dict[str, Any]:
|
||||
"""Get the default configuration."""
|
||||
return {
|
||||
"defaults": {
|
||||
"image": "goose",
|
||||
"connect": True,
|
||||
"mount_local": True,
|
||||
"networks": [], # Default networks to connect to (besides cubbi-network)
|
||||
"volumes": [], # Default volumes to mount, format: "source:dest"
|
||||
"mcps": [], # Default MCP servers to connect to
|
||||
"model": "claude-3-5-sonnet-latest", # Default LLM model to use
|
||||
"provider": "anthropic", # Default LLM provider to use
|
||||
},
|
||||
"services": {
|
||||
"langfuse": {},
|
||||
"openai": {},
|
||||
"anthropic": {},
|
||||
"openrouter": {},
|
||||
"google": {},
|
||||
},
|
||||
"docker": {
|
||||
"network": "cubbi-network",
|
||||
},
|
||||
"ui": {
|
||||
"colors": True,
|
||||
"verbose": False,
|
||||
},
|
||||
}
|
||||
|
||||
def _merge_with_defaults(self, config: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Merge user config with defaults for missing values."""
|
||||
defaults = self._get_default_config()
|
||||
|
||||
# Deep merge of config with defaults
|
||||
def _deep_merge(source, destination):
|
||||
for key, value in source.items():
|
||||
if key not in destination:
|
||||
destination[key] = value
|
||||
elif isinstance(value, dict) and isinstance(destination[key], dict):
|
||||
_deep_merge(value, destination[key])
|
||||
return destination
|
||||
|
||||
return _deep_merge(defaults, config)
|
||||
|
||||
def get(self, key_path: str, default: Any = None) -> Any:
|
||||
"""Get a configuration value by dot-notation path.
|
||||
|
||||
Args:
|
||||
key_path: The configuration path (e.g., "defaults.image")
|
||||
default: The default value to return if not found
|
||||
|
||||
Returns:
|
||||
The configuration value or default if not found
|
||||
"""
|
||||
# Handle shorthand service paths (e.g., "langfuse.url")
|
||||
if (
|
||||
"." in key_path
|
||||
and not key_path.startswith("services.")
|
||||
and not any(
|
||||
key_path.startswith(section + ".")
|
||||
for section in ["defaults", "docker", "remote", "ui"]
|
||||
)
|
||||
):
|
||||
service, setting = key_path.split(".", 1)
|
||||
key_path = f"services.{service}.{setting}"
|
||||
|
||||
parts = key_path.split(".")
|
||||
result = self.config
|
||||
|
||||
for part in parts:
|
||||
if part not in result:
|
||||
return default
|
||||
result = result[part]
|
||||
|
||||
return result
|
||||
|
||||
def set(self, key_path: str, value: Any) -> None:
|
||||
"""Set a configuration value by dot-notation path.
|
||||
|
||||
Args:
|
||||
key_path: The configuration path (e.g., "defaults.image")
|
||||
value: The value to set
|
||||
"""
|
||||
# Handle shorthand service paths (e.g., "langfuse.url")
|
||||
if (
|
||||
"." in key_path
|
||||
and not key_path.startswith("services.")
|
||||
and not any(
|
||||
key_path.startswith(section + ".")
|
||||
for section in ["defaults", "docker", "remote", "ui"]
|
||||
)
|
||||
):
|
||||
service, setting = key_path.split(".", 1)
|
||||
key_path = f"services.{service}.{setting}"
|
||||
|
||||
parts = key_path.split(".")
|
||||
config = self.config
|
||||
|
||||
# Navigate to the containing dictionary
|
||||
for part in parts[:-1]:
|
||||
if part not in config:
|
||||
config[part] = {}
|
||||
config = config[part]
|
||||
|
||||
# Set the value
|
||||
config[parts[-1]] = value
|
||||
self.save()
|
||||
|
||||
def save(self) -> None:
|
||||
"""Save the configuration to file with error handling and backup."""
|
||||
# Create backup of existing config file if it exists
|
||||
if self.config_path.exists():
|
||||
backup_path = self.config_path.with_suffix(".yaml.bak")
|
||||
try:
|
||||
import shutil
|
||||
|
||||
shutil.copy2(self.config_path, backup_path)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to create config backup: {e}")
|
||||
|
||||
# Ensure parent directory exists
|
||||
self.config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
try:
|
||||
# Write to a temporary file first
|
||||
temp_path = self.config_path.with_suffix(".yaml.tmp")
|
||||
with open(temp_path, "w") as f:
|
||||
yaml.safe_dump(self.config, f)
|
||||
|
||||
# Set secure permissions on temp file
|
||||
os.chmod(temp_path, 0o600)
|
||||
|
||||
# Rename temp file to actual config file (atomic operation)
|
||||
# Use os.replace which is atomic on Unix systems
|
||||
os.replace(temp_path, self.config_path)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error saving configuration: {e}")
|
||||
# If we have a backup and the save failed, try to restore from backup
|
||||
backup_path = self.config_path.with_suffix(".yaml.bak")
|
||||
if backup_path.exists():
|
||||
try:
|
||||
import shutil
|
||||
|
||||
shutil.copy2(backup_path, self.config_path)
|
||||
print("Restored configuration from backup")
|
||||
except Exception as restore_error:
|
||||
print(
|
||||
f"Failed to restore configuration from backup: {restore_error}"
|
||||
)
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset the configuration to defaults."""
|
||||
self.config = self._get_default_config()
|
||||
self.save()
|
||||
|
||||
def get_environment_variables(self) -> Dict[str, str]:
|
||||
"""Get environment variables from the configuration.
|
||||
|
||||
Returns:
|
||||
A dictionary of environment variables to set in the container.
|
||||
"""
|
||||
env_vars = {}
|
||||
|
||||
# Process the service configurations and map to environment variables
|
||||
for config_path, env_var in ENV_MAPPINGS.items():
|
||||
value = self.get(config_path)
|
||||
if value:
|
||||
# Handle environment variable references
|
||||
if (
|
||||
isinstance(value, str)
|
||||
and value.startswith("${")
|
||||
and value.endswith("}")
|
||||
):
|
||||
env_var_name = value[2:-1]
|
||||
value = os.environ.get(env_var_name, "")
|
||||
|
||||
env_vars[env_var] = str(value)
|
||||
|
||||
return env_vars
|
||||
|
||||
def list_config(self) -> List[Tuple[str, Any]]:
|
||||
"""List all configuration values as flattened key-value pairs.
|
||||
|
||||
Returns:
|
||||
A list of (key, value) tuples with flattened key paths.
|
||||
"""
|
||||
result = []
|
||||
|
||||
def _flatten_dict(d, prefix=""):
|
||||
for key, value in d.items():
|
||||
full_key = f"{prefix}.{key}" if prefix else key
|
||||
if isinstance(value, dict):
|
||||
_flatten_dict(value, full_key)
|
||||
else:
|
||||
# Mask sensitive values
|
||||
if any(
|
||||
substr in full_key.lower()
|
||||
for substr in ["key", "token", "secret", "password"]
|
||||
):
|
||||
displayed_value = "*****" if value else value
|
||||
else:
|
||||
displayed_value = value
|
||||
result.append((full_key, displayed_value))
|
||||
|
||||
_flatten_dict(self.config)
|
||||
return sorted(result)
|
||||
docs/specs/1_SPECIFICATIONS.md (new file, 682 lines)
@@ -0,0 +1,682 @@
|
||||
# Cubbi - Container Tool
|
||||
|
||||
## Overview
|
||||
|
||||
Cubbi is a command-line tool for managing ephemeral
|
||||
containers that run AI tools and development environments. It works with both
|
||||
local Docker and a dedicated remote web service that manages containers in a
|
||||
Docker-in-Docker (DinD) environment.
|
||||
|
||||
## Technology Stack
|
||||
|
||||
### Cubbi Service
|
||||
- **Web Framework**: FastAPI for high-performance, async API endpoints
|
||||
- **Package Management**: uv (Astral) for dependency management
|
||||
- **Database**: SQLite for development, PostgreSQL for production
|
||||
- **Container Management**: Docker SDK for Python
|
||||
- **Authentication**: OAuth 2.0 integration with Authentik
|
||||
|
||||
### Cubbi CLI
|
||||
- **Language**: Python
|
||||
- **Package Management**: uv for dependency management
|
||||
- **Distribution**: Standalone binary via PyInstaller or similar
|
||||
- **Configuration**: YAML for configuration files
|
||||
|
||||
## System Architecture
|
||||
|
||||
### Components
|
||||
|
||||
1. **CLI Tool (`cubbi`)**: The command-line interface users interact with
|
||||
2. **Cubbi Service**: A web service that handles remote container execution
|
||||
3. **Container Images**: Predefined container templates for various AI tools
|
||||
|
||||
### Architecture Diagram
|
||||
|
||||
```
|
||||
┌─────────────┐ ┌─────────────────────────┐
|
||||
│ │ │ │
|
||||
│ Cubbi CLI │◄─────────►│ Local Docker Daemon │
|
||||
│ (cubbi) │ │ │
|
||||
│ │ └─────────────────────────┘
|
||||
└──────┬──────┘
|
||||
│
|
||||
│ REST API
|
||||
│
|
||||
┌──────▼──────┐ ┌─────────────────────────┐
|
||||
│ │ │ │
|
||||
│ Cubbi │◄─────────►│ Docker-in-Docker │
|
||||
│ Service │ │ │
|
||||
│ │ └─────────────────────────┘
|
||||
└─────────────┘
|
||||
│
|
||||
├──────────────┬───────────────┐
|
||||
│ │ │
|
||||
┌──────▼──────┐ ┌─────▼─────┐ ┌──────▼──────┐
|
||||
│ │ │ │ │ │
|
||||
│ Fluentd │ │ Langfuse │ │ Other │
|
||||
│ Logging │ │ Logging │ │ Services │
|
||||
│ │ │ │ │ │
|
||||
└─────────────┘ └───────────┘ └─────────────┘
|
||||
```
|
||||
|
||||
## Core Concepts
|
||||
|
||||
- **Session**: An active container instance with a specific image
|
||||
- **Image**: A predefined container template with specific AI tools installed
|
||||
- **Remote**: A configured cubbi service instance
|
||||
|
||||
## User Configuration
|
||||
|
||||
Cubbi supports user-specific configuration via a YAML file located at `~/.config/cubbi/config.yaml`. This provides a way to set default values, store service credentials, and customize behavior without modifying code.
|
||||
|
||||
### Configuration File Structure
|
||||
|
||||
```yaml
|
||||
# ~/.config/cubbi/config.yaml
|
||||
defaults:
|
||||
image: "goose" # Default image to use
|
||||
connect: true # Automatically connect after creating session
|
||||
mount_local: true # Mount local directory by default
|
||||
networks: [] # Default networks to connect to (besides cubbi-network)
|
||||
|
||||
services:
|
||||
# Service credentials with simplified naming
|
||||
# These are mapped to environment variables in containers
|
||||
langfuse:
|
||||
url: "" # Will be set by the user
|
||||
public_key: "pk-lf-..."
|
||||
secret_key: "sk-lf-..."
|
||||
|
||||
openai:
|
||||
api_key: "sk-..."
|
||||
|
||||
anthropic:
|
||||
api_key: "sk-ant-..."
|
||||
|
||||
openrouter:
|
||||
api_key: "sk-or-..."
|
||||
|
||||
docker:
|
||||
network: "cubbi-network" # Default Docker network to use
|
||||
socket: "/var/run/docker.sock" # Docker socket path
|
||||
|
||||
remote:
|
||||
default: "production" # Default remote to use
|
||||
endpoints:
|
||||
production:
|
||||
url: "https://cubbi.monadical.com"
|
||||
auth_method: "oauth"
|
||||
staging:
|
||||
url: "https://cubbi-staging.monadical.com"
|
||||
auth_method: "oauth"
|
||||
|
||||
ui:
|
||||
colors: true # Enable/disable colors in terminal output
|
||||
verbose: false # Enable/disable verbose output
|
||||
table_format: "grid" # Table format for session listings
|
||||
```
|
||||
|
||||
### Environment Variable Mapping
|
||||
|
||||
The simplified configuration names are mapped to environment variables:
|
||||
|
||||
| Config Path | Environment Variable |
|
||||
|-------------|---------------------|
|
||||
| `services.langfuse.url` | `LANGFUSE_URL` |
|
||||
| `services.langfuse.public_key` | `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` |
|
||||
| `services.langfuse.secret_key` | `LANGFUSE_INIT_PROJECT_SECRET_KEY` |
|
||||
| `services.openai.api_key` | `OPENAI_API_KEY` |
|
||||
| `services.anthropic.api_key` | `ANTHROPIC_API_KEY` |
|
||||
| `services.openrouter.api_key` | `OPENROUTER_API_KEY` |
| `services.google.api_key` | `GOOGLE_API_KEY` |
|
||||
|
||||
### Environment Variable Precedence
|
||||
|
||||
1. Command-line arguments (`-e KEY=VALUE`) take highest precedence
|
||||
2. User config file takes second precedence
|
||||
3. System defaults take lowest precedence
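
For example, assuming an Anthropic key is stored in the user config, a `-e` flag at session creation overrides it (the key values below are placeholders):

```bash
# 2. Value stored in the user config file (second precedence)
cubbi config set anthropic.api_key "sk-ant-from-config"

# Sessions created without -e inherit ANTHROPIC_API_KEY from the config file
cubbi session create --image goose

# 1. A -e flag on the command line wins over the stored value
cubbi session create --image goose -e ANTHROPIC_API_KEY="sk-ant-override"

# 3. If neither source sets a variable, the system default (if any) applies
```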
|
||||
|
||||
### Security Considerations
|
||||
|
||||
- Configuration file permissions are set to 600 (user read/write only)
|
||||
- Sensitive values can be referenced from environment variables: `${ENV_VAR}`
|
||||
- API keys and secrets are never logged or displayed in verbose output
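
For example, a key can be stored as a reference to a host environment variable instead of a literal value; Cubbi resolves the reference when building the container environment (the variable name below is illustrative):

```bash
# Store a reference; ${OPENAI_API_KEY} is read from the host environment at session time
cubbi config set openai.api_key '${OPENAI_API_KEY}'

# Sensitive values are masked (shown as *****) in listings
cubbi config list
```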
|
||||
|
||||
### CLI Configuration Commands
|
||||
|
||||
```bash
|
||||
# View entire configuration
|
||||
cubbi config list
|
||||
|
||||
# Get specific configuration value
|
||||
cubbi config get defaults.image
|
||||
|
||||
# Set configuration value (using simplified naming)
|
||||
cubbi config set langfuse.url "https://cloud.langfuse.com"
|
||||
cubbi config set openai.api_key "sk-..."
|
||||
|
||||
# Network configuration
|
||||
cubbi config network list # List default networks
|
||||
cubbi config network add example-network # Add a network to defaults
|
||||
cubbi config network remove example-network # Remove a network from defaults
|
||||
|
||||
# Reset configuration to defaults
|
||||
cubbi config reset
|
||||
```
|
||||
|
||||
## CLI Tool Commands
|
||||
|
||||
### Basic Commands
|
||||
|
||||
```bash
|
||||
# Create a new session locally (shorthand)
|
||||
cubbi
|
||||
|
||||
# List active sessions on local system
|
||||
cubbi session list
|
||||
|
||||
# Create a new session locally
|
||||
cubbi session create [OPTIONS]
|
||||
|
||||
# Create a session with a specific image
|
||||
cubbi session create --image goose
|
||||
|
||||
# Create a session with a specific project repository
|
||||
cubbi session create --image goose --project github.com/hello/private
|
||||
|
||||
# Create a session with external networks
|
||||
cubbi session create --network teamnet --network othernetwork
|
||||
|
||||
# Create a session with a project (shorthand)
|
||||
cubbi git@github.com:hello/private
|
||||
|
||||
# Close a specific session
|
||||
cubbi session close <id>
|
||||
|
||||
# Connect to an existing session
|
||||
cubbi session connect <id>
|
||||
|
||||
```
|
||||
|
||||
### Remote Management
|
||||
|
||||
```bash
|
||||
# Add a remote Cubbi service
|
||||
cubbi remote add <name> <url>
|
||||
|
||||
# List configured remote services
|
||||
cubbi remote list
|
||||
|
||||
# Remove a remote service
|
||||
cubbi remote remove <name>
|
||||
|
||||
# Authenticate with a remote service
|
||||
cubbi -r <remote_name> auth
|
||||
|
||||
# Create a session on a remote service
|
||||
cubbi -r <remote_name> [session create]
|
||||
|
||||
# List sessions on a remote service
|
||||
cubbi -r <remote_name> session list
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
# Set environment variables for a session
|
||||
cubbi session create -e VAR1=value1 -e VAR2=value2
|
||||
|
||||
# Set environment variables for a remote session
|
||||
cubbi -r <remote_name> session create -e VAR1=value1
|
||||
```
|
||||
|
||||
### Logging
|
||||
|
||||
```bash
|
||||
# Stream logs from a session
|
||||
cubbi session logs <id>
|
||||
|
||||
# Stream logs with follow option
|
||||
cubbi session logs <id> -f
|
||||
```
|
||||
|
||||
## Cubbi Service Specification
|
||||
|
||||
### Overview
|
||||
|
||||
The Cubbi Service is a web service that manages ephemeral containers in a Docker-in-Docker environment. It provides a REST API for container lifecycle management, authentication, and real-time log streaming.
|
||||
|
||||
### API Endpoints
|
||||
|
||||
#### Authentication
|
||||
|
||||
```
|
||||
POST /auth/login - Initiate Authentik authentication flow
|
||||
POST /auth/callback - Handle Authentik OAuth callback
|
||||
POST /auth/refresh - Refresh an existing token
|
||||
POST /auth/logout - Invalidate current token
|
||||
```
|
||||
|
||||
### Authentik Integration
|
||||
|
||||
The Cubbi Service integrates with Authentik at https://authentik.monadical.io using OAuth 2.0:
|
||||
|
||||
1. **Application Registration**:
|
||||
- Cubbi Service is registered as an OAuth application in Authentik
|
||||
- Configured with redirect URI to `/auth/callback`
|
||||
- Assigned appropriate scopes for user identification
|
||||
|
||||
2. **Authentication Flow**:
|
||||
- User initiates authentication via CLI
|
||||
- Cubbi CLI opens browser to Authentik authorization URL
|
||||
- User logs in through Authentik's interface
|
||||
- Authentik redirects to callback URL with authorization code
|
||||
- Cubbi Service exchanges code for access and refresh tokens
|
||||
- CLI receives and securely stores tokens
|
||||
|
||||
3. **Token Management**:
|
||||
- Access tokens used for API authorization
|
||||
- Refresh tokens used to obtain new access tokens
|
||||
- Tokens are encrypted at rest in CLI configuration
|
||||
|
||||
#### Sessions
|
||||
|
||||
```
|
||||
GET /sessions - List all sessions
|
||||
POST /sessions - Create a new session
|
||||
GET /sessions/{id} - Get session details
|
||||
DELETE /sessions/{id} - Terminate a session
|
||||
POST /sessions/{id}/connect - Establish connection to session
|
||||
GET /sessions/{id}/logs - Stream session logs
|
||||
```
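
As a rough sketch, a client could exercise these endpoints with `curl` as shown below; the base URL, token, and request body fields are illustrative assumptions rather than a fixed request schema:

```bash
# Assumed deployment URL and bearer token obtained from the auth flow
CUBBI_URL="https://cubbi.example.com"
TOKEN="<access-token>"

# List sessions
curl -s -H "Authorization: Bearer $TOKEN" "$CUBBI_URL/sessions"

# Create a session (field names are illustrative)
curl -s -X POST \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"image": "goose", "project": "github.com/hello/private"}' \
  "$CUBBI_URL/sessions"

# Terminate a session by ID
curl -s -X DELETE -H "Authorization: Bearer $TOKEN" "$CUBBI_URL/sessions/<id>"
```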
|
||||
|
||||
#### Images
|
||||
|
||||
```
|
||||
GET /images - List available images
|
||||
GET /images/{name} - Get image details
|
||||
```
|
||||
|
||||
#### Projects
|
||||
|
||||
```
|
||||
GET /projects - List all projects
|
||||
POST /projects - Add a new project
|
||||
GET /projects/{id} - Get project details
|
||||
PUT /projects/{id} - Update project details
|
||||
DELETE /projects/{id} - Remove a project
|
||||
```
|
||||
|
||||
### Service Configuration
|
||||
|
||||
```yaml
|
||||
# cubbi-service.yaml
|
||||
server:
|
||||
port: 3000
|
||||
host: 0.0.0.0
|
||||
|
||||
docker:
|
||||
socket: /var/run/docker.sock
|
||||
network: cubbi-network
|
||||
|
||||
auth:
|
||||
provider: authentik
|
||||
url: https://authentik.monadical.io
|
||||
clientId: cubbi-service
|
||||
|
||||
logging:
|
||||
providers:
|
||||
- type: fluentd
|
||||
url: http://fluentd.example.com:24224
|
||||
- type: langfuse
|
||||
url: https://cloud.langfuse.com
|
||||
public_key: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY}
|
||||
secret_key: ${LANGFUSE_INIT_PROJECT_SECRET_KEY}
|
||||
|
||||
images:
|
||||
- name: goose
|
||||
image: monadical/cubbi-goose:latest
|
||||
- name: aider
|
||||
image: monadical/cubbi-aider:latest
|
||||
- name: claude-code
|
||||
image: monadical/cubbi-claude-code:latest
|
||||
|
||||
projects:
|
||||
storage:
|
||||
type: encrypted
|
||||
key: ${PROJECT_ENCRYPTION_KEY}
|
||||
default_ssh_scan:
|
||||
- github.com
|
||||
- gitlab.com
|
||||
- bitbucket.org
|
||||
```
|
||||
|
||||
### Docker-in-Docker Implementation
|
||||
|
||||
The Cubbi Service runs in a container with access to the host's Docker socket, allowing it to create and manage sibling containers. This approach provides:
|
||||
|
||||
1. Isolation between containers
|
||||
2. Simple lifecycle management
|
||||
3. Resource constraints for security
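
A minimal sketch of running the service with the host's Docker socket mounted is shown below; the image name, config path, and published port are assumptions (the port matches the example `cubbi-service.yaml`):

```bash
# Run the Cubbi Service as a container that can start sibling containers
# on the host daemon via the mounted Docker socket.
docker run -d \
  --name cubbi-service \
  -p 3000:3000 \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v "$PWD/cubbi-service.yaml:/etc/cubbi/cubbi-service.yaml:ro" \
  monadical/cubbi-service:latest
```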
|
||||
|
||||
### Connection Handling
|
||||
|
||||
For remote connections to containers, the service provides two methods:
|
||||
|
||||
1. **WebSocket Terminal**: Browser-based terminal access
|
||||
2. **SSH Server**: Each container runs an SSH server for CLI access
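
In day-to-day use the CLI negotiates the connection, but a direct SSH connection is also possible when the container's SSH port is reachable; the host, port, and user below are placeholders:

```bash
# Preferred: let the CLI pick the transport
cubbi session connect <id>

# Direct SSH to the container's SSH server (port 22 inside the container,
# published on some host port by the service; values are illustrative)
ssh -p 2222 cubbi@cubbi.example.com
```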
|
||||
|
||||
### Logging Implementation
|
||||
|
||||
The Cubbi Service implements log collection and forwarding:
|
||||
|
||||
1. Container logs are captured using Docker's logging drivers
|
||||
2. Logs are forwarded to configured providers (Fluentd, Langfuse)
|
||||
3. Real-time log streaming is available via WebSockets
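
As an illustration of the first two steps, Docker's `fluentd` logging driver can ship a container's output to the configured Fluentd endpoint; the options below are a generic Docker example, not the exact flags the service uses:

```bash
# Forward container stdout/stderr to Fluentd via Docker's logging driver.
# The address matches the example endpoint in cubbi-service.yaml.
docker run -d \
  --log-driver=fluentd \
  --log-opt fluentd-address=fluentd.example.com:24224 \
  --log-opt tag="cubbi.session.<id>" \
  monadical/cubbi-goose:latest
```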
|
||||
|
||||
## Project Management
|
||||
|
||||
### Persistent Project Configuration
|
||||
|
||||
Cubbi provides persistent storage for project-specific configurations that need to survive container restarts. This is implemented through a dedicated volume mount and symlink system:
|
||||
|
||||
1. **Configuration Storage**:
|
||||
- Each project has a dedicated configuration directory on the host at `~/.cubbi/projects/<project-hash>/config`
|
||||
- For projects specified by URL, the hash is derived from the repository URL
|
||||
- For local projects, the hash is derived from the absolute path of the local directory
|
||||
- This directory is mounted into the container at `/cubbi-config`
|
||||
|
||||
2. **Image Configuration**:
|
||||
- Each image can specify configuration files/directories that should persist across sessions
|
||||
- These are defined in the image's `cubbi-image.yaml` file in the `persistent_configs` section
|
||||
- Example for Goose image:
|
||||
```yaml
|
||||
persistent_configs:
|
||||
- source: "/app/.goose" # Path in container
|
||||
target: "/cubbi-config/goose" # Path in persistent storage
|
||||
type: "directory" # directory or file
|
||||
description: "Goose memory and configuration"
|
||||
```
|
||||
|
||||
3. **Automatic Symlinking**:
|
||||
- During container initialization, the system:
|
||||
- Creates all target directories in the persistent storage
|
||||
- Creates symlinks from the source paths to the target paths
|
||||
- This makes the persistence transparent to the application
|
||||
|
||||
4. **Environment Variables**:
|
||||
- Container has access to configuration location via environment variables:
|
||||
```
|
||||
CUBBI_CONFIG_DIR=/cubbi-config
|
||||
CUBBI_IMAGE_CONFIG_DIR=/cubbi-config/<image-name>
|
||||
```
|
||||
|
||||
This ensures that important configurations like Goose's memory store, authentication tokens, and other state information persist between container sessions while maintaining isolation between different projects.
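
A minimal sketch of the symlinking step for the Goose example above, as it might run from `cubbi-init.sh` (only `CUBBI_CONFIG_DIR` is documented; the remaining paths follow the example configuration):

```bash
# Persist /app/.goose across sessions by pointing it at the mounted config volume
mkdir -p "$CUBBI_CONFIG_DIR/goose"

# Migrate any existing directory, then replace it with a symlink into persistent storage
if [ -d /app/.goose ] && [ ! -L /app/.goose ]; then
    cp -a /app/.goose/. "$CUBBI_CONFIG_DIR/goose/" 2>/dev/null || true
    rm -rf /app/.goose
fi
ln -sfn "$CUBBI_CONFIG_DIR/goose" /app/.goose
```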
|
||||
|
||||
### Adding Projects
|
||||
|
||||
Users can add projects with associated credentials:
|
||||
|
||||
```bash
|
||||
# Add a project with SSH key
|
||||
cubbi project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519
|
||||
|
||||
# Add a project with token authentication
|
||||
cubbi project add github.com/hello/private --token ghp_123456789
|
||||
|
||||
# List all projects
|
||||
cubbi project list
|
||||
|
||||
# Remove a project
|
||||
cubbi project remove github.com/hello/private
|
||||
```
|
||||
|
||||
### Project Configuration
|
||||
|
||||
Projects are stored in the Cubbi service and referenced by their repository URL. The configuration includes:
|
||||
|
||||
```yaml
|
||||
# Project configuration
|
||||
id: github.com/hello/private
|
||||
url: git@github.com:hello/private.git
|
||||
type: git
|
||||
auth:
|
||||
type: ssh
|
||||
key: |
|
||||
-----BEGIN OPENSSH PRIVATE KEY-----
|
||||
...encrypted key data...
|
||||
-----END OPENSSH PRIVATE KEY-----
|
||||
public_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI...
|
||||
```
|
||||
|
||||
## Image Implementation

### Image Structure

Each image is a Docker container with a standardized structure:

```
/
├── entrypoint.sh      # Container initialization
├── cubbi-init.sh      # Standardized initialization script
├── cubbi-image.yaml   # Image metadata and configuration
├── tool/              # AI tool installation
└── ssh/               # SSH server configuration
```

### Standardized Initialization Script

All images include a standardized `cubbi-init.sh` script that handles common initialization tasks:

```bash
#!/bin/bash

# Project initialization
if [ -n "$CUBBI_PROJECT_URL" ]; then
    echo "Initializing project: $CUBBI_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$CUBBI_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$CUBBI_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$CUBBI_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$CUBBI_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone "$CUBBI_PROJECT_URL" /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.cubbi/init.sh" ]; then
        bash /app/.cubbi/init.sh
    fi
fi

# Image-specific initialization continues...
```
### Image Configuration (cubbi-image.yaml)

```yaml
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com

init:
  pre_command: /cubbi-init.sh
  command: /entrypoint.sh

environment:
  - name: MCP_HOST
    description: MCP server host
    required: true
    default: http://localhost:8000

  - name: GOOSE_ID
    description: Goose instance ID
    required: false

  # Project environment variables
  - name: CUBBI_PROJECT_URL
    description: Project repository URL
    required: false

  - name: CUBBI_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: CUBBI_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: CUBBI_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000  # Main application
  - 22    # SSH server

volumes:
  - mountPath: /app
    description: Application directory

persistent_configs:
  - source: "/app/.goose"
    target: "/cubbi-config/goose"
    type: "directory"
    description: "Goose memory and configuration"
```
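The spec does not say how `cubbi-image.yaml` is read by the tool; a plausible loader sketch is shown below using PyYAML and pydantic (both already used elsewhere in the CLI). The model and field names mirror the file above, only a subset of keys is modelled, and `load_image_config` is an illustrative name.

```python
from pathlib import Path
from typing import List, Optional

import yaml
from pydantic import BaseModel, Field


class PersistentConfig(BaseModel):
    source: str                       # path inside the container
    target: str                       # path under /cubbi-config
    type: str = "directory"           # "directory" or "file"
    description: Optional[str] = None


class ImageEnvVar(BaseModel):
    name: str
    description: str
    required: bool = False
    default: Optional[str] = None
    sensitive: bool = False


class ImageConfig(BaseModel):
    name: str
    description: str
    version: str
    maintainer: str
    environment: List[ImageEnvVar] = Field(default_factory=list)
    ports: List[int] = Field(default_factory=list)
    persistent_configs: List[PersistentConfig] = Field(default_factory=list)
    # init, volumes, etc. are omitted here; unknown keys are ignored by default.


def load_image_config(path: Path) -> ImageConfig:
    """Parse and validate a cubbi-image.yaml file."""
    with open(path, "r") as f:
        return ImageConfig.model_validate(yaml.safe_load(f))
```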
### Example Built-in Images

1. **goose**: Goose with MCP servers
2. **aider**: Aider coding assistant
3. **claude-code**: Claude Code environment
4. **custom**: Custom Dockerfile support

## Network Management

### Docker Network Integration

Cubbi provides flexible network management for containers:

1. **Default Cubbi Network**:
   - Each container is automatically connected to the Cubbi network (`cubbi-network` by default)
   - This ensures containers can communicate with each other

2. **External Network Connection**:
   - Containers can be connected to one or more external Docker networks
   - This allows integration with existing infrastructure (e.g., databases, web servers)
   - Networks can be specified at session creation time: `cubbi session create --network mynetwork`

3. **Default Networks Configuration**:
   - Users can configure default networks in their configuration
   - These networks will be used for all new sessions unless overridden
   - Managed with `cubbi config network` commands

4. **Network Command Examples** (a sketch of the underlying Docker SDK calls follows below):
   ```bash
   # Use with session creation
   cubbi session create --network teamnet

   # Use with multiple networks
   cubbi session create --network teamnet --network dbnet

   # Configure default networks
   cubbi config network add teamnet
   ```
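A sketch of how the rules above could be applied with the Docker SDK that the CLI already uses. The helper name is illustrative; it assumes the container is not yet attached to the listed networks and that external networks already exist.

```python
import docker
from docker.errors import NotFound


def connect_session_networks(container_id: str, extra_networks: list[str]) -> None:
    """Attach a session container to the default Cubbi network plus any extras."""
    client = docker.from_env()
    container = client.containers.get(container_id)

    for name in ["cubbi-network", *extra_networks]:
        try:
            network = client.networks.get(name)
        except NotFound:
            if name != "cubbi-network":
                # External networks (e.g. `--network teamnet`) must already exist.
                raise
            # The default Cubbi network is created on demand.
            network = client.networks.create(name, driver="bridge")
        network.connect(container)
```

For `cubbi session create --network teamnet --network dbnet`, the call would be `connect_session_networks(session.container_id, ["teamnet", "dbnet"])`.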
## Security Considerations

1. **Container Isolation**: Each session runs in an isolated container
2. **Authentication**: Integration with Authentik for secure authentication
3. **Resource Limits**: Configurable CPU, memory, and storage limits
4. **Network Isolation**: Internal Docker network for container-to-container communication, with optional external network connections
5. **Encrypted Connections**: TLS for API connections and SSH for terminal access

## Deployment

### Cubbi Service Deployment

```yaml
# docker-compose.yml for Cubbi Service
version: '3.8'

services:
  cubbi-service:
    image: monadical/cubbi-service:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./config:/app/config
    ports:
      - "3000:3000"
    environment:
      - AUTH_URL=https://authentik.monadical.io
      - LANGFUSE_API_KEY=your_api_key
    networks:
      - cubbi-network

networks:
  cubbi-network:
    driver: bridge
```
## Project Repository Integration Workflow

### Adding a Project Repository

1. The user adds a project repository with authentication:
   ```bash
   cubbi project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519
   ```

2. The Cubbi CLI reads the SSH key, encrypts it, and sends it to the Cubbi Service

3. The Cubbi Service stores the project configuration securely

### Using a Project in a Session

1. The user creates a session with a project:
   ```bash
   cubbi -r monadical git@github.com:hello/private
   ```

2. Cubbi Service:
   - Identifies the project from the URL
   - Retrieves the project's authentication details
   - Sets up environment variables (see the sketch after this list):
     ```
     CUBBI_PROJECT_URL=git@github.com:hello/private
     CUBBI_PROJECT_TYPE=git
     CUBBI_GIT_SSH_KEY=<contents of the SSH key>
     ```
   - Creates the container with these environment variables

3. Container initialization:
   - The standardized `cubbi-init.sh` script detects the project environment variables
   - Sets up SSH key or token authentication
   - Clones the repository to `/app`
   - Runs any project-specific initialization scripts

4. The user can immediately begin working with the repository
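The shapes below are assumptions about how a stored project configuration might be turned into the `CUBBI_*` variables listed in step 2; the spec only fixes the variable names themselves.

```python
from typing import Dict, Optional

from pydantic import BaseModel


class ProjectAuth(BaseModel):
    type: str                      # "ssh" or "token"
    key: Optional[str] = None      # decrypted private key contents
    token: Optional[str] = None


class Project(BaseModel):
    id: str
    url: str
    type: str = "git"
    auth: Optional[ProjectAuth] = None


def project_environment(project: Project) -> Dict[str, str]:
    """Build the CUBBI_* variables passed to the session container."""
    env = {
        "CUBBI_PROJECT_URL": project.url,
        "CUBBI_PROJECT_TYPE": project.type,
    }
    if project.auth and project.auth.type == "ssh" and project.auth.key:
        env["CUBBI_GIT_SSH_KEY"] = project.auth.key
    elif project.auth and project.auth.type == "token" and project.auth.token:
        env["CUBBI_GIT_TOKEN"] = project.auth.token
    return env
```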
## Implementation Roadmap

1. **Phase 1**: Local CLI tool with Docker integration
2. **Phase 2**: Cubbi Service REST API with basic container management
3. **Phase 3**: Authentication and secure connections
4. **Phase 4**: Project management functionality
5. **Phase 5**: Image implementation (Goose, Aider, Claude Code)
6. **Phase 6**: Logging integration with Fluentd and Langfuse
7. **Phase 7**: CLI remote connectivity improvements
8. **Phase 8**: Additional images and extensibility features
160  docs/specs/2_MCP_SERVER.md  (new file)
@@ -0,0 +1,160 @@
# MCP Server Specification

## Overview

This document specifies the implementation of Model Context Protocol (MCP) server support in the Cubbi system. The MCP server feature allows users to connect, build, and manage external MCP servers that can be attached to Cubbi sessions.

An MCP server is a service that an image (such as Goose or Claude Code) can access to extend the LLM's capabilities through tool calls. It can be either:
- A local stdio-based MCP server running in a container (accessed via an SSE proxy)
- A remote HTTP SSE server accessed directly via its URL

## Key Features

1. Support for two types of MCP servers:
   - **Proxy-based MCP servers** (default): a container running an MCP stdio server behind a proxy that converts it to SSE
   - **Remote MCP servers**: external HTTP SSE servers accessed via URL

2. Persistent MCP containers that can be:
   - Started/stopped independently of sessions
   - Connected to multiple sessions
   - Automatically started when referenced during session creation

3. Management of MCP server configurations and containers

## MCP Configuration Model

The MCP configuration will be stored in the user configuration file and will include:

```yaml
mcps:
  # Proxy-based MCP server (default type)
  - name: github
    type: proxy
    base_image: mcp/github
    command: "github-mcp"                             # Optional command to run in the base image
    proxy_image: ghcr.io/sparfenyuk/mcp-proxy:latest  # Optional, defaults to standard proxy image
    proxy_options:
      sse_port: 8080
      sse_host: "0.0.0.0"
      allow_origin: "*"
    env:
      GITHUB_TOKEN: "your-token-here"

  # Remote MCP server
  - name: remote-mcp
    type: remote
    url: "http://mcp-server.example.com/sse"
    headers:
      Authorization: "Bearer your-token-here"
```
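The on-disk format above maps naturally onto a small set of models; the sketch below uses pydantic, as the rest of the configuration handling does. The class layout (a union discriminated on `type`) is an implementation choice, not something the spec requires.

```python
from typing import Dict, List, Literal, Optional, Union

from pydantic import BaseModel, Field


class ProxyOptions(BaseModel):
    sse_port: int = 8080
    sse_host: str = "0.0.0.0"
    allow_origin: str = "*"


class ProxyMCP(BaseModel):
    name: str
    type: Literal["proxy"] = "proxy"
    base_image: str
    command: Optional[str] = None
    proxy_image: str = "ghcr.io/sparfenyuk/mcp-proxy:latest"
    proxy_options: ProxyOptions = Field(default_factory=ProxyOptions)
    env: Dict[str, str] = Field(default_factory=dict)


class RemoteMCP(BaseModel):
    name: str
    type: Literal["remote"]
    url: str
    headers: Dict[str, str] = Field(default_factory=dict)


class MCPConfig(BaseModel):
    mcps: List[Union[ProxyMCP, RemoteMCP]] = Field(default_factory=list)
```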
## CLI Commands

### MCP Management

```
cubbi mcp list              # List all configured MCP servers and their status
cubbi mcp status <name>     # Show detailed status of a specific MCP server
cubbi mcp start <name>      # Start an MCP server container
cubbi mcp stop <name>       # Stop and remove an MCP server container
cubbi mcp restart <name>    # Restart an MCP server container
cubbi mcp start --all       # Start all MCP server containers
cubbi mcp stop --all        # Stop and remove all MCP server containers
cubbi mcp inspector         # Run the MCP Inspector UI with network connectivity to all MCP servers
cubbi mcp inspector --client-port <cp> --server-port <sp>
                            # Run with custom client port (default: 5173) and server port (default: 3000)
cubbi mcp inspector --detach   # Run the inspector in detached mode
cubbi mcp inspector --stop     # Stop the running inspector
cubbi mcp logs <name>       # Show logs for an MCP server container
```

### MCP Configuration

```
# Add a proxy-based MCP server (default)
cubbi mcp add <name> <base_image> [--command CMD] [--proxy-image IMG] [--sse-port PORT] [--sse-host HOST] [--allow-origin ORIGIN] [--env KEY=VALUE...]

# Add a remote MCP server
cubbi mcp add-remote <name> <url> [--header KEY=VALUE...]

# Remove an MCP configuration
cubbi mcp remove <name>
```

### Session Integration

```
cubbi session create [--mcp <name>]   # Create a session with an MCP server attached
```
## Implementation Details

### MCP Container Management

1. MCP containers will have their own dedicated Docker network (`cubbi-mcp-network`)
2. Session containers will be attached to both their session network and the MCP network when using an MCP
3. MCP containers will be persistent across sessions unless explicitly stopped
4. MCP containers will be named with a prefix to identify them (`cubbi_mcp_<name>`)
5. Each MCP container will have a network alias matching its name without the prefix (e.g., `cubbi_mcp_github` will have the alias `github`)
6. Network aliases enable DNS-based service discovery between containers (see the sketch below)
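A sketch of rules 1, 4 and 5 using the Docker SDK; the network, container-name, and alias conventions come from the list above, while the helper itself and its label are illustrative.

```python
import docker
from docker.errors import NotFound

MCP_NETWORK = "cubbi-mcp-network"


def start_mcp_container(name: str, image: str, env: dict[str, str]) -> None:
    """Start a persistent MCP container and make it reachable as `<name>`."""
    client = docker.from_env()

    # Dedicated MCP network, created on first use.
    try:
        network = client.networks.get(MCP_NETWORK)
    except NotFound:
        network = client.networks.create(MCP_NETWORK, driver="bridge")

    container = client.containers.run(
        image,
        name=f"cubbi_mcp_{name}",
        detach=True,
        environment=env,
        labels={"cubbi.mcp": name},  # illustrative label for discovery
    )

    # Attach with a short DNS alias so sessions on the same network can
    # reach the server as http://<name>:<port>.
    network.connect(container, aliases=[name])
```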
### MCP Inspector

The MCP Inspector is a web-based UI tool that allows you to:

1. Visualize and interact with multiple MCP servers
2. Debug MCP server messages and interactions
3. Test MCP server capabilities directly

The MCP Inspector implementation includes:

1. A container based on the `mcp/inspector` image
2. Automatic joining of all MCP server networks for seamless DNS resolution
3. A modified Express server that binds to all interfaces (0.0.0.0)
4. Port mapping for both the frontend (default: 5173) and backend API (default: 3000)
5. Network connectivity to all MCP servers using their simple names as DNS hostnames
### Proxy-based MCP Servers (Default)

For proxy-based MCP servers:
1. Create a custom Dockerfile that:
   - Uses the specified proxy image as the base
   - Installs Docker-in-Docker capabilities
   - Sets up the base MCP server image
   - Configures the entrypoint to run the MCP proxy with the right parameters
2. Build the custom image (a build sketch follows below)
3. Run the container with:
   - The Docker socket mounted to enable Docker-in-Docker
   - Environment variables from the configuration
   - The SSE server port exposed

The proxy container will:
1. Pull the base image
2. Run the base image with the specified command
3. Connect the stdio of the base image to the MCP proxy
4. Expose an SSE server that clients can connect to
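The spec does not fix how the custom image is produced; one possibility is to generate the Dockerfile in memory and build it through the Docker SDK, as sketched below. The Dockerfile body is deliberately left schematic because the concrete instructions (Docker-in-Docker setup, proxy entrypoint flags) depend on the chosen proxy image.

```python
import io
import textwrap

import docker


def build_proxy_image(name: str, base_image: str, proxy_image: str) -> str:
    """Build a proxy wrapper image for a stdio MCP server (schematic)."""
    dockerfile = textwrap.dedent(f"""\
        FROM {proxy_image}
        # Docker-in-Docker tooling, the base MCP image ({base_image}) and the
        # proxy entrypoint would be configured here, as described in step 1 above.
        """)
    tag = f"cubbi-mcp-{name}:latest"
    client = docker.from_env()
    image, _logs = client.images.build(
        fileobj=io.BytesIO(dockerfile.encode()),  # in-memory Dockerfile
        tag=tag,
        rm=True,
    )
    return tag
```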
### Remote MCP Servers

For remote MCP servers:
1. Store the URL and headers
2. Provide these to the session container when connecting

## Session Integration

When a session is created with an MCP server:
1. If the MCP server is not running, start it automatically
2. Connect the session container to the MCP server's network
3. Set the appropriate environment variables in the session to enable MCP connectivity (see the sketch below)
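A minimal sketch of step 3, assuming the session receives its MCP endpoint through the `MCP_HOST` variable already declared in `cubbi-image.yaml`; the exact URL path used for proxy-based servers is an assumption.

```python
def mcp_environment(mcp: dict) -> dict[str, str]:
    """Environment variables injected into a session for one MCP server."""
    if mcp["type"] == "remote":
        # Remote servers are reached directly at their configured URL.
        return {"MCP_HOST": mcp["url"]}
    # Proxy-based servers are reachable on the shared MCP network via their
    # DNS alias (the server name) and configured SSE port; the /sse path is assumed.
    port = mcp.get("proxy_options", {}).get("sse_port", 8080)
    return {"MCP_HOST": f"http://{mcp['name']}:{port}/sse"}
```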
## Security Considerations

1. MCP server credentials and tokens should be handled securely through environment variables
2. Network isolation should be maintained between different MCP servers
3. Consider options for access control between sessions and MCP servers

## Future Enhancements

1. Support for MCP server version management
2. Health checking and automatic restart capabilities
3. Support for MCP server clusters or load balancing
4. Integration with monitoring systems
@@ -1,47 +0,0 @@
|
||||
FROM python:3.12-slim
|
||||
|
||||
LABEL maintainer="team@monadical.com"
|
||||
LABEL description="Goose with MCP servers"
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
openssh-server \
|
||||
bash \
|
||||
curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set up SSH server
|
||||
RUN mkdir /var/run/sshd
|
||||
RUN echo 'root:root' | chpasswd
|
||||
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
|
||||
RUN sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config
|
||||
|
||||
# Create app directory
|
||||
WORKDIR /app
|
||||
|
||||
# Install python dependencies
|
||||
# This is done before copying scripts for better cache management
|
||||
RUN pip install --no-cache-dir goose-ai langfuse
|
||||
|
||||
# Copy initialization scripts
|
||||
COPY mc-init.sh /mc-init.sh
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
COPY mc-driver.yaml /mc-driver.yaml
|
||||
COPY init-status.sh /init-status.sh
|
||||
|
||||
# Make scripts executable
|
||||
RUN chmod +x /mc-init.sh /entrypoint.sh /init-status.sh
|
||||
|
||||
# Set up initialization status check on login
|
||||
RUN echo '[ -x /init-status.sh ] && /init-status.sh' >> /etc/bash.bashrc
|
||||
|
||||
# Set up environment
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 8000 22
|
||||
|
||||
# Set entrypoint
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
@@ -1,47 +0,0 @@
|
||||
# Goose Driver for MC
|
||||
|
||||
This driver provides a containerized environment for running [Goose](https://goose.ai) with MCP servers.
|
||||
|
||||
## Features
|
||||
|
||||
- Pre-configured environment for Goose AI
|
||||
- MCP server integration
|
||||
- SSH access
|
||||
- Git repository integration
|
||||
- Langfuse logging support
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Description | Required |
|
||||
|----------|-------------|----------|
|
||||
| `MCP_HOST` | MCP server host | Yes |
|
||||
| `GOOSE_API_KEY` | Goose API key | Yes |
|
||||
| `GOOSE_ID` | Goose instance ID | No |
|
||||
| `LANGFUSE_PUBLIC_KEY` | Langfuse public key | No |
|
||||
| `LANGFUSE_SECRET_KEY` | Langfuse secret key | No |
|
||||
| `LANGFUSE_HOST` | Langfuse API host | No |
|
||||
| `MC_PROJECT_URL` | Project repository URL | No |
|
||||
| `MC_GIT_SSH_KEY` | SSH key for Git authentication | No |
|
||||
| `MC_GIT_TOKEN` | Token for Git authentication | No |
|
||||
|
||||
## Build
|
||||
|
||||
To build this driver:
|
||||
|
||||
```bash
|
||||
cd drivers/goose
|
||||
docker build -t monadical/mc-goose:latest .
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Create a new session with this driver
|
||||
mc session create --driver goose
|
||||
|
||||
# Create with specific MCP server
|
||||
mc session create --driver goose -e MCP_HOST=http://mcp.example.com:8000
|
||||
|
||||
# Create with project repository
|
||||
mc session create --driver goose --project github.com/username/repo
|
||||
```
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Entrypoint script for Goose driver
|
||||
|
||||
# Run the standard initialization script
|
||||
/mc-init.sh
|
||||
|
||||
# Start SSH server in the background
|
||||
/usr/sbin/sshd
|
||||
|
||||
# Print welcome message
|
||||
echo "==============================================="
|
||||
echo "Goose driver container started"
|
||||
echo "SSH server running on port 22"
|
||||
echo "==============================================="
|
||||
|
||||
# Keep container running
|
||||
exec tail -f /dev/null
|
||||
@@ -1,65 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Script to check and display initialization status
|
||||
|
||||
# Function to display initialization logs
|
||||
show_init_logs() {
|
||||
if [ -f "/init.log" ]; then
|
||||
echo "Displaying initialization logs:"
|
||||
echo "----------------------------------------"
|
||||
cat /init.log
|
||||
echo "----------------------------------------"
|
||||
else
|
||||
echo "No initialization logs found."
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to follow logs until initialization completes
|
||||
follow_init_logs() {
|
||||
if [ ! -f "/init.log" ]; then
|
||||
echo "No initialization logs found."
|
||||
return
|
||||
fi
|
||||
|
||||
echo "Initialization is still in progress. Showing logs:"
|
||||
echo "----------------------------------------"
|
||||
tail -f /init.log &
|
||||
tail_pid=$!
|
||||
|
||||
# Check every second if initialization has completed
|
||||
while true; do
|
||||
if [ -f "/init.status" ] && grep -q "INIT_COMPLETE=true" "/init.status"; then
|
||||
kill $tail_pid 2>/dev/null
|
||||
echo "----------------------------------------"
|
||||
echo "Initialization completed."
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
# Check if we're in an interactive shell
|
||||
if [ -t 0 ]; then
|
||||
INTERACTIVE=true
|
||||
else
|
||||
INTERACTIVE=false
|
||||
fi
|
||||
|
||||
# Check initialization status
|
||||
if [ -f "/init.status" ]; then
|
||||
if grep -q "INIT_COMPLETE=true" "/init.status"; then
|
||||
echo "MC initialization has completed."
|
||||
# No longer prompt to show logs when initialization is complete
|
||||
else
|
||||
echo "MC initialization is still in progress."
|
||||
follow_init_logs
|
||||
fi
|
||||
else
|
||||
echo "Cannot determine initialization status."
|
||||
# Ask if user wants to see logs if they exist (only in interactive mode)
|
||||
if [ -f "/init.log" ] && [ "$INTERACTIVE" = true ]; then
|
||||
read -p "Do you want to see initialization logs? (y/n): " show_logs
|
||||
if [[ "$show_logs" =~ ^[Yy] ]]; then
|
||||
show_init_logs
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
@@ -1,66 +0,0 @@
|
||||
name: goose
|
||||
description: Goose with MCP servers
|
||||
version: 1.0.0
|
||||
maintainer: team@monadical.com
|
||||
|
||||
init:
|
||||
pre_command: /mc-init.sh
|
||||
command: /entrypoint.sh
|
||||
|
||||
environment:
|
||||
- name: MCP_HOST
|
||||
description: MCP server host
|
||||
required: true
|
||||
default: http://localhost:8000
|
||||
|
||||
- name: GOOSE_API_KEY
|
||||
description: Goose API key
|
||||
required: true
|
||||
sensitive: true
|
||||
|
||||
- name: GOOSE_ID
|
||||
description: Goose instance ID
|
||||
required: false
|
||||
|
||||
- name: LANGFUSE_PUBLIC_KEY
|
||||
description: Langfuse public key
|
||||
required: false
|
||||
sensitive: true
|
||||
|
||||
- name: LANGFUSE_SECRET_KEY
|
||||
description: Langfuse secret key
|
||||
required: false
|
||||
sensitive: true
|
||||
|
||||
- name: LANGFUSE_HOST
|
||||
description: Langfuse API host
|
||||
required: false
|
||||
default: https://api.langfuse.com
|
||||
|
||||
# Project environment variables
|
||||
- name: MC_PROJECT_URL
|
||||
description: Project repository URL
|
||||
required: false
|
||||
|
||||
- name: MC_PROJECT_TYPE
|
||||
description: Project repository type (git, svn, etc.)
|
||||
required: false
|
||||
default: git
|
||||
|
||||
- name: MC_GIT_SSH_KEY
|
||||
description: SSH key for Git authentication
|
||||
required: false
|
||||
sensitive: true
|
||||
|
||||
- name: MC_GIT_TOKEN
|
||||
description: Token for Git authentication
|
||||
required: false
|
||||
sensitive: true
|
||||
|
||||
ports:
|
||||
- 8000 # Main application
|
||||
- 22 # SSH server
|
||||
|
||||
volumes:
|
||||
- mountPath: /app
|
||||
description: Application directory
|
||||
@@ -1,65 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Standardized initialization script for MC drivers
|
||||
|
||||
# Redirect all output to both stdout and the log file
|
||||
exec > >(tee -a /init.log) 2>&1
|
||||
|
||||
# Mark initialization as started
|
||||
echo "=== MC Initialization started at $(date) ==="
|
||||
echo "INIT_COMPLETE=false" > /init.status
|
||||
|
||||
# Project initialization
|
||||
if [ -n "$MC_PROJECT_URL" ]; then
|
||||
echo "Initializing project: $MC_PROJECT_URL"
|
||||
|
||||
# Set up SSH key if provided
|
||||
if [ -n "$MC_GIT_SSH_KEY" ]; then
|
||||
mkdir -p ~/.ssh
|
||||
echo "$MC_GIT_SSH_KEY" > ~/.ssh/id_ed25519
|
||||
chmod 600 ~/.ssh/id_ed25519
|
||||
ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
|
||||
ssh-keyscan gitlab.com >> ~/.ssh/known_hosts 2>/dev/null
|
||||
ssh-keyscan bitbucket.org >> ~/.ssh/known_hosts 2>/dev/null
|
||||
fi
|
||||
|
||||
# Set up token if provided
|
||||
if [ -n "$MC_GIT_TOKEN" ]; then
|
||||
git config --global credential.helper store
|
||||
echo "https://$MC_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
|
||||
fi
|
||||
|
||||
# Clone repository
|
||||
git clone $MC_PROJECT_URL /app
|
||||
cd /app
|
||||
|
||||
# Run project-specific initialization if present
|
||||
if [ -f "/app/.mc/init.sh" ]; then
|
||||
bash /app/.mc/init.sh
|
||||
fi
|
||||
fi
|
||||
|
||||
# Set up Goose API key if provided
|
||||
if [ -n "$GOOSE_API_KEY" ]; then
|
||||
echo "Setting up Goose API key"
|
||||
export GOOSE_API_KEY="$GOOSE_API_KEY"
|
||||
fi
|
||||
|
||||
# Set up MCP connection if provided
|
||||
if [ -n "$MCP_HOST" ]; then
|
||||
echo "Setting up MCP connection to $MCP_HOST"
|
||||
export MCP_HOST="$MCP_HOST"
|
||||
fi
|
||||
|
||||
# Set up Langfuse logging if credentials are provided
|
||||
if [ -n "$LANGFUSE_SECRET_KEY" ] && [ -n "$LANGFUSE_PUBLIC_KEY" ]; then
|
||||
echo "Setting up Langfuse logging"
|
||||
export LANGFUSE_SECRET_KEY="$LANGFUSE_SECRET_KEY"
|
||||
export LANGFUSE_PUBLIC_KEY="$LANGFUSE_PUBLIC_KEY"
|
||||
export LANGFUSE_HOST="${LANGFUSE_HOST:-https://api.langfuse.com}"
|
||||
fi
|
||||
|
||||
echo "MC driver initialization complete"
|
||||
|
||||
# Mark initialization as complete
|
||||
echo "=== MC Initialization completed at $(date) ==="
|
||||
echo "INIT_COMPLETE=true" > /init.status
|
||||
@@ -1,403 +0,0 @@
|
||||
import os
|
||||
from typing import List, Optional
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from .config import ConfigManager
|
||||
from .container import ContainerManager
|
||||
from .models import SessionStatus
|
||||
|
||||
app = typer.Typer(help="Monadical Container Tool")
|
||||
session_app = typer.Typer(help="Manage MC sessions")
|
||||
driver_app = typer.Typer(help="Manage MC drivers", no_args_is_help=True)
|
||||
app.add_typer(session_app, name="session", no_args_is_help=True)
|
||||
app.add_typer(driver_app, name="driver", no_args_is_help=True)
|
||||
|
||||
console = Console()
|
||||
config_manager = ConfigManager()
|
||||
container_manager = ContainerManager(config_manager)
|
||||
|
||||
|
||||
@app.callback(invoke_without_command=True)
|
||||
def main(ctx: typer.Context) -> None:
|
||||
"""Monadical Container Tool"""
|
||||
# If no command is specified, create a session
|
||||
if ctx.invoked_subcommand is None:
|
||||
create_session(
|
||||
driver=None,
|
||||
project=None,
|
||||
env=[],
|
||||
name=None,
|
||||
no_connect=False,
|
||||
no_mount=False,
|
||||
)
|
||||
|
||||
|
||||
@app.command()
|
||||
def version() -> None:
|
||||
"""Show MC version information"""
|
||||
from importlib.metadata import version as get_version
|
||||
|
||||
try:
|
||||
version_str = get_version("mcontainer")
|
||||
console.print(f"MC - Monadical Container Tool v{version_str}")
|
||||
except Exception:
|
||||
console.print("MC - Monadical Container Tool (development version)")
|
||||
|
||||
|
||||
@session_app.command("list")
|
||||
def list_sessions() -> None:
|
||||
"""List active MC sessions"""
|
||||
sessions = container_manager.list_sessions()
|
||||
|
||||
if not sessions:
|
||||
console.print("No active sessions found")
|
||||
return
|
||||
|
||||
table = Table(show_header=True, header_style="bold")
|
||||
table.add_column("ID")
|
||||
table.add_column("Name")
|
||||
table.add_column("Driver")
|
||||
table.add_column("Status")
|
||||
table.add_column("Ports")
|
||||
table.add_column("Project")
|
||||
|
||||
for session in sessions:
|
||||
ports_str = ", ".join(
|
||||
[
|
||||
f"{container_port}:{host_port}"
|
||||
for container_port, host_port in session.ports.items()
|
||||
]
|
||||
)
|
||||
|
||||
status_color = {
|
||||
SessionStatus.RUNNING: "green",
|
||||
SessionStatus.STOPPED: "red",
|
||||
SessionStatus.CREATING: "yellow",
|
||||
SessionStatus.FAILED: "red",
|
||||
}.get(session.status, "white")
|
||||
|
||||
table.add_row(
|
||||
session.id,
|
||||
session.name,
|
||||
session.driver,
|
||||
f"[{status_color}]{session.status}[/{status_color}]",
|
||||
ports_str,
|
||||
session.project or "",
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
|
||||
@session_app.command("create")
|
||||
def create_session(
|
||||
driver: Optional[str] = typer.Option(None, "--driver", "-d", help="Driver to use"),
|
||||
project: Optional[str] = typer.Option(
|
||||
None, "--project", "-p", help="Project repository URL"
|
||||
),
|
||||
env: List[str] = typer.Option(
|
||||
[], "--env", "-e", help="Environment variables (KEY=VALUE)"
|
||||
),
|
||||
name: Optional[str] = typer.Option(None, "--name", "-n", help="Session name"),
|
||||
no_connect: bool = typer.Option(
|
||||
False, "--no-connect", help="Don't automatically connect to the session"
|
||||
),
|
||||
no_mount: bool = typer.Option(
|
||||
False,
|
||||
"--no-mount",
|
||||
help="Don't mount local directory to /app (ignored if --project is used)",
|
||||
),
|
||||
) -> None:
|
||||
"""Create a new MC session"""
|
||||
# Use default driver if not specified
|
||||
if not driver:
|
||||
driver = config_manager.config.defaults.get("driver", "goose")
|
||||
|
||||
# Parse environment variables
|
||||
environment = {}
|
||||
for var in env:
|
||||
if "=" in var:
|
||||
key, value = var.split("=", 1)
|
||||
environment[key] = value
|
||||
else:
|
||||
console.print(
|
||||
f"[yellow]Warning: Ignoring invalid environment variable format: {var}[/yellow]"
|
||||
)
|
||||
|
||||
with console.status(f"Creating session with driver '{driver}'..."):
|
||||
session = container_manager.create_session(
|
||||
driver_name=driver,
|
||||
project=project,
|
||||
environment=environment,
|
||||
session_name=name,
|
||||
mount_local=not no_mount,
|
||||
)
|
||||
|
||||
if session:
|
||||
console.print("[green]Session created successfully![/green]")
|
||||
console.print(f"Session ID: {session.id}")
|
||||
console.print(f"Driver: {session.driver}")
|
||||
|
||||
if session.ports:
|
||||
console.print("Ports:")
|
||||
for container_port, host_port in session.ports.items():
|
||||
console.print(f" {container_port} -> {host_port}")
|
||||
|
||||
# Auto-connect unless --no-connect flag is provided
|
||||
if not no_connect:
|
||||
console.print(f"\nConnecting to session {session.id}...")
|
||||
container_manager.connect_session(session.id)
|
||||
else:
|
||||
console.print(
|
||||
f"\nConnect to the session with:\n mc session connect {session.id}"
|
||||
)
|
||||
else:
|
||||
console.print("[red]Failed to create session[/red]")
|
||||
|
||||
|
||||
@session_app.command("close")
|
||||
def close_session(
|
||||
session_id: Optional[str] = typer.Argument(None, help="Session ID to close"),
|
||||
all_sessions: bool = typer.Option(False, "--all", help="Close all active sessions"),
|
||||
) -> None:
|
||||
"""Close a MC session or all sessions"""
|
||||
if all_sessions:
|
||||
# Get sessions first to display them
|
||||
sessions = container_manager.list_sessions()
|
||||
if not sessions:
|
||||
console.print("No active sessions to close")
|
||||
return
|
||||
|
||||
console.print(f"Closing {len(sessions)} sessions...")
|
||||
|
||||
# Simple progress function that prints a line when a session is closed
|
||||
def update_progress(session_id, status, message):
|
||||
if status == "completed":
|
||||
console.print(
|
||||
f"[green]Session {session_id} closed successfully[/green]"
|
||||
)
|
||||
elif status == "failed":
|
||||
console.print(
|
||||
f"[red]Failed to close session {session_id}: {message}[/red]"
|
||||
)
|
||||
|
||||
# Start closing sessions with progress updates
|
||||
count, success = container_manager.close_all_sessions(update_progress)
|
||||
|
||||
# Final result
|
||||
if success:
|
||||
console.print(f"[green]{count} sessions closed successfully[/green]")
|
||||
else:
|
||||
console.print("[red]Failed to close all sessions[/red]")
|
||||
elif session_id:
|
||||
with console.status(f"Closing session {session_id}..."):
|
||||
success = container_manager.close_session(session_id)
|
||||
|
||||
if success:
|
||||
console.print(f"[green]Session {session_id} closed successfully[/green]")
|
||||
else:
|
||||
console.print(f"[red]Failed to close session {session_id}[/red]")
|
||||
else:
|
||||
console.print("[red]Error: Please provide a session ID or use --all flag[/red]")
|
||||
|
||||
|
||||
@session_app.command("connect")
|
||||
def connect_session(
|
||||
session_id: str = typer.Argument(..., help="Session ID to connect to"),
|
||||
) -> None:
|
||||
"""Connect to a MC session"""
|
||||
console.print(f"Connecting to session {session_id}...")
|
||||
success = container_manager.connect_session(session_id)
|
||||
|
||||
if not success:
|
||||
console.print(f"[red]Failed to connect to session {session_id}[/red]")
|
||||
|
||||
|
||||
@session_app.command("logs")
|
||||
def session_logs(
|
||||
session_id: str = typer.Argument(..., help="Session ID to get logs from"),
|
||||
follow: bool = typer.Option(False, "--follow", "-f", help="Follow log output"),
|
||||
init: bool = typer.Option(
|
||||
False, "--init", "-i", help="Show initialization logs instead of container logs"
|
||||
),
|
||||
) -> None:
|
||||
"""Stream logs from a MC session"""
|
||||
if init:
|
||||
# Show initialization logs
|
||||
if follow:
|
||||
console.print(
|
||||
f"Streaming initialization logs from session {session_id}... (Ctrl+C to exit)"
|
||||
)
|
||||
container_manager.get_init_logs(session_id, follow=True)
|
||||
else:
|
||||
logs = container_manager.get_init_logs(session_id)
|
||||
if logs:
|
||||
console.print(logs)
|
||||
else:
|
||||
# Show regular container logs
|
||||
if follow:
|
||||
console.print(
|
||||
f"Streaming logs from session {session_id}... (Ctrl+C to exit)"
|
||||
)
|
||||
container_manager.get_session_logs(session_id, follow=True)
|
||||
else:
|
||||
logs = container_manager.get_session_logs(session_id)
|
||||
if logs:
|
||||
console.print(logs)
|
||||
|
||||
|
||||
@app.command()
|
||||
def stop() -> None:
|
||||
"""Stop the current MC session (from inside the container)"""
|
||||
# Check if running inside a container
|
||||
if not os.path.exists("/.dockerenv"):
|
||||
console.print(
|
||||
"[red]This command can only be run from inside a MC container[/red]"
|
||||
)
|
||||
return
|
||||
|
||||
# Stop the container from inside
|
||||
console.print("Stopping the current session...")
|
||||
os.system("kill 1") # Send SIGTERM to PID 1 (container's init process)
|
||||
|
||||
|
||||
# Main CLI entry point that handles project repository URLs
|
||||
@app.command(name="")
|
||||
def quick_create(
|
||||
project: Optional[str] = typer.Argument(..., help="Project repository URL"),
|
||||
driver: Optional[str] = typer.Option(None, "--driver", "-d", help="Driver to use"),
|
||||
env: List[str] = typer.Option(
|
||||
[], "--env", "-e", help="Environment variables (KEY=VALUE)"
|
||||
),
|
||||
name: Optional[str] = typer.Option(None, "--name", "-n", help="Session name"),
|
||||
no_connect: bool = typer.Option(
|
||||
False, "--no-connect", help="Don't automatically connect to the session"
|
||||
),
|
||||
no_mount: bool = typer.Option(
|
||||
False,
|
||||
"--no-mount",
|
||||
help="Don't mount local directory to /app (ignored if a project is specified)",
|
||||
),
|
||||
) -> None:
|
||||
"""Create a new MC session with a project repository"""
|
||||
create_session(
|
||||
driver=driver,
|
||||
project=project,
|
||||
env=env,
|
||||
name=name,
|
||||
no_connect=no_connect,
|
||||
no_mount=no_mount,
|
||||
)
|
||||
|
||||
|
||||
@driver_app.command("list")
|
||||
def list_drivers() -> None:
|
||||
"""List available MC drivers"""
|
||||
drivers = config_manager.list_drivers()
|
||||
|
||||
if not drivers:
|
||||
console.print("No drivers found")
|
||||
return
|
||||
|
||||
table = Table(show_header=True, header_style="bold")
|
||||
table.add_column("Name")
|
||||
table.add_column("Description")
|
||||
table.add_column("Version")
|
||||
table.add_column("Maintainer")
|
||||
table.add_column("Image")
|
||||
|
||||
for name, driver in drivers.items():
|
||||
table.add_row(
|
||||
driver.name,
|
||||
driver.description,
|
||||
driver.version,
|
||||
driver.maintainer,
|
||||
driver.image,
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
|
||||
@driver_app.command("build")
|
||||
def build_driver(
|
||||
driver_name: str = typer.Argument(..., help="Driver name to build"),
|
||||
tag: str = typer.Option("latest", "--tag", "-t", help="Image tag"),
|
||||
push: bool = typer.Option(
|
||||
False, "--push", "-p", help="Push image to registry after building"
|
||||
),
|
||||
) -> None:
|
||||
"""Build a driver Docker image"""
|
||||
# Get driver path
|
||||
driver_path = config_manager.get_driver_path(driver_name)
|
||||
if not driver_path:
|
||||
console.print(f"[red]Driver '{driver_name}' not found[/red]")
|
||||
return
|
||||
|
||||
# Check if Dockerfile exists
|
||||
dockerfile_path = driver_path / "Dockerfile"
|
||||
if not dockerfile_path.exists():
|
||||
console.print(f"[red]Dockerfile not found in {driver_path}[/red]")
|
||||
return
|
||||
|
||||
# Build image name
|
||||
image_name = f"monadical/mc-{driver_name}:{tag}"
|
||||
|
||||
# Build the image
|
||||
with console.status(f"Building image {image_name}..."):
|
||||
result = os.system(f"cd {driver_path} && docker build -t {image_name} .")
|
||||
|
||||
if result != 0:
|
||||
console.print("[red]Failed to build driver image[/red]")
|
||||
return
|
||||
|
||||
console.print(f"[green]Successfully built image: {image_name}[/green]")
|
||||
|
||||
# Push if requested
|
||||
if push:
|
||||
with console.status(f"Pushing image {image_name}..."):
|
||||
result = os.system(f"docker push {image_name}")
|
||||
|
||||
if result != 0:
|
||||
console.print("[red]Failed to push driver image[/red]")
|
||||
return
|
||||
|
||||
console.print(f"[green]Successfully pushed image: {image_name}[/green]")
|
||||
|
||||
|
||||
@driver_app.command("info")
|
||||
def driver_info(
|
||||
driver_name: str = typer.Argument(..., help="Driver name to get info for"),
|
||||
) -> None:
|
||||
"""Show detailed information about a driver"""
|
||||
driver = config_manager.get_driver(driver_name)
|
||||
if not driver:
|
||||
console.print(f"[red]Driver '{driver_name}' not found[/red]")
|
||||
return
|
||||
|
||||
console.print(f"[bold]Driver: {driver.name}[/bold]")
|
||||
console.print(f"Description: {driver.description}")
|
||||
console.print(f"Version: {driver.version}")
|
||||
console.print(f"Maintainer: {driver.maintainer}")
|
||||
console.print(f"Image: {driver.image}")
|
||||
|
||||
if driver.ports:
|
||||
console.print("\n[bold]Ports:[/bold]")
|
||||
for port in driver.ports:
|
||||
console.print(f" {port}")
|
||||
|
||||
# Get driver path
|
||||
driver_path = config_manager.get_driver_path(driver_name)
|
||||
if driver_path:
|
||||
console.print(f"\n[bold]Path:[/bold] {driver_path}")
|
||||
|
||||
# Check for README
|
||||
readme_path = driver_path / "README.md"
|
||||
if readme_path.exists():
|
||||
console.print("\n[bold]README:[/bold]")
|
||||
with open(readme_path, "r") as f:
|
||||
console.print(f.read())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app()
|
||||
@@ -1,204 +0,0 @@
|
||||
import yaml
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
from .models import Config, Driver
|
||||
|
||||
DEFAULT_CONFIG_DIR = Path.home() / ".config" / "mc"
|
||||
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.yaml"
|
||||
DEFAULT_DRIVERS_DIR = Path.home() / ".config" / "mc" / "drivers"
|
||||
PROJECT_ROOT = Path(__file__).parent.parent
|
||||
BUILTIN_DRIVERS_DIR = PROJECT_ROOT / "drivers"
|
||||
|
||||
# Default built-in driver configurations
|
||||
DEFAULT_DRIVERS = {
|
||||
"goose": Driver(
|
||||
name="goose",
|
||||
description="Goose with MCP servers",
|
||||
version="1.0.0",
|
||||
maintainer="team@monadical.com",
|
||||
image="monadical/mc-goose:latest",
|
||||
ports=[8000, 22],
|
||||
),
|
||||
"aider": Driver(
|
||||
name="aider",
|
||||
description="Aider coding assistant",
|
||||
version="1.0.0",
|
||||
maintainer="team@monadical.com",
|
||||
image="monadical/mc-aider:latest",
|
||||
ports=[22],
|
||||
),
|
||||
"claude-code": Driver(
|
||||
name="claude-code",
|
||||
description="Claude Code environment",
|
||||
version="1.0.0",
|
||||
maintainer="team@monadical.com",
|
||||
image="monadical/mc-claude-code:latest",
|
||||
ports=[22],
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
class ConfigManager:
|
||||
def __init__(self, config_path: Optional[Path] = None):
|
||||
self.config_path = config_path or DEFAULT_CONFIG_FILE
|
||||
self.config_dir = self.config_path.parent
|
||||
self.drivers_dir = DEFAULT_DRIVERS_DIR
|
||||
self.config = self._load_or_create_config()
|
||||
|
||||
def _load_or_create_config(self) -> Config:
|
||||
"""Load existing config or create a new one with defaults"""
|
||||
if self.config_path.exists():
|
||||
try:
|
||||
with open(self.config_path, "r") as f:
|
||||
config_data = yaml.safe_load(f) or {}
|
||||
|
||||
# Create a new config from scratch, then update with data from file
|
||||
config = Config(
|
||||
docker=config_data.get("docker", {}),
|
||||
defaults=config_data.get("defaults", {}),
|
||||
)
|
||||
|
||||
# Add drivers
|
||||
if "drivers" in config_data:
|
||||
for driver_name, driver_data in config_data["drivers"].items():
|
||||
config.drivers[driver_name] = Driver.model_validate(driver_data)
|
||||
|
||||
# Add sessions (stored as simple dictionaries)
|
||||
if "sessions" in config_data:
|
||||
config.sessions = config_data["sessions"]
|
||||
|
||||
return config
|
||||
except Exception as e:
|
||||
print(f"Error loading config: {e}")
|
||||
return self._create_default_config()
|
||||
else:
|
||||
return self._create_default_config()
|
||||
|
||||
def _create_default_config(self) -> Config:
|
||||
"""Create a default configuration"""
|
||||
self.config_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.drivers_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Load built-in drivers from directories
|
||||
builtin_drivers = self.load_builtin_drivers()
|
||||
|
||||
# Merge with default drivers, with directory drivers taking precedence
|
||||
drivers = {**DEFAULT_DRIVERS, **builtin_drivers}
|
||||
|
||||
config = Config(
|
||||
docker={
|
||||
"socket": "/var/run/docker.sock",
|
||||
"network": "mc-network",
|
||||
},
|
||||
drivers=drivers,
|
||||
defaults={
|
||||
"driver": "goose",
|
||||
},
|
||||
)
|
||||
|
||||
self.save_config(config)
|
||||
return config
|
||||
|
||||
def save_config(self, config: Optional[Config] = None) -> None:
|
||||
"""Save the current config to disk"""
|
||||
if config:
|
||||
self.config = config
|
||||
|
||||
self.config_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Use model_dump with mode="json" for proper serialization of enums
|
||||
config_dict = self.config.model_dump(mode="json")
|
||||
|
||||
# Write to file
|
||||
with open(self.config_path, "w") as f:
|
||||
yaml.dump(config_dict, f)
|
||||
|
||||
def get_driver(self, name: str) -> Optional[Driver]:
|
||||
"""Get a driver by name"""
|
||||
return self.config.drivers.get(name)
|
||||
|
||||
def list_drivers(self) -> Dict[str, Driver]:
|
||||
"""List all available drivers"""
|
||||
return self.config.drivers
|
||||
|
||||
def add_session(self, session_id: str, session_data: dict) -> None:
|
||||
"""Add a session to the config"""
|
||||
# Store session data as a dictionary in the config
|
||||
self.config.sessions[session_id] = session_data
|
||||
self.save_config()
|
||||
|
||||
def remove_session(self, session_id: str) -> None:
|
||||
"""Remove a session from the config"""
|
||||
if session_id in self.config.sessions:
|
||||
del self.config.sessions[session_id]
|
||||
self.save_config()
|
||||
|
||||
def list_sessions(self) -> Dict:
|
||||
"""List all sessions in the config"""
|
||||
return self.config.sessions
|
||||
|
||||
def load_driver_from_dir(self, driver_dir: Path) -> Optional[Driver]:
|
||||
"""Load a driver configuration from a directory"""
|
||||
yaml_path = (
|
||||
driver_dir / "mai-driver.yaml"
|
||||
) # Keep this name for backward compatibility
|
||||
|
||||
if not yaml_path.exists():
|
||||
return None
|
||||
|
||||
try:
|
||||
with open(yaml_path, "r") as f:
|
||||
driver_data = yaml.safe_load(f)
|
||||
|
||||
# Extract required fields
|
||||
if not all(
|
||||
k in driver_data
|
||||
for k in ["name", "description", "version", "maintainer"]
|
||||
):
|
||||
print(f"Driver config {yaml_path} missing required fields")
|
||||
return None
|
||||
|
||||
# Create driver object
|
||||
driver = Driver(
|
||||
name=driver_data["name"],
|
||||
description=driver_data["description"],
|
||||
version=driver_data["version"],
|
||||
maintainer=driver_data["maintainer"],
|
||||
image=f"monadical/mc-{driver_data['name']}:latest",
|
||||
ports=driver_data.get("ports", []),
|
||||
)
|
||||
|
||||
return driver
|
||||
except Exception as e:
|
||||
print(f"Error loading driver from {yaml_path}: {e}")
|
||||
return None
|
||||
|
||||
def load_builtin_drivers(self) -> Dict[str, Driver]:
|
||||
"""Load all built-in drivers from the drivers directory"""
|
||||
drivers = {}
|
||||
|
||||
if not BUILTIN_DRIVERS_DIR.exists():
|
||||
return drivers
|
||||
|
||||
for driver_dir in BUILTIN_DRIVERS_DIR.iterdir():
|
||||
if driver_dir.is_dir():
|
||||
driver = self.load_driver_from_dir(driver_dir)
|
||||
if driver:
|
||||
drivers[driver.name] = driver
|
||||
|
||||
return drivers
|
||||
|
||||
def get_driver_path(self, driver_name: str) -> Optional[Path]:
|
||||
"""Get the directory path for a driver"""
|
||||
# Check built-in drivers first
|
||||
builtin_path = BUILTIN_DRIVERS_DIR / driver_name
|
||||
if builtin_path.exists() and builtin_path.is_dir():
|
||||
return builtin_path
|
||||
|
||||
# Then check user drivers
|
||||
user_path = self.drivers_dir / driver_name
|
||||
if user_path.exists() and user_path.is_dir():
|
||||
return user_path
|
||||
|
||||
return None
|
||||
@@ -1,409 +0,0 @@
|
||||
import os
|
||||
import sys
|
||||
import uuid
|
||||
import docker
|
||||
import concurrent.futures
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
from docker.errors import DockerException, ImageNotFound
|
||||
|
||||
from .models import Session, SessionStatus
|
||||
from .config import ConfigManager
|
||||
|
||||
|
||||
class ContainerManager:
|
||||
def __init__(self, config_manager: Optional[ConfigManager] = None):
|
||||
self.config_manager = config_manager or ConfigManager()
|
||||
try:
|
||||
self.client = docker.from_env()
|
||||
# Test connection
|
||||
self.client.ping()
|
||||
except DockerException as e:
|
||||
print(f"Error connecting to Docker: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
def _ensure_network(self) -> None:
|
||||
"""Ensure the MC network exists"""
|
||||
network_name = self.config_manager.config.docker.get("network", "mc-network")
|
||||
networks = self.client.networks.list(names=[network_name])
|
||||
if not networks:
|
||||
self.client.networks.create(network_name, driver="bridge")
|
||||
|
||||
def _generate_session_id(self) -> str:
|
||||
"""Generate a unique session ID"""
|
||||
return str(uuid.uuid4())[:8]
|
||||
|
||||
def list_sessions(self) -> List[Session]:
|
||||
"""List all active MC sessions"""
|
||||
sessions = []
|
||||
try:
|
||||
containers = self.client.containers.list(
|
||||
all=True, filters={"label": "mc.session"}
|
||||
)
|
||||
|
||||
for container in containers:
|
||||
container_id = container.id
|
||||
labels = container.labels
|
||||
|
||||
session_id = labels.get("mc.session.id")
|
||||
if not session_id:
|
||||
continue
|
||||
|
||||
status = SessionStatus.RUNNING
|
||||
if container.status == "exited":
|
||||
status = SessionStatus.STOPPED
|
||||
elif container.status == "created":
|
||||
status = SessionStatus.CREATING
|
||||
|
||||
session = Session(
|
||||
id=session_id,
|
||||
name=labels.get("mc.session.name", f"mc-{session_id}"),
|
||||
driver=labels.get("mc.driver", "unknown"),
|
||||
status=status,
|
||||
container_id=container_id,
|
||||
created_at=container.attrs["Created"],
|
||||
project=labels.get("mc.project"),
|
||||
)
|
||||
|
||||
# Get port mappings
|
||||
if container.attrs.get("NetworkSettings", {}).get("Ports"):
|
||||
ports = {}
|
||||
for container_port, host_ports in container.attrs[
|
||||
"NetworkSettings"
|
||||
]["Ports"].items():
|
||||
if host_ports:
|
||||
# Strip /tcp or /udp suffix and convert to int
|
||||
container_port_num = int(container_port.split("/")[0])
|
||||
host_port = int(host_ports[0]["HostPort"])
|
||||
ports[container_port_num] = host_port
|
||||
session.ports = ports
|
||||
|
||||
sessions.append(session)
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error listing sessions: {e}")
|
||||
|
||||
return sessions
|
||||
|
||||
def create_session(
|
||||
self,
|
||||
driver_name: str,
|
||||
project: Optional[str] = None,
|
||||
environment: Optional[Dict[str, str]] = None,
|
||||
session_name: Optional[str] = None,
|
||||
mount_local: bool = True,
|
||||
) -> Optional[Session]:
|
||||
"""Create a new MC session
|
||||
|
||||
Args:
|
||||
driver_name: The name of the driver to use
|
||||
project: Optional project repository URL
|
||||
environment: Optional environment variables
|
||||
session_name: Optional session name
|
||||
mount_local: Whether to mount the current directory to /app
|
||||
"""
|
||||
try:
|
||||
# Validate driver exists
|
||||
driver = self.config_manager.get_driver(driver_name)
|
||||
if not driver:
|
||||
print(f"Driver '{driver_name}' not found")
|
||||
return None
|
||||
|
||||
# Generate session ID and name
|
||||
session_id = self._generate_session_id()
|
||||
if not session_name:
|
||||
session_name = f"mc-{session_id}"
|
||||
|
||||
# Ensure network exists
|
||||
self._ensure_network()
|
||||
|
||||
# Prepare environment variables
|
||||
env_vars = environment or {}
|
||||
|
||||
# Add project URL to environment if provided
|
||||
if project:
|
||||
env_vars["MC_PROJECT_URL"] = project
|
||||
|
||||
# Pull image if needed
|
||||
try:
|
||||
self.client.images.get(driver.image)
|
||||
except ImageNotFound:
|
||||
print(f"Pulling image {driver.image}...")
|
||||
self.client.images.pull(driver.image)
|
||||
|
||||
# Set up volume mounts
|
||||
volumes = {}
|
||||
# If project URL is provided, don't mount local directory (will clone into /app)
|
||||
# If no project URL and mount_local is True, mount local directory to /app
|
||||
if not project and mount_local:
|
||||
# Mount current directory to /app in the container
|
||||
import os
|
||||
|
||||
current_dir = os.getcwd()
|
||||
volumes[current_dir] = {"bind": "/app", "mode": "rw"}
|
||||
print(f"Mounting local directory {current_dir} to /app")
|
||||
elif project:
|
||||
print(
|
||||
f"Project URL provided - container will clone {project} into /app during initialization"
|
||||
)
|
||||
|
||||
# Create container
|
||||
container = self.client.containers.create(
|
||||
image=driver.image,
|
||||
name=session_name,
|
||||
hostname=session_name,
|
||||
detach=True,
|
||||
tty=True,
|
||||
stdin_open=True,
|
||||
environment=env_vars,
|
||||
volumes=volumes,
|
||||
labels={
|
||||
"mc.session": "true",
|
||||
"mc.session.id": session_id,
|
||||
"mc.session.name": session_name,
|
||||
"mc.driver": driver_name,
|
||||
"mc.project": project or "",
|
||||
},
|
||||
network=self.config_manager.config.docker.get("network", "mc-network"),
|
||||
ports={f"{port}/tcp": None for port in driver.ports},
|
||||
)
|
||||
|
||||
# Start container
|
||||
container.start()
|
||||
|
||||
# Get updated port information
|
||||
container.reload()
|
||||
ports = {}
|
||||
if container.attrs.get("NetworkSettings", {}).get("Ports"):
|
||||
for container_port, host_ports in container.attrs["NetworkSettings"][
|
||||
"Ports"
|
||||
].items():
|
||||
if host_ports:
|
||||
container_port_num = int(container_port.split("/")[0])
|
||||
host_port = int(host_ports[0]["HostPort"])
|
||||
ports[container_port_num] = host_port
|
||||
|
||||
# Create session object
|
||||
session = Session(
|
||||
id=session_id,
|
||||
name=session_name,
|
||||
driver=driver_name,
|
||||
status=SessionStatus.RUNNING,
|
||||
container_id=container.id,
|
||||
environment=env_vars,
|
||||
project=project,
|
||||
created_at=container.attrs["Created"],
|
||||
ports=ports,
|
||||
)
|
||||
|
||||
# Save session to config as JSON-compatible dict
|
||||
self.config_manager.add_session(session_id, session.model_dump(mode="json"))
|
||||
|
||||
return session
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error creating session: {e}")
|
||||
return None
|
||||
|
||||
def close_session(self, session_id: str) -> bool:
|
||||
"""Close a MC session"""
|
||||
try:
|
||||
sessions = self.list_sessions()
|
||||
for session in sessions:
|
||||
if session.id == session_id:
|
||||
return self._close_single_session(session)
|
||||
|
||||
print(f"Session '{session_id}' not found")
|
||||
return False
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error closing session: {e}")
|
||||
return False
|
||||
|
||||
def connect_session(self, session_id: str) -> bool:
|
||||
"""Connect to a running MC session"""
|
||||
try:
|
||||
sessions = self.list_sessions()
|
||||
for session in sessions:
|
||||
if session.id == session_id and session.container_id:
|
||||
if session.status != SessionStatus.RUNNING:
|
||||
print(f"Session '{session_id}' is not running")
|
||||
return False
|
||||
|
||||
# Execute interactive shell in container
|
||||
# The init-status.sh script will automatically show logs if needed
|
||||
print(f"Connecting to session {session_id}...")
|
||||
os.system(f"docker exec -it {session.container_id} /bin/bash")
|
||||
return True
|
||||
|
||||
print(f"Session '{session_id}' not found")
|
||||
return False
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error connecting to session: {e}")
|
||||
return False
|
||||
|
||||
def _close_single_session(self, session: Session) -> bool:
|
||||
"""Close a single session (helper for parallel processing)
|
||||
|
||||
Args:
|
||||
session: The session to close
|
||||
|
||||
Returns:
|
||||
bool: Whether the session was successfully closed
|
||||
"""
|
||||
if not session.container_id:
|
||||
return False
|
||||
|
||||
try:
|
||||
container = self.client.containers.get(session.container_id)
|
||||
container.stop()
|
||||
container.remove()
|
||||
self.config_manager.remove_session(session.id)
|
||||
return True
|
||||
except DockerException as e:
|
||||
print(f"Error closing session {session.id}: {e}")
|
||||
return False
|
||||
|
||||
def close_all_sessions(self, progress_callback=None) -> Tuple[int, bool]:
|
||||
"""Close all MC sessions with parallel processing and progress reporting
|
||||
|
||||
Args:
|
||||
progress_callback: Optional callback function to report progress
|
||||
The callback should accept (session_id, status, message)
|
||||
|
||||
Returns:
|
||||
tuple: (number of sessions closed, success)
|
||||
"""
|
||||
try:
|
||||
sessions = self.list_sessions()
|
||||
if not sessions:
|
||||
return 0, True
|
||||
|
||||
# No need for session status as we receive it via callback
|
||||
|
||||
# Define a wrapper to track progress
|
||||
def close_with_progress(session):
|
||||
if not session.container_id:
|
||||
return False
|
||||
|
||||
try:
|
||||
container = self.client.containers.get(session.container_id)
|
||||
# Stop and remove container
|
||||
container.stop()
|
||||
container.remove()
|
||||
# Remove from config
|
||||
self.config_manager.remove_session(session.id)
|
||||
|
||||
# Notify about completion
|
||||
if progress_callback:
|
||||
progress_callback(
|
||||
session.id,
|
||||
"completed",
|
||||
f"{session.name} closed successfully",
|
||||
)
|
||||
|
||||
return True
|
||||
except DockerException as e:
|
||||
error_msg = f"Error: {str(e)}"
|
||||
if progress_callback:
|
||||
progress_callback(session.id, "failed", error_msg)
|
||||
print(f"Error closing session {session.id}: {e}")
|
||||
return False
|
||||
|
||||
# Use ThreadPoolExecutor to close sessions in parallel
|
||||
with concurrent.futures.ThreadPoolExecutor(
|
||||
max_workers=min(10, len(sessions))
|
||||
) as executor:
|
||||
# Submit all session closing tasks
|
||||
future_to_session = {
|
||||
executor.submit(close_with_progress, session): session
|
||||
for session in sessions
|
||||
}
|
||||
|
||||
# Collect results
|
||||
closed_count = 0
|
||||
for future in concurrent.futures.as_completed(future_to_session):
|
||||
session = future_to_session[future]
|
||||
try:
|
||||
success = future.result()
|
||||
if success:
|
||||
closed_count += 1
|
||||
except Exception as e:
|
||||
print(f"Error closing session {session.id}: {e}")
|
||||
|
||||
return closed_count, closed_count > 0
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error closing all sessions: {e}")
|
||||
return 0, False
|
||||
|
||||
def get_session_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
|
||||
"""Get logs from a MC session"""
|
||||
try:
|
||||
sessions = self.list_sessions()
|
||||
for session in sessions:
|
||||
if session.id == session_id and session.container_id:
|
||||
container = self.client.containers.get(session.container_id)
|
||||
if follow:
|
||||
for line in container.logs(stream=True, follow=True):
|
||||
print(line.decode().strip())
|
||||
return None
|
||||
else:
|
||||
return container.logs().decode()
|
||||
|
||||
print(f"Session '{session_id}' not found")
|
||||
return None
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error getting session logs: {e}")
|
||||
return None
|
||||
|
||||
def get_init_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
|
||||
"""Get initialization logs from a MC session
|
||||
|
||||
Args:
|
||||
session_id: The session ID
|
||||
follow: Whether to follow the logs
|
||||
|
||||
Returns:
|
||||
The logs as a string, or None if there was an error
|
||||
"""
|
||||
try:
|
||||
sessions = self.list_sessions()
|
||||
for session in sessions:
|
||||
if session.id == session_id and session.container_id:
|
||||
container = self.client.containers.get(session.container_id)
|
||||
|
||||
# Check if initialization is complete
|
||||
init_complete = False
|
||||
try:
|
||||
exit_code, output = container.exec_run(
|
||||
"grep -q 'INIT_COMPLETE=true' /init.status"
|
||||
)
|
||||
init_complete = exit_code == 0
|
||||
except DockerException:
|
||||
pass
|
||||
|
||||
if follow and not init_complete:
|
||||
print(
|
||||
f"Following initialization logs for session {session_id}..."
|
||||
)
|
||||
print("Press Ctrl+C to stop following")
|
||||
container.exec_run(
|
||||
"tail -f /init.log", stream=True, demux=True, tty=True
|
||||
)
|
||||
return None
|
||||
else:
|
||||
exit_code, output = container.exec_run("cat /init.log")
|
||||
if exit_code == 0:
|
||||
return output.decode()
|
||||
else:
|
||||
print("No initialization logs found")
|
||||
return None
|
||||
|
||||
print(f"Session '{session_id}' not found")
|
||||
return None
|
||||
|
||||
except DockerException as e:
|
||||
print(f"Error getting initialization logs: {e}")
|
||||
return None
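For illustration only, a short sketch of how the two log helpers above might be used together (manager is an assumed ContainerManager instance and the session id is hypothetical):

session_id = "0123abcd"  # hypothetical session id

init_log = manager.get_init_logs(session_id)        # returns the log text, or None
if init_log is not None:
    print(init_log)

runtime_log = manager.get_session_logs(session_id)  # full container logs as a string
# With follow=True either helper streams to stdout and returns None instead.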
|
||||
@@ -1,28 +0,0 @@
|
||||
"""
|
||||
Base driver implementation for MAI
|
||||
"""
|
||||
|
||||
from typing import Dict, Optional
|
||||
|
||||
from ..models import Driver
|
||||
|
||||
|
||||
class DriverManager:
|
||||
"""Manager for MAI drivers"""
|
||||
|
||||
@staticmethod
|
||||
def get_default_drivers() -> Dict[str, Driver]:
|
||||
"""Get the default built-in drivers"""
|
||||
from ..config import DEFAULT_DRIVERS
|
||||
|
||||
return DEFAULT_DRIVERS
|
||||
|
||||
@staticmethod
|
||||
def get_driver_metadata(driver_name: str) -> Optional[Dict]:
|
||||
"""Get metadata for a specific driver"""
|
||||
from ..config import DEFAULT_DRIVERS
|
||||
|
||||
if driver_name in DEFAULT_DRIVERS:
|
||||
return DEFAULT_DRIVERS[driver_name].model_dump()
|
||||
|
||||
return None
|
||||
@@ -1,50 +0,0 @@
|
||||
from enum import Enum
|
||||
from typing import Dict, List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class SessionStatus(str, Enum):
|
||||
CREATING = "creating"
|
||||
RUNNING = "running"
|
||||
STOPPED = "stopped"
|
||||
FAILED = "failed"
|
||||
|
||||
|
||||
class DriverEnvironmentVariable(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
required: bool = False
|
||||
default: Optional[str] = None
|
||||
sensitive: bool = False
|
||||
|
||||
|
||||
class Driver(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
version: str
|
||||
maintainer: str
|
||||
image: str
|
||||
environment: List[DriverEnvironmentVariable] = []
|
||||
ports: List[int] = []
|
||||
volumes: List[Dict[str, str]] = []
|
||||
|
||||
|
||||
class Session(BaseModel):
|
||||
id: str
|
||||
name: str
|
||||
driver: str
|
||||
status: SessionStatus
|
||||
container_id: Optional[str] = None
|
||||
environment: Dict[str, str] = Field(default_factory=dict)
|
||||
project: Optional[str] = None
|
||||
created_at: str
|
||||
ports: Dict[int, int] = Field(default_factory=dict)
|
||||
|
||||
|
||||
class Config(BaseModel):
|
||||
docker: Dict[str, str] = Field(default_factory=dict)
|
||||
drivers: Dict[str, Driver] = Field(default_factory=dict)
|
||||
sessions: Dict[str, dict] = Field(
|
||||
default_factory=dict
|
||||
) # Store as dict to avoid serialization issues
|
||||
defaults: Dict[str, str] = Field(default_factory=dict)
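The models above are removed in this change; the shape of their replacement in cubbi.models can only be inferred from how the test fixtures further below construct Session objects. A hedged, self-contained sketch of that inferred shape (field names and types are assumptions, not taken from the repository):

from enum import Enum
from typing import Dict, Optional

from pydantic import BaseModel, Field


class SessionStatus(str, Enum):
    CREATING = "creating"
    RUNNING = "running"
    STOPPED = "stopped"
    FAILED = "failed"


class Session(BaseModel):
    id: str
    name: str
    image: str  # appears to replace the old `driver` field
    status: SessionStatus
    container_id: Optional[str] = None
    ports: Dict[str, str] = Field(default_factory=dict)  # assumed; tests pass {"8080": "8080"}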
|
||||
@@ -1,14 +0,0 @@
|
||||
"""
|
||||
MC Service - Container Management Web Service
|
||||
(This is a placeholder for Phase 2)
|
||||
"""
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""Run the MC service"""
|
||||
print("MC Service - Container Management Web Service")
|
||||
print("This feature will be implemented in Phase 2")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,7 +1,7 @@
 [project]
-name = "mcontainer"
+name = "cubbi"
 version = "0.1.0"
-description = "Monadical Container Tool"
+description = "Cubbi Container Tool"
 readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
@@ -24,7 +24,8 @@ dev = [
 ]
 
 [project.scripts]
-mc = "mcontainer.cli:app"
+cubbi = "cubbi.cli:app"
+cubbix = "cubbi.cli:session_create_entry_point"
 
 [tool.ruff]
 line-length = 88
@@ -36,3 +37,8 @@ warn_return_any = true
 warn_unused_configs = true
 disallow_untyped_defs = true
 disallow_incomplete_defs = true
+
+[dependency-groups]
+dev = [
+    "pytest>=8.3.5",
+]
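Both console scripts above point at callables in cubbi.cli; as a rough illustration (not part of this change), running the installed cubbi command is equivalent to:

# Rough equivalent of the generated `cubbi` console-script wrapper.
from cubbi.cli import app

if __name__ == "__main__":
    app()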
tests/conftest.py (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
Common test fixtures for Cubbi Container tests.
|
||||
"""
|
||||
|
||||
import uuid
|
||||
import tempfile
|
||||
import pytest
|
||||
import docker
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
from cubbi.container import ContainerManager
|
||||
from cubbi.session import SessionManager
|
||||
from cubbi.config import ConfigManager
|
||||
from cubbi.models import Session, SessionStatus
|
||||
from cubbi.user_config import UserConfigManager
|
||||
|
||||
|
||||
# Check if Docker is available
|
||||
def is_docker_available():
|
||||
"""Check if Docker is available and running."""
|
||||
try:
|
||||
client = docker.from_env()
|
||||
client.ping()
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
# Register custom mark for Docker-dependent tests
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line(
|
||||
"markers", "requires_docker: mark test that requires Docker to be running"
|
||||
)
|
||||
|
||||
|
||||
# Decorator to mark tests that require Docker
|
||||
requires_docker = pytest.mark.skipif(
|
||||
not is_docker_available(),
|
||||
reason="Docker is not available or not running",
|
||||
)
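As a small illustration (hypothetical test, not from the repository), a test opts in to the Docker requirement simply by applying this marker:

@requires_docker
def test_docker_daemon_reachable() -> None:
    # Skipped automatically when the Docker daemon is not available.
    assert is_docker_available()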
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_dir():
|
||||
"""Create a temporary directory for test files."""
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
yield Path(tmp_dir)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_config_dir():
|
||||
"""Create a temporary directory for configuration files."""
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
yield Path(temp_dir)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_config(temp_config_dir):
|
||||
"""Provide an isolated UserConfigManager instance."""
|
||||
config_path = temp_config_dir / "config.yaml"
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
return UserConfigManager(str(config_path))
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_session_manager(temp_config_dir):
|
||||
"""Create an isolated session manager for testing."""
|
||||
sessions_path = temp_config_dir / "sessions.yaml"
|
||||
return SessionManager(sessions_path)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def isolated_config_manager():
|
||||
"""Create an isolated config manager for testing."""
|
||||
config_manager = ConfigManager()
|
||||
# Ensure we're using the built-in images, not trying to load from user config
|
||||
return config_manager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_session_manager():
|
||||
"""Mock the SessionManager class."""
|
||||
with patch("cubbi.cli.session_manager") as mock_manager:
|
||||
yield mock_manager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_container_manager():
|
||||
"""Mock the ContainerManager class with proper initialization."""
|
||||
mock_session = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={"8080": "8080"},
|
||||
)
|
||||
|
||||
with patch("cubbi.cli.container_manager") as mock_manager:
|
||||
# Set behaviors to avoid TypeErrors
|
||||
mock_manager.list_sessions.return_value = []
|
||||
mock_manager.create_session.return_value = mock_session
|
||||
mock_manager.close_session.return_value = True
|
||||
mock_manager.close_all_sessions.return_value = (3, True)
|
||||
# MCP-related mocks
|
||||
mock_manager.get_mcp_status.return_value = {
|
||||
"status": "running",
|
||||
"container_id": "test-id",
|
||||
}
|
||||
mock_manager.start_mcp.return_value = {
|
||||
"status": "running",
|
||||
"container_id": "test-id",
|
||||
}
|
||||
mock_manager.stop_mcp.return_value = True
|
||||
mock_manager.restart_mcp.return_value = {
|
||||
"status": "running",
|
||||
"container_id": "test-id",
|
||||
}
|
||||
mock_manager.get_mcp_logs.return_value = "Test log output"
|
||||
yield mock_manager
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def container_manager(isolated_session_manager, isolated_config_manager):
|
||||
"""Create a container manager with isolated components."""
|
||||
return ContainerManager(
|
||||
config_manager=isolated_config_manager, session_manager=isolated_session_manager
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def cli_runner():
|
||||
"""Provide a CLI runner for testing commands."""
|
||||
from typer.testing import CliRunner
|
||||
|
||||
return CliRunner()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_file_content(temp_dir):
|
||||
"""Create a test file with content in the temporary directory."""
|
||||
test_content = "This is a test file for volume mounting"
|
||||
test_file = temp_dir / "test_volume_file.txt"
|
||||
with open(test_file, "w") as f:
|
||||
f.write(test_content)
|
||||
return test_file, test_content
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def test_network_name():
|
||||
"""Generate a unique network name for testing."""
|
||||
return f"cubbi-test-network-{uuid.uuid4().hex[:8]}"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def docker_test_network(test_network_name):
|
||||
"""Create a Docker network for testing and clean it up after."""
|
||||
if not is_docker_available():
|
||||
pytest.skip("Docker is not available")
|
||||
return None
|
||||
|
||||
client = docker.from_env()
|
||||
network = client.networks.create(test_network_name, driver="bridge")
|
||||
|
||||
yield test_network_name
|
||||
|
||||
# Clean up
|
||||
try:
|
||||
network.remove()
|
||||
except Exception:
|
||||
# Network might be in use by other containers
|
||||
pass
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def patched_config_manager(isolated_config):
|
||||
"""Patch the UserConfigManager in cli.py to use our isolated instance."""
|
||||
with patch("cubbi.cli.user_config", isolated_config):
|
||||
yield isolated_config
|
||||
@@ -1,5 +1,6 @@
|
||||
from typer.testing import CliRunner
|
||||
from mcontainer.cli import app
|
||||
|
||||
from cubbi.cli import app
|
||||
|
||||
runner = CliRunner()
|
||||
|
||||
@@ -8,15 +9,15 @@ def test_version() -> None:
|
||||
"""Test version command"""
|
||||
result = runner.invoke(app, ["version"])
|
||||
assert result.exit_code == 0
|
||||
assert "MC - Monadical Container Tool" in result.stdout
|
||||
assert "Cubbi - Cubbi Container Tool" in result.stdout
|
||||
|
||||
|
||||
def test_session_list() -> None:
|
||||
"""Test session list command"""
|
||||
result = runner.invoke(app, ["session", "list"])
|
||||
assert result.exit_code == 0
|
||||
# Could be either "No active sessions found" or a table of sessions
|
||||
assert "sessions" in result.stdout.lower() or "no active" in result.stdout.lower()
|
||||
# Could be either "No active sessions found" or a table with headers
|
||||
assert "no active" in result.stdout.lower() or "id" in result.stdout.lower()
|
||||
|
||||
|
||||
def test_help() -> None:
|
||||
@@ -24,4 +25,4 @@ def test_help() -> None:
|
||||
result = runner.invoke(app, ["--help"])
|
||||
assert result.exit_code == 0
|
||||
assert "Usage" in result.stdout
|
||||
assert "Monadical Container Tool" in result.stdout
|
||||
assert "Cubbi Container Tool" in result.stdout
|
||||
|
||||
tests/test_config_commands.py (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
"""
|
||||
Tests for the configuration management commands.
|
||||
"""
|
||||
|
||||
from cubbi.cli import app
|
||||
|
||||
|
||||
def test_config_list(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config list' command."""
|
||||
result = cli_runner.invoke(app, ["config", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration" in result.stdout
|
||||
assert "Value" in result.stdout
|
||||
|
||||
# Check for default configurations
|
||||
assert "defaults.image" in result.stdout
|
||||
assert "defaults.connect" in result.stdout
|
||||
assert "defaults.mount_local" in result.stdout
|
||||
|
||||
|
||||
def test_config_get(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config get' command."""
|
||||
# Test getting an existing value
|
||||
result = cli_runner.invoke(app, ["config", "get", "defaults.image"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "defaults.image" in result.stdout
|
||||
assert "goose" in result.stdout
|
||||
|
||||
# Test getting a non-existent value
|
||||
result = cli_runner.invoke(app, ["config", "get", "nonexistent.key"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "not found" in result.stdout
|
||||
|
||||
|
||||
def test_config_set(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config set' command."""
|
||||
# Test setting a string value
|
||||
result = cli_runner.invoke(app, ["config", "set", "defaults.image", "claude"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("defaults.image") == "claude"
|
||||
|
||||
# Test setting a boolean value
|
||||
result = cli_runner.invoke(app, ["config", "set", "defaults.connect", "false"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("defaults.connect") is False
|
||||
|
||||
# Test setting a new value
|
||||
result = cli_runner.invoke(app, ["config", "set", "new.setting", "value"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("new.setting") == "value"
|
||||
|
||||
|
||||
def test_volume_list_empty(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config volume list' command with no volumes."""
|
||||
result = cli_runner.invoke(app, ["config", "volume", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No default volumes configured" in result.stdout
|
||||
|
||||
|
||||
def test_volume_add_and_list(cli_runner, patched_config_manager, temp_config_dir):
|
||||
"""Test adding a volume and then listing it."""
|
||||
# Create a test directory
|
||||
test_dir = temp_config_dir / "test_dir"
|
||||
test_dir.mkdir()
|
||||
|
||||
# Add a volume
|
||||
result = cli_runner.invoke(
|
||||
app, ["config", "volume", "add", f"{test_dir}:/container/path"]
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Added volume" in result.stdout
|
||||
|
||||
# Verify volume was added to the configuration
|
||||
volumes = patched_config_manager.get("defaults.volumes", [])
|
||||
assert f"{test_dir}:/container/path" in volumes
|
||||
|
||||
# List volumes - just check the command runs without error
|
||||
result = cli_runner.invoke(app, ["config", "volume", "list"])
|
||||
assert result.exit_code == 0
|
||||
assert "/container/path" in result.stdout
|
||||
|
||||
|
||||
def test_volume_remove(cli_runner, patched_config_manager, temp_config_dir):
|
||||
"""Test removing a volume."""
|
||||
# Create a test directory
|
||||
test_dir = temp_config_dir / "test_dir"
|
||||
test_dir.mkdir()
|
||||
|
||||
# Add a volume
|
||||
patched_config_manager.set("defaults.volumes", [f"{test_dir}:/container/path"])
|
||||
|
||||
# Remove the volume
|
||||
result = cli_runner.invoke(app, ["config", "volume", "remove", f"{test_dir}"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Removed volume" in result.stdout
|
||||
|
||||
# Verify it's gone
|
||||
volumes = patched_config_manager.get("defaults.volumes")
|
||||
assert len(volumes) == 0
|
||||
|
||||
|
||||
def test_volume_add_nonexistent_path(cli_runner, patched_config_manager, monkeypatch):
|
||||
"""Test adding a volume with a nonexistent path."""
|
||||
nonexistent_path = "/path/that/does/not/exist"
|
||||
|
||||
# Mock typer.confirm to return True
|
||||
monkeypatch.setattr("typer.confirm", lambda message: True)
|
||||
|
||||
# Add a volume with nonexistent path
|
||||
result = cli_runner.invoke(
|
||||
app, ["config", "volume", "add", f"{nonexistent_path}:/container/path"]
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Warning: Local path" in result.stdout
|
||||
assert "Added volume" in result.stdout
|
||||
|
||||
# Verify it was added
|
||||
volumes = patched_config_manager.get("defaults.volumes")
|
||||
assert f"{nonexistent_path}:/container/path" in volumes
|
||||
|
||||
|
||||
def test_network_list_empty(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config network list' command with no networks."""
|
||||
result = cli_runner.invoke(app, ["config", "network", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No default networks configured" in result.stdout
|
||||
|
||||
|
||||
def test_network_add_and_list(cli_runner, patched_config_manager):
|
||||
"""Test adding a network and then listing it."""
|
||||
# Add a network
|
||||
result = cli_runner.invoke(app, ["config", "network", "add", "test-network"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Added network" in result.stdout
|
||||
|
||||
# List networks
|
||||
result = cli_runner.invoke(app, ["config", "network", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "test-network" in result.stdout
|
||||
|
||||
|
||||
def test_network_remove(cli_runner, patched_config_manager):
|
||||
"""Test removing a network."""
|
||||
# Add a network
|
||||
patched_config_manager.set("defaults.networks", ["test-network"])
|
||||
|
||||
# Remove the network
|
||||
result = cli_runner.invoke(app, ["config", "network", "remove", "test-network"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Removed network" in result.stdout
|
||||
|
||||
# Verify it's gone
|
||||
networks = patched_config_manager.get("defaults.networks")
|
||||
assert len(networks) == 0
|
||||
|
||||
|
||||
def test_config_reset(cli_runner, patched_config_manager, monkeypatch):
|
||||
"""Test resetting the configuration."""
|
||||
# Set a custom value first
|
||||
patched_config_manager.set("defaults.image", "custom-image")
|
||||
|
||||
# Mock typer.confirm to return True
|
||||
monkeypatch.setattr("typer.confirm", lambda message: True)
|
||||
|
||||
# Reset config
|
||||
result = cli_runner.invoke(app, ["config", "reset"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration reset to defaults" in result.stdout
|
||||
|
||||
# Verify it was reset
|
||||
assert patched_config_manager.get("defaults.image") == "goose"
|
||||
|
||||
|
||||
# patched_config_manager fixture is now in conftest.py
|
||||
tests/test_integration_docker.py (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
"""
|
||||
Integration tests for Docker interactions in Cubbi Container.
|
||||
These tests require Docker to be running.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import time
|
||||
import uuid
|
||||
|
||||
# Import the requires_docker decorator from conftest
|
||||
from conftest import requires_docker
|
||||
|
||||
|
||||
def execute_command_in_container(container_id, command):
|
||||
"""Execute a command in a Docker container and return the output."""
|
||||
result = subprocess.run(
|
||||
["docker", "exec", container_id, "bash", "-c", command],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
return result.stdout.strip()
|
||||
|
||||
|
||||
@requires_docker
|
||||
def test_integration_session_create_with_volumes(container_manager, test_file_content):
|
||||
"""Test creating a session with a volume mount."""
|
||||
test_file, test_content = test_file_content
|
||||
session = None
|
||||
|
||||
try:
|
||||
# Create a session with a volume mount
|
||||
session = container_manager.create_session(
|
||||
image_name="goose",
|
||||
session_name=f"cubbi-test-volume-{uuid.uuid4().hex[:8]}",
|
||||
mount_local=False, # Don't mount current directory
|
||||
volumes={str(test_file): {"bind": "/test/volume_test.txt", "mode": "ro"}},
|
||||
)
|
||||
|
||||
assert session is not None
|
||||
assert session.status == "running"
|
||||
|
||||
# Give container time to fully start
|
||||
time.sleep(2)
|
||||
|
||||
# Verify the file exists in the container and has correct content
|
||||
container_content = execute_command_in_container(
|
||||
session.container_id, "cat /test/volume_test.txt"
|
||||
)
|
||||
|
||||
assert container_content == test_content
|
||||
|
||||
finally:
|
||||
# Clean up the container
|
||||
if session and session.container_id:
|
||||
container_manager.close_session(session.id)
|
||||
|
||||
|
||||
@requires_docker
|
||||
def test_integration_session_create_with_networks(
|
||||
container_manager, docker_test_network
|
||||
):
|
||||
"""Test creating a session connected to a custom network."""
|
||||
session = None
|
||||
|
||||
try:
|
||||
# Create a session with the test network
|
||||
session = container_manager.create_session(
|
||||
image_name="goose",
|
||||
session_name=f"cubbi-test-network-{uuid.uuid4().hex[:8]}",
|
||||
mount_local=False, # Don't mount current directory
|
||||
networks=[docker_test_network],
|
||||
)
|
||||
|
||||
assert session is not None
|
||||
assert session.status == "running"
|
||||
|
||||
# Give container time to fully start
|
||||
time.sleep(2)
|
||||
|
||||
# Verify the container is connected to the test network
|
||||
# Use inspect to check network connections
|
||||
import docker
|
||||
|
||||
client = docker.from_env()
|
||||
container = client.containers.get(session.container_id)
|
||||
container_networks = container.attrs["NetworkSettings"]["Networks"]
|
||||
|
||||
# Container should be connected to both the default cubbi-network and our test network
|
||||
assert docker_test_network in container_networks
|
||||
|
||||
# Verify network interface exists in container
|
||||
network_interfaces = execute_command_in_container(
|
||||
session.container_id, "ip link show | grep -v 'lo' | wc -l"
|
||||
)
|
||||
|
||||
# Should have at least 2 interfaces (eth0 for cubbi-network, eth1 for test network)
|
||||
assert int(network_interfaces) >= 2
|
||||
|
||||
finally:
|
||||
# Clean up the container
|
||||
if session and session.container_id:
|
||||
container_manager.close_session(session.id)
|
||||
tests/test_mcp_commands.py (new file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
"""
|
||||
Tests for the MCP server management commands.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import patch
|
||||
from cubbi.cli import app
|
||||
|
||||
|
||||
def test_mcp_list_empty(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi mcp list' command with no MCPs configured."""
|
||||
# Make sure mcps is empty
|
||||
patched_config_manager.set("mcps", [])
|
||||
|
||||
with patch("cubbi.cli.mcp_manager.list_mcps") as mock_list_mcps:
|
||||
mock_list_mcps.return_value = []
|
||||
|
||||
result = cli_runner.invoke(app, ["mcp", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No MCP servers configured" in result.stdout
|
||||
|
||||
|
||||
def test_mcp_add_remote(cli_runner, patched_config_manager):
|
||||
"""Test adding a remote MCP server and listing it."""
|
||||
# Add a remote MCP server
|
||||
result = cli_runner.invoke(
|
||||
app,
|
||||
[
|
||||
"mcp",
|
||||
"add-remote",
|
||||
"test-remote-mcp",
|
||||
"http://mcp-server.example.com/sse",
|
||||
"--header",
|
||||
"Authorization=Bearer test-token",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Added remote MCP server" in result.stdout
|
||||
|
||||
# List MCP servers
|
||||
result = cli_runner.invoke(app, ["mcp", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "test-remote-mcp" in result.stdout
|
||||
assert "remote" in result.stdout
|
||||
# Check partial URL since it may be truncated in the table display
|
||||
assert "http://mcp-se" in result.stdout # Truncated in table view
|
||||
|
||||
|
||||
def test_mcp_add(cli_runner, patched_config_manager):
|
||||
"""Test adding a proxy-based MCP server and listing it."""
|
||||
# Add a Docker MCP server
|
||||
result = cli_runner.invoke(
|
||||
app,
|
||||
[
|
||||
"mcp",
|
||||
"add",
|
||||
"test-docker-mcp",
|
||||
"mcp/github:latest",
|
||||
"--command",
|
||||
"github-mcp",
|
||||
"--env",
|
||||
"GITHUB_TOKEN=test-token",
|
||||
],
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Added MCP server" in result.stdout
|
||||
|
||||
# List MCP servers
|
||||
result = cli_runner.invoke(app, ["mcp", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "test-docker-mcp" in result.stdout
|
||||
assert "proxy" in result.stdout # It's a proxy-based MCP
|
||||
assert "mcp/github:la" in result.stdout # Truncated in table view
|
||||
|
||||
|
||||
def test_mcp_remove(cli_runner, patched_config_manager):
|
||||
"""Test removing an MCP server."""
|
||||
# Add a remote MCP server
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-mcp",
|
||||
"type": "remote",
|
||||
"url": "http://test-server.com/sse",
|
||||
"headers": {"Authorization": "Bearer test-token"},
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the get_mcp and remove_mcp methods
|
||||
with patch("cubbi.cli.mcp_manager.get_mcp") as mock_get_mcp:
|
||||
# First make get_mcp return our MCP
|
||||
mock_get_mcp.return_value = {
|
||||
"name": "test-mcp",
|
||||
"type": "remote",
|
||||
"url": "http://test-server.com/sse",
|
||||
"headers": {"Authorization": "Bearer test-token"},
|
||||
}
|
||||
|
||||
# Remove the MCP server
|
||||
result = cli_runner.invoke(app, ["mcp", "remove", "test-mcp"])
|
||||
|
||||
# Just check it ran successfully with exit code 0
|
||||
assert result.exit_code == 0
|
||||
|
||||
|
||||
@pytest.mark.requires_docker
|
||||
def test_mcp_status(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test the MCP status command."""
|
||||
# Add a Docker MCP
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
"env": {"TEST_ENV": "test-value"},
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# First mock get_mcp to return our MCP config
|
||||
with patch("cubbi.cli.mcp_manager.get_mcp") as mock_get_mcp:
|
||||
mock_get_mcp.return_value = {
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
"env": {"TEST_ENV": "test-value"},
|
||||
}
|
||||
|
||||
# Then mock the get_mcp_status method
|
||||
with patch("cubbi.cli.mcp_manager.get_mcp_status") as mock_get_status:
|
||||
mock_get_status.return_value = {
|
||||
"status": "running",
|
||||
"container_id": "test-container-id",
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"ports": {"8080/tcp": 8080},
|
||||
"created": "2023-01-01T00:00:00Z",
|
||||
}
|
||||
|
||||
# Check MCP status
|
||||
result = cli_runner.invoke(app, ["mcp", "status", "test-docker-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "test-docker-mcp" in result.stdout
|
||||
assert "running" in result.stdout
|
||||
assert "mcp/test:latest" in result.stdout
|
||||
|
||||
|
||||
@pytest.mark.requires_docker
|
||||
def test_mcp_start(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test starting an MCP server."""
|
||||
# Add a Docker MCP
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the start operation
|
||||
mock_container_manager.start_mcp.return_value = {
|
||||
"container_id": "test-container-id",
|
||||
"status": "running",
|
||||
}
|
||||
|
||||
# Start the MCP
|
||||
result = cli_runner.invoke(app, ["mcp", "start", "test-docker-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Started MCP server" in result.stdout
|
||||
assert "test-docker-mcp" in result.stdout
|
||||
|
||||
|
||||
@pytest.mark.requires_docker
|
||||
def test_mcp_stop(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test stopping an MCP server."""
|
||||
# Add a Docker MCP
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the stop operation
|
||||
mock_container_manager.stop_mcp.return_value = True
|
||||
|
||||
# Stop the MCP
|
||||
result = cli_runner.invoke(app, ["mcp", "stop", "test-docker-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Stopped and removed MCP server" in result.stdout
|
||||
assert "test-docker-mcp" in result.stdout
|
||||
|
||||
|
||||
@pytest.mark.requires_docker
|
||||
def test_mcp_restart(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test restarting an MCP server."""
|
||||
# Add a Docker MCP
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the restart operation
|
||||
mock_container_manager.restart_mcp.return_value = {
|
||||
"container_id": "test-container-id",
|
||||
"status": "running",
|
||||
}
|
||||
|
||||
# Restart the MCP
|
||||
result = cli_runner.invoke(app, ["mcp", "restart", "test-docker-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Restarted MCP server" in result.stdout
|
||||
assert "test-docker-mcp" in result.stdout
|
||||
|
||||
|
||||
@pytest.mark.requires_docker
|
||||
def test_mcp_logs(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test viewing MCP server logs."""
|
||||
# Add a Docker MCP
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-docker-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the logs operation
|
||||
with patch("cubbi.cli.mcp_manager.get_mcp_logs") as mock_get_logs:
|
||||
mock_get_logs.return_value = "Test log output"
|
||||
|
||||
# View MCP logs
|
||||
result = cli_runner.invoke(app, ["mcp", "logs", "test-docker-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Test log output" in result.stdout
|
||||
|
||||
|
||||
def test_session_with_mcp(cli_runner, patched_config_manager, mock_container_manager):
|
||||
"""Test creating a session with an MCP server attached."""
|
||||
# Add an MCP server
|
||||
patched_config_manager.set(
|
||||
"mcps",
|
||||
[
|
||||
{
|
||||
"name": "test-mcp",
|
||||
"type": "docker",
|
||||
"image": "mcp/test:latest",
|
||||
"command": "test-command",
|
||||
}
|
||||
],
|
||||
)
|
||||
|
||||
# Mock the session creation with MCP
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# timestamp no longer needed since we don't use created_at in Session
|
||||
mock_container_manager.create_session.return_value = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
container_id="test-container-id",
|
||||
ports={},
|
||||
)
|
||||
|
||||
# Create a session with MCP
|
||||
result = cli_runner.invoke(app, ["session", "create", "--mcp", "test-mcp"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
assert "test-session" in result.stdout
|
||||
# Check that the create_session was called with the mcp parameter
|
||||
assert mock_container_manager.create_session.called
|
||||
# The keyword arguments are in the second element of call_args
|
||||
kwargs = mock_container_manager.create_session.call_args[1]
|
||||
assert "mcp" in kwargs
|
||||
assert "test-mcp" in kwargs["mcp"]
|
||||
tests/test_mcp_port_binding.py (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
Integration test for MCP port binding.
|
||||
"""
|
||||
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from conftest import requires_docker
|
||||
from cubbi.mcp import MCPManager
|
||||
|
||||
|
||||
@requires_docker
|
||||
def test_mcp_port_binding():
|
||||
"""Test that MCP containers don't bind to host ports."""
|
||||
mcp_manager = MCPManager()
|
||||
|
||||
# Add a proxy MCP
|
||||
mcp_name = f"test-mcp-{uuid.uuid4().hex[:8]}"
|
||||
mcp_name2 = None
|
||||
|
||||
try:
|
||||
# Let's check if host port binding was removed
|
||||
mcps_before = len(mcp_manager.list_mcp_containers())
|
||||
|
||||
# Use alpine image for a simple test
|
||||
mcp_manager.add_docker_mcp(
|
||||
name=mcp_name,
|
||||
image="alpine:latest",
|
||||
command="sleep 60", # Keep container running for the test
|
||||
env={"TEST": "test"},
|
||||
)
|
||||
|
||||
# Start the MCP
|
||||
result = mcp_manager.start_mcp(mcp_name)
|
||||
print(f"Start result: {result}")
|
||||
|
||||
# Give container time to start
|
||||
time.sleep(2)
|
||||
|
||||
# Start another MCP to verify we can run multiple instances
|
||||
mcp_name2 = f"test-mcp2-{uuid.uuid4().hex[:8]}"
|
||||
mcp_manager.add_docker_mcp(
|
||||
name=mcp_name2,
|
||||
image="alpine:latest",
|
||||
command="sleep 60", # Keep container running for the test
|
||||
env={"TEST": "test2"},
|
||||
)
|
||||
|
||||
# Start the second MCP
|
||||
result2 = mcp_manager.start_mcp(mcp_name2)
|
||||
print(f"Start result 2: {result2}")
|
||||
|
||||
# Give container time to start
|
||||
time.sleep(2)
|
||||
|
||||
# Check how many containers we have now
|
||||
mcps_after = len(mcp_manager.list_mcp_containers())
|
||||
|
||||
# We should have two more containers than before
|
||||
assert mcps_after >= mcps_before + 2, "Not all MCP containers were created"
|
||||
|
||||
# Get container details and verify no host port bindings
|
||||
all_mcps = mcp_manager.list_mcp_containers()
|
||||
print(f"All MCPs: {all_mcps}")
|
||||
|
||||
# Test successful - we were able to start multiple MCPs without port conflicts
|
||||
|
||||
finally:
|
||||
# Clean up
|
||||
try:
|
||||
if mcp_name:
|
||||
mcp_manager.stop_mcp(mcp_name)
|
||||
mcp_manager.remove_mcp(mcp_name)
|
||||
except Exception as e:
|
||||
print(f"Error cleaning up {mcp_name}: {e}")
|
||||
|
||||
try:
|
||||
if mcp_name2:
|
||||
mcp_manager.stop_mcp(mcp_name2)
|
||||
mcp_manager.remove_mcp(mcp_name2)
|
||||
except Exception as e:
|
||||
print(f"Error cleaning up {mcp_name2}: {e}")
|
||||
tests/test_session_commands.py (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
"""
|
||||
Tests for the session management commands.
|
||||
"""
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
|
||||
from cubbi.cli import app
|
||||
|
||||
|
||||
def test_session_list_empty(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session list' with no active sessions."""
|
||||
mock_container_manager.list_sessions.return_value = []
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No active sessions found" in result.stdout
|
||||
|
||||
|
||||
def test_session_list_with_sessions(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session list' with active sessions."""
|
||||
# Create a mock session and set list_sessions to return it
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
mock_session = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={"8080": "8080"},
|
||||
)
|
||||
mock_container_manager.list_sessions.return_value = [mock_session]
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
# The output display can vary depending on terminal width, so just check
|
||||
# that the command executed successfully
|
||||
|
||||
|
||||
def test_session_create_basic(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session create' with basic options."""
|
||||
# We need to patch user_config.get with a side_effect to handle different keys
|
||||
with patch("cubbi.cli.user_config") as mock_user_config:
|
||||
# Handle different key requests appropriately
|
||||
def mock_get_side_effect(key, default=None):
|
||||
if key == "defaults.image":
|
||||
return "goose"
|
||||
elif key == "defaults.volumes":
|
||||
return [] # Return empty list for volumes
|
||||
elif key == "defaults.connect":
|
||||
return True
|
||||
elif key == "defaults.mount_local":
|
||||
return True
|
||||
elif key == "defaults.networks":
|
||||
return []
|
||||
return default
|
||||
|
||||
mock_user_config.get.side_effect = mock_get_side_effect
|
||||
mock_user_config.get_environment_variables.return_value = {}
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "create"])
|
||||
|
||||
if result.exit_code != 0:
|
||||
print(f"Error: {result.exception}")
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
|
||||
# Verify container_manager was called with the expected image
|
||||
mock_container_manager.create_session.assert_called_once()
|
||||
assert (
|
||||
mock_container_manager.create_session.call_args[1]["image_name"] == "goose"
|
||||
)
|
||||
|
||||
|
||||
def test_session_close(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session close' command."""
|
||||
mock_container_manager.close_session.return_value = True
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "close", "test-session-id"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "closed successfully" in result.stdout
|
||||
mock_container_manager.close_session.assert_called_once_with("test-session-id")
|
||||
|
||||
|
||||
def test_session_close_all(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session close --all' command."""
|
||||
# Set up mock sessions
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# timestamp no longer needed since we don't use created_at in Session
|
||||
mock_sessions = [
|
||||
Session(
|
||||
id=f"session-{i}",
|
||||
name=f"Session {i}",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={},
|
||||
)
|
||||
for i in range(3)
|
||||
]
|
||||
|
||||
mock_container_manager.list_sessions.return_value = mock_sessions
|
||||
mock_container_manager.close_all_sessions.return_value = (3, True)
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "close", "--all"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "3 sessions closed successfully" in result.stdout
|
||||
mock_container_manager.close_all_sessions.assert_called_once()
|
||||
|
||||
|
||||
# For more complex tests that need actual Docker,
|
||||
# we've implemented them in test_integration_docker.py
|
||||
# They will run automatically if Docker is available
|
||||
uv.lock (generated, 70 lines)
@@ -75,6 +75,45 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cubbi"
|
||||
version = "0.1.0"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "docker" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "pyyaml" },
|
||||
{ name = "rich" },
|
||||
{ name = "typer" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
dev = [
|
||||
{ name = "mypy" },
|
||||
{ name = "pytest" },
|
||||
{ name = "ruff" },
|
||||
]
|
||||
|
||||
[package.dev-dependencies]
|
||||
dev = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "docker", specifier = ">=7.0.0" },
|
||||
{ name = "mypy", marker = "extra == 'dev'", specifier = ">=1.7.0" },
|
||||
{ name = "pydantic", specifier = ">=2.5.0" },
|
||||
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=7.4.0" },
|
||||
{ name = "pyyaml", specifier = ">=6.0.1" },
|
||||
{ name = "rich", specifier = ">=13.6.0" },
|
||||
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1.9" },
|
||||
{ name = "typer", specifier = ">=0.9.0" },
|
||||
]
|
||||
|
||||
[package.metadata.requires-dev]
|
||||
dev = [{ name = "pytest", specifier = ">=8.3.5" }]
|
||||
|
||||
[[package]]
|
||||
name = "docker"
|
||||
version = "7.1.0"
|
||||
@@ -119,37 +158,6 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mcontainer"
|
||||
version = "0.1.0"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "docker" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "pyyaml" },
|
||||
{ name = "rich" },
|
||||
{ name = "typer" },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
dev = [
|
||||
{ name = "mypy" },
|
||||
{ name = "pytest" },
|
||||
{ name = "ruff" },
|
||||
]
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "docker", specifier = ">=7.0.0" },
|
||||
{ name = "mypy", marker = "extra == 'dev'", specifier = ">=1.7.0" },
|
||||
{ name = "pydantic", specifier = ">=2.5.0" },
|
||||
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=7.4.0" },
|
||||
{ name = "pyyaml", specifier = ">=6.0.1" },
|
||||
{ name = "rich", specifier = ">=13.6.0" },
|
||||
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1.9" },
|
||||
{ name = "typer", specifier = ">=0.9.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mdurl"
|
||||
version = "0.1.2"