Mirror of https://github.com/Monadical-SAS/cubbi.git (synced 2025-12-21 04:39:07 +00:00)

**Compare commits:** `doc-mcp-fi` ... `main` — 100 commits
**`.github/workflows/conventional_commit_pr_title.yml`** (new file, 21 lines, vendored)

```yaml
name: "Lint PR"

on:
  pull_request_target:
    types:
      - opened
      - edited
      - synchronize
      - reopened

permissions:
  pull-requests: read

jobs:
  main:
    name: Validate PR title
    runs-on: ubuntu-latest
    steps:
      - uses: amannn/action-semantic-pull-request@v5
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**`.github/workflows/pre_commit.yml`** (new file, 14 lines, vendored)

```yaml
name: pre-commit

on:
  pull_request:
  push:
    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v3
      - uses: pre-commit/action@v3.0.1
```
**`.github/workflows/pytests.yml`** (new file, 41 lines, vendored)

```yaml
name: Pytests

on:
  pull_request:
  push:

permissions:
  contents: write
  checks: write
  pull-requests: write

jobs:
  pytest:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: true
      matrix:
        python-version: ["3.12"]

    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          python-version: ${{ matrix.python-version }}

      - name: Install all dependencies
        run: uv sync --frozen --all-extras --all-groups

      - name: Build required images
        run: |
          uv tool install --with-editable . .
          cubbi image build goose
          cubbi image build aider

      - name: Tests
        run: |
          uv run --frozen -m pytest -v
```
**`.github/workflows/release.yml`** (new file, 127 lines, vendored)

```yaml
name: Release

on:
  workflow_dispatch:
    inputs:
      release_force:
        # see https://python-semantic-release.readthedocs.io/en/latest/github-action.html#command-line-options
        description: |
          Force release be one of: [major | minor | patch]
          Leave empty for auto-detect based on commit messages.
        type: choice
        options:
          - ""     # auto - no force
          - major  # force major
          - minor  # force minor
          - patch  # force patch
        default: ""
        required: false
      prerelease_token:
        description: 'The "prerelease identifier" to use as a prefix for the "prerelease" part of a semver. Like the rc in `1.2.0-rc.8`.'
        type: choice
        options:
          - rc
          - beta
          - alpha
        default: rc
        required: false
      prerelease:
        description: "Is a pre-release"
        type: boolean
        default: false
        required: false

concurrency:
  group: deploy
  cancel-in-progress: false  # prevent hiccups with semantic-release

env:
  PYTHON_VERSION_DEFAULT: "3.12"

jobs:
  release:
    runs-on: ubuntu-latest
    concurrency: release

    permissions:
      id-token: write
      contents: write

    steps:
      # Note: we need to checkout the repository at the workflow sha in case during the workflow
      # the branch was updated. To keep PSR working with the configured release branches,
      # we force a checkout of the desired release branch but at the workflow sha HEAD.
      - name: Setup | Checkout Repository at workflow sha
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.sha }}
          ssh-key: ${{ secrets.DEPLOY_KEY }}

      - name: Setup | Force correct release branch on workflow sha
        run: |
          git checkout -B ${{ github.ref_name }} ${{ github.sha }}

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          python-version: ${{ env.PYTHON_VERSION_DEFAULT }}

      - name: Install all dependencies
        run: uv sync --frozen --all-extras --all-groups

      # 2 steps to prevent uv.lock out of sync
      # CF https://github.com/python-semantic-release/python-semantic-release/issues/1125
      - name: Action | Semantic Version Release (stamp only)
        uses: python-semantic-release/python-semantic-release@v9.21.1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          git_committer_name: "github-actions"
          git_committer_email: "actions@users.noreply.github.com"
          force: ${{ github.event.inputs.release_force }}
          prerelease: ${{ github.event.inputs.prerelease }}
          prerelease_token: ${{ github.event.inputs.prerelease_token }}
          ssh_public_signing_key: ${{ secrets.DEPLOY_KEY_PUB }}
          ssh_private_signing_key: ${{ secrets.DEPLOY_KEY }}
          push: false
          commit: false
          tag: false
          changelog: false

      - name: Update lockfile for the new version
        run: |
          uv lock
          git add uv.lock pyproject.toml

      - name: Action | Semantic Version Release (fully to create release)
        id: release
        uses: python-semantic-release/python-semantic-release@v9.21.1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          git_committer_name: "github-actions"
          git_committer_email: "actions@users.noreply.github.com"
          force: ${{ github.event.inputs.release_force }}
          prerelease: ${{ github.event.inputs.prerelease }}
          prerelease_token: ${{ github.event.inputs.prerelease_token }}
          ssh_public_signing_key: ${{ secrets.DEPLOY_KEY_PUB }}
          ssh_private_signing_key: ${{ secrets.DEPLOY_KEY }}
          push: false

      - name: Push and tags
        run: |
          git push --set-upstream --follow-tags origin ${{ github.ref_name }}

      - name: Build package
        run: uv build

      - name: Publish | Upload package to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        if: steps.release.outputs.released == 'true'

      - name: Publish | Upload to GitHub Release Assets
        uses: python-semantic-release/publish-action@v9.8.9
        if: steps.release.outputs.released == 'true'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          tag: ${{ steps.release.outputs.tag }}
```
**`.gitignore`** (modified, vendored)

```diff
@@ -8,3 +8,8 @@ wheels/
 
 # Virtual environments
 .venv
+
+# Aider
+.aider*
+.goose
+.claude/settings.local.json
```
**`CHANGELOG.md`** (new file, 718 lines)

# CHANGELOG

## v0.5.0 (2025-12-15)

### Bug Fixes

- Crush providers configuration ([#30](https://github.com/Monadical-SAS/cubbi/pull/30), [`a709071`](https://github.com/Monadical-SAS/cubbi/commit/a709071d1008d7b805da86d82fb056e144a328fd))

- Cubbi configure not working when configuring another provider ([#32](https://github.com/Monadical-SAS/cubbi/pull/32), [`310149d`](https://github.com/Monadical-SAS/cubbi/commit/310149dc34bfd41237ee92ff42620bf3f4316634))

- Ensure Docker containers are always removed when closing sessions ([#35](https://github.com/Monadical-SAS/cubbi/pull/35), [`b788f3f`](https://github.com/Monadical-SAS/cubbi/commit/b788f3f52e6f85fd99e1dd117565850dbe13332b))

  When closing sessions with already-stopped containers, the stop/kill operation would raise an exception, preventing `container.remove()` from being called. This left stopped containers in Docker even though they were removed from cubbi's session tracking.

  The fix wraps the stop/kill operations in their own try-except block, allowing the code to always reach `container.remove()` regardless of whether the container was already stopped.
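A minimal sketch of the fix described in this entry, using the `docker` Python SDK; the function name and the exact error handling are illustrative, not the actual cubbi code:

```python
import docker
from docker.errors import APIError, NotFound

def close_session_container(container) -> None:
    """Stop (or kill) a container, then always attempt removal."""
    try:
        container.stop(timeout=10)
    except (APIError, NotFound):
        # The container may already be stopped or gone;
        # either way, removal below must still run.
        pass
    try:
        container.remove()
    except NotFound:
        pass  # Already removed from Docker.
```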
- Make groupadd optional (the group may already exist, like gid 20 on macOS) ([`407c1a1`](https://github.com/Monadical-SAS/cubbi/commit/407c1a1c9bc85e06600c762c78905d1bfdf89922))

- Prevent concurrent YAML corruption in sessions ([#36](https://github.com/Monadical-SAS/cubbi/pull/36), [`10d9e9d`](https://github.com/Monadical-SAS/cubbi/commit/10d9e9d3abc135718be667adc574a7b3f8470ff7))

  When multiple cubbi instances run simultaneously, they can corrupt the sessions.yaml file through concurrent writes. This manifests as malformed YAML entries (e.g., `status: running\ning2dc3ff11:`).

  This commit adds:
  - fcntl-based file locking for all write operations
  - a read-modify-write pattern that reloads from disk before each write
  - proper lock acquisition/release via a context manager

  All write operations (`add_session`, `remove_session`, `save`) now:
  1. acquire an exclusive lock on sessions.yaml
  2. reload the latest state from disk
  3. apply the modifications
  4. write atomically to the file
  5. update the in-memory cache
  6. release the lock

  This ensures that concurrent cubbi instances can safely modify the sessions file without corruption.
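A sketch of that read-modify-write pattern under an exclusive `fcntl` lock; the file location and the session schema are assumptions for illustration:

```python
import fcntl
from contextlib import contextmanager
from pathlib import Path

import yaml

SESSIONS_FILE = Path.home() / ".config" / "cubbi" / "sessions.yaml"  # assumed location

@contextmanager
def locked_sessions(path: Path = SESSIONS_FILE):
    """Hold an exclusive lock while reading, modifying, and rewriting the file."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "a+") as f:
        fcntl.flock(f, fcntl.LOCK_EX)            # blocks until the lock is free
        try:
            f.seek(0)
            sessions = yaml.safe_load(f) or {}   # reload the latest state from disk
            yield sessions                       # caller mutates the dict in place
            f.seek(0)
            f.truncate()
            yaml.safe_dump(sessions, f)          # write the merged state back
        finally:
            fcntl.flock(f, fcntl.LOCK_UN)        # release the lock

# Usage: two concurrent processes doing this cannot interleave their writes.
with locked_sessions() as sessions:
    sessions["abc123"] = {"status": "running"}
```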
- Remove container even if already removed ([`a668437`](https://github.com/Monadical-SAS/cubbi/commit/a66843714d01d163e2ce17dd4399a0fa64d2be65))

- Remove persistent_configs of images ([#28](https://github.com/Monadical-SAS/cubbi/pull/28), [`e4c64a5`](https://github.com/Monadical-SAS/cubbi/commit/e4c64a54ed39ba0a65ace75c7f03ff287073e71e))

### Documentation

- Update README with --no-cache and local MCP server documentation ([`3795de1`](https://github.com/Monadical-SAS/cubbi/commit/3795de1484e1df3905c8eb90908ab79927b03194))

  - Added documentation for the new --no-cache flag in the image build command
  - Added documentation for local MCP server support (the add-local command)
  - Updated the MCP server types to include local MCP servers
  - Added examples for all three types of MCP servers (Docker, Remote, Local)

### Features

- Add --no-cache option to the image build command ([`be171cf`](https://github.com/Monadical-SAS/cubbi/commit/be171cf2c6252dfa926a759915a057a3a6791cc2))

  Added a --no-cache flag to the `cubbi image build` command to allow building Docker images without using the build cache, useful for forcing fresh builds.

- Add local MCP server support ([`b9cffe3`](https://github.com/Monadical-SAS/cubbi/commit/b9cffe3008bccbcf4eaa7c5c03e62215520d8627))

  - Add a LocalMCP model for stdio-based MCP servers
  - Implement an add_local_mcp() method in MCPManager
  - Add an `mcp add-local` CLI command with args and env support
  - Update the cubbi_init.py MCPConfig with command, args, and env fields
  - Add local MCP support in the interactive configure tool
  - Update the image plugins (opencode, goose, crush) to handle local MCPs:
    - OpenCode: maps to the "local" type with a command array
    - Goose: maps to the "stdio" type with command/args
    - Crush: maps to the "stdio" transport type

  Local MCPs run as stdio-based commands inside containers, allowing users to integrate local MCP servers without containerization.
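To illustrate the mappings above, a local MCP definition might be rendered into goose's configuration roughly as follows — the server name and command are hypothetical, and the exact key names depend on goose's extension schema:

```yaml
# ~/.config/goose/config.yaml (generated excerpt, illustrative only)
extensions:
  my-local-server:        # hypothetical MCP server name
    type: stdio           # the "stdio" mapping described above
    cmd: uvx
    args: ["some-mcp-server", "--verbose"]
    envs:
      API_TOKEN: "..."    # env values taken from the cubbi MCP definition
```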
- Add opencode state/cache to persistent_config ([#27](https://github.com/Monadical-SAS/cubbi/pull/27), [`b7b78ea`](https://github.com/Monadical-SAS/cubbi/commit/b7b78ea0754360efe56cf3f3255f90efda737a91))

- Comprehensive configuration system and environment variable forwarding ([#29](https://github.com/Monadical-SAS/cubbi/pull/29), [`bae951c`](https://github.com/Monadical-SAS/cubbi/commit/bae951cf7c4e498b6cdd7cd00836935acbd98e42))

  * feat: migrate container configuration from env vars to YAML config files

    - Replace environment variable-based configuration with structured YAML config files
    - Add Pydantic models for type-safe configuration management in cubbi_init.py
    - Update container.py to generate /cubbi/config.yaml and mount it into containers
    - Simplify the goose plugin to extract the provider from the default model format
    - Remove complex environment variable handling in favor of direct config access
    - Maintain backward compatibility while enabling a cleaner plugin architecture

  * feat: optimize the goose plugin to only pass the API key required for the selected model

    - Update the goose plugin to set only the API key for the provider of the selected model
    - Add selective API key configuration for anthropic, openai, google, and openrouter
    - Update README.md with comprehensive automated testing documentation
    - Add litellm/gpt-oss:120b to the test.sh model matrix (now 5 images × 4 models = 20 tests)
    - Include the single prompt command syntax for each tool in the documentation

  * feat: add comprehensive integration tests with pytest parametrization

    - Create tests/test_integration.py with parametrized tests for 5 images × 4 models (20 combinations)
    - Add pytest configuration to exclude integration tests by default
    - Add an integration marker for selective test running
    - Include help command tests and image availability tests
    - Document test usage in tests/README_integration.md

    Integration tests cover:
    - the goose, aider, claudecode, opencode, and crush images
    - the anthropic/claude-sonnet-4-20250514, openai/gpt-4o, openrouter/openai/gpt-4o, and litellm/gpt-oss:120b models
    - proper command syntax for each tool
    - success validation with exit codes and completion markers

    Usage:
    - `pytest` (regular tests only)
    - `pytest -m integration` (integration tests only)
    - `pytest -m integration -k "goose"` (specific image)

  * feat: update the OpenCode plugin with complete multi-provider configuration

    - Add a global STANDARD_PROVIDERS constant for maintainability
    - Support custom providers (with baseURL) vs standard providers
    - Custom providers: include npm package, name, baseURL, apiKey, models
    - Standard providers: include only apiKey and empty models
    - Use direct API key values from the cubbi config instead of env vars
    - Only add the default model to the provider that matches the default model
    - Use @ai-sdk/openai-compatible for OpenAI-compatible providers
    - Preserve model names without transformation
    - All providers get the required empty models{} section per the OpenCode spec

    This ensures OpenCode can properly recognize and use both native providers (anthropic, openai, google, openrouter) and custom providers (litellm, etc.) with the correct configuration format.
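A hedged sketch of what the generated OpenCode configuration might look like under this scheme; the litellm base URL, key values, and model name are placeholders, and the exact layout follows OpenCode's provider schema:

```json
{
  "provider": {
    "anthropic": {
      "options": { "apiKey": "sk-ant-..." },
      "models": {}
    },
    "litellm": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "litellm",
      "options": {
        "baseURL": "http://litellm.example.internal:4000/v1",
        "apiKey": "sk-litellm-..."
      },
      "models": { "gpt-oss:120b": {} }
    }
  }
}
```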
  * refactor: model is now a combination of provider/model

  * feat: add a separate integration test for Claude Code without model config

    Claude Code is Anthropic-specific and doesn't require model selection like other tools. Created a dedicated test that verifies basic functionality without model preselection.

  * feat: update the Claude Code and Crush plugins to use the new config system

    - The Claude Code plugin now uses cubbi_config.providers to get the Anthropic API key
    - The Crush plugin is updated to use cubbi_config.providers for provider configuration
    - Both plugins maintain backwards compatibility with environment variables
    - Consistent plugin structure across all cubbi images

  * feat: add environments_to_forward support for images

    - Add an environments_to_forward field to the ImageConfig and Image models
    - Update the container creation logic to forward the specified environment variables from the host
    - Add environments_to_forward to the claudecode cubbi_image.yaml to ensure the Anthropic API key is always available
    - Claude Code now gets its required environment variables regardless of model selection
    - This ensures Claude Code works properly even when other models are specified

    Fixes the issue where Claude Code couldn't access the Anthropic API key when using different model configurations.
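For illustration, the `environments_to_forward` field would sit in an image's metadata roughly like this — a plausible excerpt, not the repository's exact file:

```yaml
# cubbi/images/claudecode/cubbi_image.yaml (illustrative excerpt)
name: claudecode
description: Claude Code CLI in a Cubbi container
environments_to_forward:
  - ANTHROPIC_API_KEY   # forwarded from the host into the container at creation
  - HTTP_PROXY
  - HTTPS_PROXY
```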
  * refactor: remove the unused environment field from cubbi_image.yaml files

    The 'environment' field was loaded but never processed at runtime. Only 'environments_to_forward' is actually used to pass environment variables from host to container.

    Cleaned up the configuration files by removing:
    - 72 lines from aider/cubbi_image.yaml
    - 42 lines from claudecode/cubbi_image.yaml
    - 28 lines from crush/cubbi_image.yaml
    - 16 lines from goose/cubbi_image.yaml
    - the empty environment: [] from opencode/cubbi_image.yaml

    This makes the configuration files cleaner, so they only contain fields that are actually used by the system.

  * feat: implement environment variable forwarding for aider

    Updates aider to automatically receive all relevant environment variables from the host, similar to how opencode works.

    Changes:
    - Added an environments_to_forward field to aider/cubbi_image.yaml with a comprehensive list of API key, configuration, and proxy variables
    - Updated aider_plugin.py to use the cubbi_config system for provider/model setup
    - Environment variables are now forwarded automatically during container creation
    - Maintains backward compatibility with legacy environment variables

    Environment variables forwarded:
    - API keys: OPENAI_API_KEY, ANTHROPIC_API_KEY, DEEPSEEK_API_KEY, etc.
    - Configuration: AIDER_MODEL, GIT_* variables, HTTP_PROXY, etc.
    - Timezone: TZ for proper log timestamps

    Tested: all aider tests pass; environment variables confirmed forwarded.

  * refactor: remove the unused volumes and init fields from cubbi_image.yaml files

    Both the 'volumes' and 'init' fields were loaded but never processed at runtime. These were incomplete implementations that didn't affect container behavior.

    Removed from all 5 images:
    - volumes: a list with mountPath: /app (incomplete, missing host paths)
    - init: pre_command and command fields (unused during container creation)

    The cubbi_image.yaml files now only contain fields that are actually used:
    - basic metadata (name, description, version, maintainer, image)
    - persistent_configs (working functionality)
    - environments_to_forward (working functionality where present)

    This makes the configuration files cleaner and eliminates confusion about what functionality is actually implemented.

  * refactor: remove the unused ImageInit and VolumeMount models

    These models were only referenced in the Image model definition but never used at runtime, since we removed all init: and volumes: fields from the cubbi_image.yaml files.

    Removed:
    - the VolumeMount class (mountPath, description fields)
    - the ImageInit class (pre_command, command fields)
    - the init: Optional[ImageInit] field from the Image model
    - the volumes: List[VolumeMount] field from the Image model

    The Image model now only contains fields that are actually used:
    - basic metadata (name, description, version, maintainer, image)
    - environment (loaded but unused - kept for future cleanup)
    - persistent_configs (working functionality)
    - environments_to_forward (working functionality)

    This makes the data model cleaner and eliminates dead code.

  * feat: add an interactive configuration command

    Adds a `cubbi configure` command for interactive setup of LLM providers and models through a user-friendly questionnaire interface.

    New features:
    - interactive provider configuration (OpenAI, Anthropic, OpenRouter, etc.)
    - API key management with environment variable references
    - model selection with provider/model format validation
    - default settings configuration (image, ports, volumes, etc.)
    - added the questionary dependency for interactive prompts

    Changes:
    - Added cubbi/configure.py with the full interactive configuration logic
    - Added the configure command to cubbi/cli.py
    - Updated uv.lock with the questionary and prompt-toolkit dependencies

    Usage: `cubbi configure`

  * refactor: update the integration tests for current functionality

    Updates the integration tests to reflect current cubbi functionality:

    test_integration.py:
    - simplified the image list (removed crush temporarily)
    - updated the model list with the currently supported models
    - removed outdated help command tests that were timing out
    - simplified the claudecode test to a basic functionality test
    - updated the command templates for current tool versions

    test_integration_docker.py:
    - cleaned up the container management tests
    - fixed formatting and improved readability
    - updated assertion formatting for better error messages

    These changes align the tests with the current state of the codebase and remove tests that were causing timeouts or failures.

  * fix: fix temporary file chmod

- Dynamic model management for OpenAI-compatible providers ([#33](https://github.com/Monadical-SAS/cubbi/pull/33), [`7d6bc5d`](https://github.com/Monadical-SAS/cubbi/commit/7d6bc5dbfa5f4d4ef69a7b806846aebdeec38aa0))

  feat: add model fetching for OpenAI-compatible endpoints

- Universal model management for all standard providers ([#34](https://github.com/Monadical-SAS/cubbi/pull/34), [`fc819a3`](https://github.com/Monadical-SAS/cubbi/commit/fc819a386185330e60946ee4712f268cfed2b66a))

  * fix: add crush plugin support too

  * feat: comprehensive model management for all standard providers

    - Add universal provider support for model fetching (OpenAI, Anthropic, Google, OpenRouter)
    - Add default API URLs for the standard providers in config.py
    - Enhance the model fetcher with provider-specific authentication:
      * Anthropic: x-api-key header + anthropic-version header
      * Google: x-goog-api-key header + custom response format handling
      * OpenAI/OpenRouter: Bearer token (unchanged)
    - Support Google's unique API response format (models vs data key, name vs id field)
    - Update the CLI commands to work with all supported provider types
    - Enhance the configure interface to include all providers (even those without API keys)
    - Update both the OpenCode and Crush plugins to populate models for all provider types
    - Add comprehensive provider support detection methods
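A compact sketch of the provider-specific authentication described above, written with `requests`; the `/models` endpoint shapes mirror the public APIs, but treat the exact base URLs and response handling as assumptions:

```python
import requests

def fetch_models(provider: str, api_key: str, base_url: str) -> list[str]:
    """List model IDs, adapting auth headers and response shape per provider."""
    if provider == "anthropic":
        headers = {"x-api-key": api_key, "anthropic-version": "2023-06-01"}
    elif provider == "google":
        headers = {"x-goog-api-key": api_key}
    else:  # openai, openrouter, and OpenAI-compatible endpoints
        headers = {"Authorization": f"Bearer {api_key}"}

    resp = requests.get(f"{base_url}/models", headers=headers, timeout=10)
    resp.raise_for_status()
    payload = resp.json()

    if provider == "google":
        # Google returns {"models": [{"name": "models/gemini-..."}, ...]}
        return [m["name"] for m in payload.get("models", [])]
    # OpenAI-style providers return {"data": [{"id": "gpt-4o"}, ...]}
    return [m["id"] for m in payload.get("data", [])]
```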
### Refactoring

- Deep clean plugins ([#31](https://github.com/Monadical-SAS/cubbi/pull/31), [`3a7b921`](https://github.com/Monadical-SAS/cubbi/commit/3a7b9213b0d4e5ce0cfb1250624651b242fdc325))

  * refactor: deep clean plugins

  * refactor: modernize the plugin system with Python 3.12+ typing and simplified discovery

    - Update typing to Python 3.12+ style (Dict -> dict, Optional -> union types)
    - Simplify plugin discovery using PLUGIN_CLASS exports instead of dir() reflection
    - Add public get_user_ids() and set_ownership() functions in cubbi_init
    - Add a create_directory_with_ownership() helper method to the ToolPlugin base class
    - Replace the initialize() + integrate_mcp_servers() pattern with a unified configure()
    - Add is_already_configured() checks to prevent overwriting existing configs
    - Remove excessive comments and clean up the code structure
    - All 5 plugins updated: goose, opencode, claudecode, aider, crush

  * fix: remove duplicate

## v0.4.0 (2025-08-06)

### Documentation

- Update readme ([#25](https://github.com/Monadical-SAS/cubbi/pull/25), [`9dc1158`](https://github.com/Monadical-SAS/cubbi/commit/9dc11582a21371a069d407390308340a87358a9f))

### Features

- Add user port support ([#26](https://github.com/Monadical-SAS/cubbi/pull/26), [`75c9849`](https://github.com/Monadical-SAS/cubbi/commit/75c9849315aebb41ffbd5ac942c7eb3c4a151663))

  * feat: add user port support
  * fix: fix a unit test and improve isolation
  * refactor: remove some fixtures

- Make opencode beautiful by default ([#24](https://github.com/Monadical-SAS/cubbi/pull/24), [`b8ecad6`](https://github.com/Monadical-SAS/cubbi/commit/b8ecad6227f6a328517edfc442cd9bcf4d3361dc))

  opencode: try having a compatible default theme

- Support for crush ([#23](https://github.com/Monadical-SAS/cubbi/pull/23), [`472f030`](https://github.com/Monadical-SAS/cubbi/commit/472f030924e58973dea0a41188950540550c125d))

## v0.3.0 (2025-07-31)

### Bug Fixes

- Claudecode and opencode arm64 images ([#21](https://github.com/Monadical-SAS/cubbi/pull/21), [`dba7a7c`](https://github.com/Monadical-SAS/cubbi/commit/dba7a7c1efcc04570a92ecbc4eee39eb6353aaea))

- Update readme ([`4958b07`](https://github.com/Monadical-SAS/cubbi/commit/4958b07401550fb5a6751b99a257eda6c4558ea4))

### Continuous Integration

- Remove the conventional commit check, as only the PR check is required ([`afae8a1`](https://github.com/Monadical-SAS/cubbi/commit/afae8a13e1ea02801b2e5c9d5c84aa65a32d637c))

### Features

- Add --mcp-type option for remote MCP servers ([`d41faf6`](https://github.com/Monadical-SAS/cubbi/commit/d41faf6b3072d4f8bdb2adc896125c7fd0d6117d))

  Auto-detects the connection type from the URL (/sse -> sse, /mcp -> streamable_http) or allows manual specification. Updates the goose plugin to use the actual MCP type instead of a hardcoded sse.

  🤖 Generated with [Claude Code](https://claude.ai/code)

  Co-Authored-By: Claude <noreply@anthropic.com>
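The auto-detection rule is small enough to show directly; a sketch that assumes `streamable_http` is the fallback when neither URL suffix matches (the real default may differ):

```python
def detect_mcp_type(url: str, override: str | None = None) -> str:
    """Infer the MCP transport from a remote server URL, unless overridden."""
    if override:
        return override           # a manual --mcp-type always wins
    if url.rstrip("/").endswith("/sse"):
        return "sse"
    return "streamable_http"      # /mcp endpoints and anything else (assumed default)
```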
- Add Claude Code image support ([#16](https://github.com/Monadical-SAS/cubbi/pull/16), [`b28c2bd`](https://github.com/Monadical-SAS/cubbi/commit/b28c2bd63e324f875b2d862be9e0afa4a7a17ffc))

  * feat: add Claude Code image support

    Add a new Cubbi image for Claude Code (Anthropic's official CLI) with:
    - full Claude Code CLI functionality via the NPM package
    - secure API key management with multiple authentication options
    - enterprise support (Bedrock, Vertex AI, proxy configuration)
    - persistent configuration and cache directories
    - a comprehensive test suite and documentation

    The image allows users to run Claude Code in containers with proper isolation, persistent settings, and seamless Cubbi integration. It gracefully handles missing API keys to allow flexible authentication.

    Also adds optional Claude Code API keys to container.py for enterprise deployments.

    🤖 Generated with [Claude Code](https://claude.ai/code)

    Co-Authored-By: Claude <noreply@anthropic.com>

  * Pre-commit fixes

  ---------

  Co-authored-by: Claude <noreply@anthropic.com>

  Co-authored-by: Your Name <you@example.com>

- Add configuration override in session create with --config/-c ([`672b8a8`](https://github.com/Monadical-SAS/cubbi/commit/672b8a8e315598d98f40d269dfcfbde6203cbb57))

- Add MCP tracking to sessions ([#19](https://github.com/Monadical-SAS/cubbi/pull/19), [`d750e64`](https://github.com/Monadical-SAS/cubbi/commit/d750e64608998f6f3a03928bba18428f576b412f))

  Add an mcps field to the Session model to track active MCP servers, and populate it from container labels in ContainerManager. Enhance the MCP remove command to warn when removing servers used by active sessions.

  🤖 Generated with [Claude Code](https://claude.ai/code)

  Co-authored-by: Claude <noreply@anthropic.com>

- Add network filtering with domain restrictions ([#22](https://github.com/Monadical-SAS/cubbi/pull/22), [`2eb15a3`](https://github.com/Monadical-SAS/cubbi/commit/2eb15a31f8bb97f93461bea5e567cc2ccde3f86c))

  * fix: remove config override logging to prevent API key exposure

  * feat: add network filtering with domain restrictions

    - Add a --domains flag to restrict container network access to specific domains/ports
    - Integrate the monadicalsas/network-filter container for network isolation
    - Support domain patterns like 'example.com:443', '*.api.com'
    - Add a defaults.domains configuration option
    - Automatically handle the network-filter container lifecycle
    - Prevent conflicts between the --domains and --network options

  * docs: add the --domains option to the README usage examples

  * docs: remove the wildcard domain example from the --domains help

    Wildcard domains are not currently supported by network-filter

- Add ripgrep and openssh-client in images ([#15](https://github.com/Monadical-SAS/cubbi/pull/15), [`e70ec35`](https://github.com/Monadical-SAS/cubbi/commit/e70ec3538ba4e02a60afedca583da1c35b7b6d7a))

- Add sudo and sudoers ([#20](https://github.com/Monadical-SAS/cubbi/pull/20), [`9c8ddbb`](https://github.com/Monadical-SAS/cubbi/commit/9c8ddbb3f3f2fc97db9283898b6a85aee7235fae))

  * feat: add sudo and sudoers

  * Update cubbi/images/cubbi_init.py

  Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>

  ---------

- Implement Aider AI pair programming support ([#17](https://github.com/Monadical-SAS/cubbi/pull/17), [`fc0d6b5`](https://github.com/Monadical-SAS/cubbi/commit/fc0d6b51af12ddb0bd8655309209dd88e7e4d6f1))

  * feat: implement Aider AI pair programming support

    - Add a comprehensive Aider Docker image with Python 3.12 and a system pip installation
    - Implement aider_plugin.py for secure API key management and environment configuration
    - Support multiple LLM providers: OpenAI, Anthropic, DeepSeek, Gemini, OpenRouter
    - Add persistent configuration for the ~/.aider/ and ~/.cache/aider/ directories
    - Create comprehensive documentation with usage examples and troubleshooting
    - Include an automated test suite with 6 test categories covering all functionality
    - Update container.py to support DEEPSEEK_API_KEY and GEMINI_API_KEY
    - Integrate with the Cubbi CLI for seamless session management

    🤖 Generated with [Claude Code](https://claude.ai/code)

    Co-Authored-By: Claude <noreply@anthropic.com>

  * Fix pytest for aider

  * Fix pre-commit

  ---------

  Co-authored-by: Your Name <you@example.com>

- Include new image opencode ([#14](https://github.com/Monadical-SAS/cubbi/pull/14), [`5fca51e`](https://github.com/Monadical-SAS/cubbi/commit/5fca51e5152dcf7503781eb707fa04414cf33c05))

  * feat: include new image opencode

  * docs: update readme

- Support config `openai.url` for goose/opencode/aider ([`da5937e`](https://github.com/Monadical-SAS/cubbi/commit/da5937e70829b88a66f96c3ce7be7dacfc98facb))

### Refactoring

- New image layout and organization ([#13](https://github.com/Monadical-SAS/cubbi/pull/13), [`e5121dd`](https://github.com/Monadical-SAS/cubbi/commit/e5121ddea4230e78a05a85c4ce668e0c169b5ace))

  * refactor: rework how images are defined, in order to create wrappers for other tools

  * refactor: fix issues with ownership

  * refactor: images now share information with other image types

  * fix: update readme

## v0.2.0 (2025-05-21)

### Continuous Integration

- Add semantic release configuration (and use the pyproject version) ([`fbba8b7`](https://github.com/Monadical-SAS/cubbi/commit/fbba8b7613c76c6a1ae21c81d9f07697320f6d10))

- Try fixing the dynamic_import issue ([`252d8be`](https://github.com/Monadical-SAS/cubbi/commit/252d8be735e6d18761c42e9c138ccafde89fd6ee))

- Try fixing the dynamic_import issue (2, force adding pyproject.toml) ([`31e09bc`](https://github.com/Monadical-SAS/cubbi/commit/31e09bc7ba8446508a90f5a9423271ac386498fe))

### Documentation

- Add information for uvx ([`ba852d5`](https://github.com/Monadical-SAS/cubbi/commit/ba852d502eea4fc558c0f96d9015436101d5ef43))

- Add MIT license ([`13c896a`](https://github.com/Monadical-SAS/cubbi/commit/13c896a58d9bc6f25b0688f9ae7117ae868ae705))

- Update classifiers ([`5218bb1`](https://github.com/Monadical-SAS/cubbi/commit/5218bb121804c440dc69c9d932787ed6d54b90f5))

- Update README ([`15d86d2`](https://github.com/Monadical-SAS/cubbi/commit/15d86d25e74162153c26d6c254059f24d46c4095))

### Features

- **cubbix**: Add --no-shell in combination with --run to not drop into a shell, and exit when the command is done ([`75daccb`](https://github.com/Monadical-SAS/cubbi/commit/75daccb3662d059d178fd0f12026bb97f29f2452))

## v0.1.0-rc.1 (2025-04-18)

### Bug Fixes

- Mcp tests ([`3799f04`](https://github.com/Monadical-SAS/cubbi/commit/3799f04c1395d3b018f371db0c0cb8714e6fb8b3))

- Osx tests on volume ([`7fc9cfd`](https://github.com/Monadical-SAS/cubbi/commit/7fc9cfd8e1babfa069691d3b7997449535069674))

- Remove double "connecting to" message ([`e36f454`](https://github.com/Monadical-SAS/cubbi/commit/e36f4540bfe3794ab2d065f552cfb9528489de71))

- Remove the "mc stop" meant to be in the container, but not implemented ([`4f54c0f`](https://github.com/Monadical-SAS/cubbi/commit/4f54c0fbe7886c8551368b4b35be3ad8c7ae49ab))

- **cli**: Rename MAI->MC ([`354834f`](https://github.com/Monadical-SAS/cubbi/commit/354834fff733c37202b01a6fc49ebdf5003390c1))

- **goose**: Add ping, nano and vim to the default image ([`028bd26`](https://github.com/Monadical-SAS/cubbi/commit/028bd26cf12e181541e006650b58d97e1d568a45))

- **goose**: Always update the file ([`b1aa415`](https://github.com/Monadical-SAS/cubbi/commit/b1aa415ddee981dc1278cd24f7509363b9c54a54))

- **goose**: Ensure configuration is run as the user ([`cfa7dd6`](https://github.com/Monadical-SAS/cubbi/commit/cfa7dd647d1e4055bf9159be2ee9c2280f2d908e))

- **goose**: Install the latest goose version, do not use pip ([`7649173`](https://github.com/Monadical-SAS/cubbi/commit/7649173d6c8a82ac236d0f89263591eaa6e21a20))

- **goose**: Remove MCP_HOST and such; this is not how MCP works ([`d42af87`](https://github.com/Monadical-SAS/cubbi/commit/d42af870ff56112b4503f2568b8a5b0f385c435c))

- **goose**: Rename mai to mc, add initialization status ([`74c723d`](https://github.com/Monadical-SAS/cubbi/commit/74c723db7b6b7dd57c4ca32a804436a990e5260c))

- **langfuse**: Fix goose langfuse integration (wrong env variables) ([`e36eef4`](https://github.com/Monadical-SAS/cubbi/commit/e36eef4ef7c2d0cbdef31704afb45c50c4293986))

- **mc**: Fix a runtime issue when starting mc ([`6f08e2b`](https://github.com/Monadical-SAS/cubbi/commit/6f08e2b274b67001694123b5bb977401df0810c6))

- **mcp**: Fix UnboundLocalError: cannot access local variable 'container_name' where it is not associated with a value ([`deff036`](https://github.com/Monadical-SAS/cubbi/commit/deff036406d72d55659da40520a3a09599d65f07))

- **session**: Ensure a session connects only to the MCP server passed in --mcp ([`5d674f7`](https://github.com/Monadical-SAS/cubbi/commit/5d674f750878f0895dc1544620e8b1da4da29752))

- **session**: Fix session status display ([`092f497`](https://github.com/Monadical-SAS/cubbi/commit/092f497ecc19938d4917a18441995170d1f68704))

- **ssh**: Do not enable ssh automatically ([`f32b3dd`](https://github.com/Monadical-SAS/cubbi/commit/f32b3dd269d1a3d6ebaa2e7b2893f267b5175b20))

- **uid**: Correctly pass uid/gid to the project ([`e25e30e`](https://github.com/Monadical-SAS/cubbi/commit/e25e30e7492c6b0a03017440a18bb2708927fc19))

- **uid**: Use a symlink instead of a volume for the persistent volume in the container ([`a74251b`](https://github.com/Monadical-SAS/cubbi/commit/a74251b119d24714c7cc1eaadeea851008006137))

### Chores

- Remove unnecessary output ([`30c6b99`](https://github.com/Monadical-SAS/cubbi/commit/30c6b995cbb5bdf3dc7adf2e79d8836660d4f295))

- Update doc and add pre-commit ([`958d87b`](https://github.com/Monadical-SAS/cubbi/commit/958d87bcaeed16210a7c22574b5e63f2422af098))

### Continuous Integration

- Add ci files ([#11](https://github.com/Monadical-SAS/cubbi/pull/11), [`3850bc3`](https://github.com/Monadical-SAS/cubbi/commit/3850bc32129da539f53b69427ddca85f8c5f390a))

  * ci: add ci files

  * fix: add goose image build

### Documentation

- Add --run option examples to the README ([`6b2c1eb`](https://github.com/Monadical-SAS/cubbi/commit/6b2c1ebf1cd7a5d9970234112f32fe7a231303f9))

- Prefer the mcx alias in README examples ([`9c21611`](https://github.com/Monadical-SAS/cubbi/commit/9c21611a7fa1497f7cbddb1f1b4cd22b4ebc8a19))

- **mcp**: Add a specification for MCP server support ([`20916c5`](https://github.com/Monadical-SAS/cubbi/commit/20916c5713b3a047f4a8a33194f751f36e3c8a7a))

- **readme**: Remove the license part ([`1c538f8`](https://github.com/Monadical-SAS/cubbi/commit/1c538f8a59e28888309c181ae8f8034b9e70a631))

- **readme**: Update the README to update the tool call ([`a4591dd`](https://github.com/Monadical-SAS/cubbi/commit/a4591ddbd863bc6658a7643d3f33d06c82816cae))

### Features

- First commit ([`fde6529`](https://github.com/Monadical-SAS/cubbi/commit/fde6529d545b5625484c5c1236254d2e0c6f0f4d))

- **cli**: Auto connect to a session ([`4a63606`](https://github.com/Monadical-SAS/cubbi/commit/4a63606d58cc3e331a349974e9b3bf2d856a72a1))

- **cli**: Auto mount the current directory as /app ([`e6e3c20`](https://github.com/Monadical-SAS/cubbi/commit/e6e3c207bcee531b135824688adf1a56ae427a01))

- **cli**: More information when closing a session ([`08ba1ab`](https://github.com/Monadical-SAS/cubbi/commit/08ba1ab2da3c24237c0f0bc411924d8ffbe71765))

- **cli**: Phase 1 - local cli with docker integration ([`6443083`](https://github.com/Monadical-SAS/cubbi/commit/64430830d883308e4d52e17b25c260a0d5385141))

- **cli**: Separate session state into its own session.yaml file ([`7736573`](https://github.com/Monadical-SAS/cubbi/commit/7736573b84c7a51eaa60b932f835726b411ca742))

- **cli**: Support joining an external network ([`133583b`](https://github.com/Monadical-SAS/cubbi/commit/133583b941ed56d1b0636277bb847c45eee7f3b8))

- **config**: Add global user configuration for the tool ([`dab783b`](https://github.com/Monadical-SAS/cubbi/commit/dab783b01d82bcb210b5e01ac3b93ba64c7bc023))

  - langfuse
  - default driver
  - and api keys

- **config**: Ensure config is correctly saved ([`deb5945`](https://github.com/Monadical-SAS/cubbi/commit/deb5945e40d55643dca4e1aa4201dfa8da1bfd70))

- **gemini**: Support for gemini model ([`2f9fd68`](https://github.com/Monadical-SAS/cubbi/commit/2f9fd68cada9b5aaba652efb67368c2641046da5))

- **goose**: Auto add the MCP server to the goose configuration when starting a session ([`7805aa7`](https://github.com/Monadical-SAS/cubbi/commit/7805aa720eba78d47f2ad565f6944e84a21c4b1c))

- **goose**: Optimize init status ([`16f59b1`](https://github.com/Monadical-SAS/cubbi/commit/16f59b1c408dbff4781ad7ccfa70e81d6d98f7bd))

- **goose**: Update config using a uv script with pyyaml ([#6](https://github.com/Monadical-SAS/cubbi/pull/6), [`9e742b4`](https://github.com/Monadical-SAS/cubbi/commit/9e742b439b7b852efa4219850f8b67c143274045))

- **keys**: Pass local keys to the session by default ([`f83c49c`](https://github.com/Monadical-SAS/cubbi/commit/f83c49c0f340d1a3accba1fe1317994b492755c0))

- **llm**: Add a default model/provider to auto configure the driver ([#7](https://github.com/Monadical-SAS/cubbi/pull/7), [`5b9713d`](https://github.com/Monadical-SAS/cubbi/commit/5b9713dc2f7d7c25808ad37094838c697c056fec))

- **mc**: Support for uid/gid, and use the current user by default ([`a51115a`](https://github.com/Monadical-SAS/cubbi/commit/a51115a45d88bf703fb5380171042276873b7207))

- **mcp**: Add inspector ([`d098f26`](https://github.com/Monadical-SAS/cubbi/commit/d098f268cd164e9d708089c9f9525a940653c010))

- **mcp**: Add the possibility to have default MCPs to connect to ([`4b0461a`](https://github.com/Monadical-SAS/cubbi/commit/4b0461a6faf81de1e1b54d1fe78fea7977cde9dd))

- **mcp**: Ensure inner MCP environment variables are passed ([`0d75bfc`](https://github.com/Monadical-SAS/cubbi/commit/0d75bfc3d8e130fb05048c2bc8a674f6b7e5de83))

- **mcp**: First docker proxy working ([`0892b6c`](https://github.com/Monadical-SAS/cubbi/commit/0892b6c8c472063c639cc78cf29b322bb39f998f))

- **mcp**: Improve inspector reliability over re-runs ([`3ee8ce6`](https://github.com/Monadical-SAS/cubbi/commit/3ee8ce6338c35b7e48d788d2dddfa9b6a70381cb))

- **mcp**: Initial version of mcp ([`212f271`](https://github.com/Monadical-SAS/cubbi/commit/212f271268c5724775beceae119f97aec2748dcb))

- **project**: Explicitly add --project to save information in /mc-config across runs ([`3a182fd`](https://github.com/Monadical-SAS/cubbi/commit/3a182fd2658c0eb361ce5ed88938686e2bd19e59))

  Containers are now isolated by default.

- **run**: Add --run command ([`33d90d0`](https://github.com/Monadical-SAS/cubbi/commit/33d90d05311ad872b7a7d4cd303ff6f7b7726038))

- **ssh**: Make the SSH server optional with an --ssh flag ([`5678438`](https://github.com/Monadical-SAS/cubbi/commit/56784386614fcd0a52be8a2eb89d2deef9323ca1))

  - Added an --ssh flag to the session create command
  - Modified mc-init.sh to check the MC_SSH_ENABLED environment variable
  - The SSH server is now disabled by default
  - Updated README.md with an example of the new flag
  - Fixed an UnboundLocalError with container_name in an exception handler

- **volume**: Add the mc config volume command ([`2caeb42`](https://github.com/Monadical-SAS/cubbi/commit/2caeb425518242fbe1c921b9678e6e7571b9b0a6))

- **volume**: Add the possibility to mount a local directory into the container (like a docker volume) ([`b72f1ee`](https://github.com/Monadical-SAS/cubbi/commit/b72f1eef9af598f2090a0edae8921c16814b3cda))

### Refactoring

- Move the drivers directory into the mcontainer package ([`307eee4`](https://github.com/Monadical-SAS/cubbi/commit/307eee4fcef47189a98a76187d6080a36423ad6e))

  - Relocate the goose driver to mcontainer/drivers/
  - Update ConfigManager to dynamically scan for driver YAML files
  - Add support for mc-driver.yaml instead of mai-driver.yaml
  - Update the Driver model to support init commands and other YAML fields
  - Auto-discover drivers at runtime instead of hardcoding them
  - Update the documentation to reflect the new directory structure

- Reduce the amount of data in session.yaml ([`979b438`](https://github.com/Monadical-SAS/cubbi/commit/979b43846a798f1fb25ff05e6dc1fc27fa16f590))

- Rename driver to image, first pass ([`51fb79b`](https://github.com/Monadical-SAS/cubbi/commit/51fb79baa30ff479ac5479ba5ea0cad70bbb4c20))

- Rename project to cubbi ([`12d77d0`](https://github.com/Monadical-SAS/cubbi/commit/12d77d0128e4d82e5ddc1a4ab7e873ddaa22e130))

### Testing

- Add unit tests ([`7c46d66`](https://github.com/Monadical-SAS/cubbi/commit/7c46d66b53ac49c08458bc5d72e636e7d296e74f))
**`CLAUDE.md`** (modified)

````diff
@@ -1,15 +1,12 @@
-# Monadical Container Development Guide
+# Cubbi Container Development Guide
 
 ## Build Commands
 ```bash
 # Install dependencies using uv (Astral)
 uv sync
 
-# Run MC service
-uv run -m mcontainer.service
-
-# Run MC CLI
-uv run -m mcontainer.cli
+# Run Cubbi CLI
+uv run -m cubbi.cli
 ```
@@ -51,3 +48,15 @@ Use uv instead:
 - **Configuration**: Use environment variables with YAML for configuration
 
 Refer to SPECIFICATIONS.md for detailed architecture and implementation guidance.
+
+## Cubbi images
+
+A cubbi image is a flavored docker image that wraps a tool (say, goose) and dynamically configures that tool when the image starts. All cubbi images are defined in the `cubbi/images` directory.
+
+Each image must have (taking the goose image as an example):
+- `goose/cubbi_image.yaml`: the list of persistent paths, etc.
+- `goose/Dockerfile`: used to build the cubbi image with the cubbi tools
+- `goose/goose_plugin.py`: a plugin file named after the cubbi image, specific to this image, whose job is to dynamically configure the docker image at startup with the user's preferences (via environment variables). All plugins import `cubbi_init.py`, but that file is shared across all images, so it is normal for the plugin's import to fail in isolation: the build system copies the file into place during the build.
+- `goose/README.md`: a tiny readme about the image
+
+If you are creating a new image, look at existing images (goose, opencode).
````
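Starting from the files listed above, a new image's metadata could begin from a skeleton like this; the field names echo those mentioned in this guide and the changelog, but treat the exact schema as an assumption:

```yaml
# cubbi/images/mytool/cubbi_image.yaml — hypothetical new image
name: mytool
description: MyTool wrapped as a Cubbi image
version: "0.1.0"
maintainer: you@example.com
image: cubbi/mytool:latest

# Paths to keep across sessions
persistent_configs:
  - ~/.config/mytool

# Host environment variables forwarded into the container
environments_to_forward:
  - MYTOOL_API_KEY
```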
**`LICENSE`** (new file, 9 lines)

MIT License

Copyright (c) 2025 Monadical SAS

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
450
README.md
450
README.md
@@ -1,75 +1,209 @@
|
||||
# MC - Monadical Container Tool
|
||||
<div align="center">
|
||||
|
||||
MC (Monadical Container) is a command-line tool for managing ephemeral
|
||||
containers that run AI tools and development environments. It works with both
|
||||
local Docker and a dedicated remote web service that manages containers in a
|
||||
Docker-in-Docker (DinD) environment.
|
||||
# Cubbi - Container Tool
|
||||
|
||||
## Requirements
|
||||
Cubbi is a command-line tool for managing ephemeral containers that run AI tools and development environments, with support for MCP servers. It supports [Aider](https://github.com/Aider-AI/aider), [Crush](https://github.com/charmbracelet/crush), [Claude Code](https://github.com/anthropics/claude-code), [Goose](https://github.com/block/goose), [Opencode](https://github.com/sst/opencode).
|
||||
|
||||
- [uv](https://docs.astral.sh/uv/)
|
||||

|
||||

|
||||
[](https://github.com/monadical-sas/cubbi/actions/workflows/pytests.yml)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
|
||||
## Installation
|
||||
</div>
|
||||
|
||||
## 🚀 Quick Reference
|
||||
|
||||
- `cubbi session create` - Create a new session
|
||||
- `cubbix` - Shortcut for `cubbi session create`
|
||||
- `cubbix .` - Mount the current directory
|
||||
- `cubbix /path/to/dir` - Mount a specific directory
|
||||
|
||||
## 📋 Requirements
|
||||
|
||||
- [Docker](https://www.docker.com/)
|
||||
- [uv](https://astral.sh/uv)
|
||||
|
||||
## 📥 Installation
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/monadical/mcontainer.git
|
||||
cd mc
|
||||
# Via uv
|
||||
uv tool install cubbi
|
||||
|
||||
# Install with uv
|
||||
uv sync
|
||||
# Without installation
|
||||
# (meaning all commands below must be prefixed with `uvx`)
|
||||
uvx cubbi
|
||||
```
|
||||
|
||||
## Basic Usage
|
||||
Then compile your first image:
|
||||
|
||||
```bash
|
||||
# Create a new session with the default driver
|
||||
mc session create
|
||||
cubbi image build goose
|
||||
cubbi image build opencode
|
||||
cubbi image build crush
|
||||
```
|
||||
|
||||
### For Developers
|
||||
|
||||
If you are looking to contribute to the development, you will need to use `uv` as well:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/monadical-sas/cubbi
|
||||
cd cubbi
|
||||
uv tool install --with-editable . .
|
||||
# You'll have cubbi and cubbix executable files in your PATH, pointing to the local installation.
|
||||
```
|
||||
|
||||
## 📚 Basic Usage
|
||||
|
||||
```bash
|
||||
# Show help message (displays available commands)
|
||||
cubbi
|
||||
|
||||
# Create a new session with the default image (using cubbix alias)
|
||||
cubbix
|
||||
|
||||
# Create a session and run an initial command before the shell starts
|
||||
cubbix --run "ls -l"
|
||||
|
||||
# Create a session, run a command, and exit (no shell prompt)
|
||||
cubbix --run "ls -l" --no-shell
|
||||
|
||||
# List all active sessions
|
||||
mc session list
|
||||
cubbi session list
|
||||
|
||||
# Connect to a specific session
|
||||
mc session connect SESSION_ID
|
||||
cubbi session connect SESSION_ID
|
||||
|
||||
# Close a session when done
|
||||
mc session close SESSION_ID
|
||||
cubbi session close SESSION_ID
|
||||
|
||||
# Create a session with a specific driver
|
||||
mc session create --driver goose
|
||||
# Close a session quickly (kill instead of graceful stop)
|
||||
cubbi session close SESSION_ID --kill
|
||||
|
||||
# Close all sessions at once
|
||||
cubbi session close --all
|
||||
|
||||
# Close all sessions quickly
|
||||
cubbi session close --all --kill
|
||||
|
||||
# Create a session with a specific image
|
||||
cubbix --image goose
|
||||
cubbix --image opencode
|
||||
cubbix --image crush
|
||||
|
||||
# Create a session with environment variables
|
||||
mc session create -e VAR1=value1 -e VAR2=value2
|
||||
cubbix -e VAR1=value1 -e VAR2=value2
|
||||
|
||||
# Shorthand for creating a session with a project repository
|
||||
mc github.com/username/repo
|
||||
# Mount custom volumes (similar to Docker's -v flag)
|
||||
cubbix -v /local/path:/container/path
|
||||
cubbix -v ~/data:/data -v ./configs:/etc/app/config
|
||||
|
||||
# Mount a local directory (current directory or specific path)
|
||||
cubbix .
|
||||
cubbix /path/to/project
|
||||
|
||||
# Forward ports from container to host
|
||||
cubbix --port 8000 # Forward port 8000
|
||||
cubbix --port 8000,3000,5173 # Forward multiple ports (comma-separated)
|
||||
cubbix --port 8000 --port 3000 # Forward multiple ports (repeated flag)
|
||||
|
||||
# Connect to external Docker networks
|
||||
cubbix --network teamnet --network dbnet
|
||||
|
||||
# Restrict network access to specific domains
|
||||
cubbix --domains github.com --domains "api.example.com:443"
|
||||
|
||||
# Connect to MCP servers for extended capabilities
|
||||
cubbix --mcp github --mcp jira
|
||||
|
||||
# Clone a Git repository
|
||||
cubbix https://github.com/username/repo
|
||||
|
||||
# Using the cubbix shortcut (equivalent to cubbi session create)
|
||||
cubbix # Creates a session without mounting anything
|
||||
cubbix . # Mounts the current directory
|
||||
cubbix /path/to/project # Mounts the specified directory
|
||||
cubbix https://github.com/username/repo # Clones the repository
|
||||
|
||||
# Shorthand with MCP servers
|
||||
cubbix https://github.com/username/repo --mcp github
|
||||
|
||||
# Shorthand with an initial command
|
||||
cubbix . --run "apt-get update && apt-get install -y my-package"
|
||||
|
||||
# Execute a command and exit without starting a shell
|
||||
cubbix . --run "python script.py" --no-shell
|
||||
|
||||
# Enable SSH server in the container
|
||||
cubbix --ssh
|
||||
```

## Driver Management
## 🖼️ Image Management

MC includes a driver management system that allows you to build, manage, and use Docker images for different AI tools:
Cubbi includes an image management system that allows you to build, manage, and use Docker images for different AI tools:

**Supported Images**

| Image Name | Langtrace Support | Single Prompt Command |
|------------|-------------------|----------------------|
| goose | yes | `goose run -t 'prompt' --no-session --quiet` |
| opencode | no | `opencode run -m MODEL 'prompt'` |
| claudecode | no | `claude -p 'prompt'` |
| aider | no | `aider --message 'prompt' --yes-always --no-fancy-input` |
| crush | no | `crush run 'prompt'` |

**Automated Testing:**

Each image can be tested with single-prompt commands using different models:

```bash
# List available drivers
mc driver list
# Test a single image with a specific model
cubbix -i goose -m anthropic/claude-sonnet-4-20250514 --no-connect --no-shell --run "goose run -t 'What is 2+2?' --no-session --quiet"

# Get detailed information about a driver
mc driver info goose
# Test aider with non-interactive flags
cubbix -i aider -m openai/gpt-4o --no-connect --no-shell --run "aider --message 'What is 2+2?' --yes-always --no-fancy-input --no-check-update"

# Build a driver image
mc driver build goose
# Test claude-code (note: binary name is 'claude', not 'claude-code')
cubbix -i claudecode -m anthropic/claude-sonnet-4-20250514 --no-connect --no-shell --run "claude -p 'What is 2+2?'"

# Build and push a driver image
mc driver build goose --push
# Test opencode with model specification
cubbix -i opencode -m anthropic/claude-sonnet-4-20250514 --no-connect --no-shell --run "opencode run -m anthropic/claude-sonnet-4-20250514 'What is 2+2?'"

# Test crush
cubbix -i crush -m anthropic/claude-sonnet-4-20250514 --no-connect --no-shell --run "crush run 'What is 2+2?'"

# Run comprehensive test suite (requires test.sh script)
./test.sh  # Tests all images with multiple models: anthropic/claude-sonnet-4-20250514, openai/gpt-4o, openrouter/openai/gpt-4o, litellm/gpt-oss:120b
```
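
The `test.sh` runner itself is not shown here; a minimal sketch of what such a script could look like, assuming it simply loops over the images and models listed above (the per-image single-prompt commands are taken from the table in this section):

```bash
#!/usr/bin/env bash
# Hypothetical test runner: run a one-shot prompt through cubbix for
# every image/model pair, failing fast on the first error.
set -euo pipefail

IMAGES=(goose opencode claudecode aider crush)
MODELS=(anthropic/claude-sonnet-4-20250514 openai/gpt-4o openrouter/openai/gpt-4o litellm/gpt-oss:120b)

for image in "${IMAGES[@]}"; do
  for model in "${MODELS[@]}"; do
    echo "=== Testing $image with $model ==="
    case "$image" in
      goose)      cmd="goose run -t 'What is 2+2?' --no-session --quiet" ;;
      opencode)   cmd="opencode run -m $model 'What is 2+2?'" ;;
      claudecode) cmd="claude -p 'What is 2+2?'" ;;
      aider)      cmd="aider --message 'What is 2+2?' --yes-always --no-fancy-input --no-check-update" ;;
      crush)      cmd="crush run 'What is 2+2?'" ;;
    esac
    cubbix -i "$image" -m "$model" --no-connect --no-shell --run "$cmd"
  done
done
```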

Drivers are defined in the `drivers/` directory, with each subdirectory containing:
```bash
# List available images
cubbi image list

# Get detailed information about an image
cubbi image info goose
cubbi image info opencode
cubbi image info crush

# Build an image
cubbi image build goose
cubbi image build opencode
cubbi image build crush

# Build an image without using cache (force fresh build)
cubbi image build --no-cache goose
```

Images are defined in the `cubbi/images/` directory, with each subdirectory containing:

- `Dockerfile`: Docker image definition
- `entrypoint.sh`: Container entrypoint script
- `mai-init.sh`: Standardized initialization script
- `mai-driver.yaml`: Driver metadata and configuration
- `README.md`: Driver documentation
- `cubbi-init.sh`: Standardized initialization script
- `cubbi_image.yaml`: Image metadata and configuration
- `README.md`: Image documentation

Cubbi automatically discovers and loads image definitions from the YAML files.
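
As an illustration, a minimal `cubbi_image.yaml` might look like the sketch below. The required fields (`name`, `description`, `version`, `maintainer`) match what the loader in `cubbi/config.py` validates, and `image` falls back to `monadical/cubbi-<name>:latest` when omitted; any further fields depend on the `Image` model:

```yaml
# Minimal illustrative definition: cubbi/images/myimage/cubbi_image.yaml
name: myimage
description: Example AI tool environment
version: 1.0.0
maintainer: team@monadical.com
# Optional; defaults to monadical/cubbi-myimage:latest when omitted
image: monadical/cubbi-myimage:latest
```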

## Development

@@ -80,13 +214,243 @@ uv run -m pytest
# Run linting
uvx ruff check .

# Run type checking
uvx mypy .

# Format code
uvx ruff format .
```

## License
## ⚙️ Configuration

See LICENSE file for details.
Cubbi supports user-specific configuration via a YAML file located at `~/.config/cubbi/config.yaml`. This allows you to set default values and configure service credentials.

### Managing Configuration

```bash
# View all configuration
cubbi config list

# Get a specific configuration value
cubbi config get langfuse.url

# Set configuration values
cubbi config set langfuse.url "https://cloud.langfuse.com"
cubbi config set langfuse.public_key "pk-lf-..."
cubbi config set langfuse.secret_key "sk-lf-..."

# Set API keys for various services
cubbi config set openai.api_key "sk-..."
cubbi config set anthropic.api_key "sk-ant-..."

# Reset configuration to defaults
cubbi config reset
```
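
For reference, the resulting file might look roughly like the sketch below. This is illustrative only: the keys mirror the `cubbi config set` paths above, and the `docker`/`defaults` sections are the ones `ConfigManager` writes by default, but the exact nesting is managed by Cubbi itself, so prefer the CLI over hand-editing:

```yaml
# ~/.config/cubbi/config.yaml (illustrative sketch)
docker:
  socket: /var/run/docker.sock
  network: cubbi-network
defaults:
  image: goose
  domains: []
langfuse:
  url: https://cloud.langfuse.com
  public_key: pk-lf-...
  secret_key: sk-lf-...
openai:
  api_key: sk-...
anthropic:
  api_key: sk-ant-...
```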

### Default Networks Configuration

You can configure default networks that will be applied to every new session:

```bash
# List default networks
cubbi config network list

# Add a network to defaults
cubbi config network add teamnet

# Remove a network from defaults
cubbi config network remove teamnet
```

### Default Volumes Configuration

You can configure default volumes that will be automatically mounted in every new session:

```bash
# List default volumes
cubbi config volume list

# Add a volume to defaults
cubbi config volume add /local/path:/container/path

# Remove a volume from defaults (will prompt if multiple matches found)
cubbi config volume remove /local/path
```

Default volumes will be combined with any volumes specified using the `-v` flag when creating a session.
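
For instance, with a default volume configured, a `-v` flag at session-creation time adds a second mount rather than replacing the default (paths here are illustrative):

```bash
# One-time setup: make ~/data available in every session
cubbi config volume add ~/data:/data

# This session gets both /data (from defaults) and /etc/app/config (from -v)
cubbix . -v ./configs:/etc/app/config
```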

### Default Ports Configuration

You can configure default ports that will be automatically forwarded in every new session:

```bash
# List default ports
cubbi config port list

# Add a single port to defaults
cubbi config port add 8000

# Add multiple ports to defaults (comma-separated)
cubbi config port add 8000,3000,5173

# Remove a port from defaults
cubbi config port remove 8000
```

Default ports will be combined with any ports specified using the `--port` flag when creating a session.

### Default MCP Servers Configuration

You can configure default MCP servers that sessions will automatically connect to:

```bash
# List default MCP servers
cubbi config mcp list

# Add an MCP server to defaults
cubbi config mcp add github

# Remove an MCP server from defaults
cubbi config mcp remove github
```

When adding new MCP servers, they are added to defaults by default. Use the `--no-default` flag to prevent this:

```bash
cubbi mcp add github -e GITHUB_PERSONAL_ACCESS_TOKEN=xxxx mcp/github --no-default
```

When creating sessions, if no MCP server is specified with `--mcp`, the default MCP servers will be used automatically.
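
Concretely, assuming `github` has been added to the defaults as above:

```bash
# No --mcp flag: the default MCP servers (e.g. github) are attached
cubbix .

# Explicit --mcp flags take over; only jira is attached here,
# since defaults apply only when --mcp is omitted
cubbix . --mcp jira
```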

### External Network Connectivity

Cubbi containers can connect to external Docker networks, allowing them to communicate with other services in those networks:

```bash
# Create a session connected to external networks
cubbi session create --network teamnet --network dbnet
```

**Important**: Networks must be "attachable" to be joined by Cubbi containers. Here's how to create attachable networks:

```bash
# Create an attachable network with Docker
docker network create --driver bridge --attachable teamnet
```

```yaml
# Example docker-compose.yml with attachable network
version: '3'
services:
  web:
    image: nginx
    networks:
      - teamnet

networks:
  teamnet:
    driver: bridge
    attachable: true  # This is required for Cubbi containers to connect
```
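
To check whether an existing network is already attachable, Docker exposes the flag in its inspect output:

```bash
# Prints "true" if Cubbi containers can join the network
docker network inspect teamnet --format '{{.Attachable}}'
```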

### Service Credentials

Service credentials like API keys configured in `~/.config/cubbi/config.yaml` are automatically passed to containers as environment variables:

| Config Setting | Environment Variable |
|----------------|---------------------|
| `langfuse.url` | `LANGFUSE_URL` |
| `langfuse.public_key` | `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` |
| `langfuse.secret_key` | `LANGFUSE_INIT_PROJECT_SECRET_KEY` |
| `openai.api_key` | `OPENAI_API_KEY` |
| `anthropic.api_key` | `ANTHROPIC_API_KEY` |
| `openrouter.api_key` | `OPENROUTER_API_KEY` |
| `google.api_key` | `GOOGLE_API_KEY` |
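
A quick way to verify the forwarding from inside a throwaway session (the grep pattern is only illustrative):

```bash
# Run a one-shot session and print the forwarded credential variables
cubbix --no-shell --run "env | grep -E 'LANGFUSE|OPENAI|ANTHROPIC|OPENROUTER|GOOGLE'"
```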

## 🌐 MCP Server Management

MCP (Model Context Protocol) servers provide tool-calling capabilities to AI models, enhancing their ability to interact with external services, databases, and systems. Cubbi supports multiple types of MCP servers:

1. **Remote HTTP SSE servers** - External MCP servers accessed over HTTP
2. **Docker-based MCP servers** - MCP servers running in Docker containers, with an SSE proxy for stdio-to-SSE conversion
3. **Local MCP servers** - MCP servers running as local processes on your host machine

### Managing MCP Servers

```bash
# List all configured MCP servers and their status
cubbi mcp list

# View detailed status of an MCP server
cubbi mcp status github

# Start/stop/restart individual MCP servers
cubbi mcp start github
cubbi mcp stop github
cubbi mcp restart github

# Start all MCP servers at once
cubbi mcp start --all

# Stop and remove all MCP servers at once
cubbi mcp stop --all

# Run the MCP Inspector to visualize and interact with MCP servers
# It automatically joins all MCP networks for seamless DNS resolution
# Uses two ports: frontend UI (default: 5173) and backend API (default: 3000)
cubbi mcp inspector

# Run the MCP Inspector with custom ports
cubbi mcp inspector --client-port 6173 --server-port 6174

# Run the MCP Inspector in detached mode
cubbi mcp inspector --detach

# Stop the MCP Inspector
cubbi mcp inspector --stop

# View MCP server logs
cubbi mcp logs github

# Remove an MCP server configuration
cubbi mcp remove github
```

### Adding MCP Servers

Cubbi supports different types of MCP servers:

```bash
# Docker-based MCP server (with proxy)
cubbi mcp add fetch mcp/fetch
cubbi mcp add github -e GITHUB_PERSONAL_ACCESS_TOKEN=xxxx mcp/github mcp/github-proxy

# Remote HTTP SSE server
cubbi mcp add-remote myserver https://myssemcp.com/sse

# Local MCP server (runs as a local process)
cubbi mcp add-local mylocalmcp /path/to/mcp-executable
cubbi mcp add-local mylocalmcp /usr/local/bin/mcp-tool --args "--config" --args "/etc/mcp.conf"
cubbi mcp add-local mylocalmcp npx --args "@modelcontextprotocol/server-filesystem" --args "/path/to/data"

# Add environment variables to local MCP servers
cubbi mcp add-local mylocalmcp /path/to/mcp-server -e API_KEY=xxx -e BASE_URL=https://api.example.com

# Prevent adding to default MCPs
cubbi mcp add myserver mcp/server --no-default
cubbi mcp add-local mylocalmcp /path/to/executable --no-default
```

### Using MCP Servers with Sessions

MCP servers can be attached to sessions when they are created:

```bash
# Create a session with a single MCP server
cubbi session create --mcp github

# Create a session with multiple MCP servers
cubbi session create --mcp github --mcp jira
```

MCP servers are persistent and can be shared between sessions. They continue running even when sessions are closed, allowing for efficient reuse across multiple sessions.

## 📜 License

Cubbi is licensed under the [MIT License](LICENSE).

@@ -1,510 +0,0 @@
# MC - Monadical AI Container Tool

## Overview

MC (Monadical Container) is a command-line tool for managing ephemeral
containers that run AI tools and development environments. It works with both
local Docker and a dedicated remote web service that manages containers in a
Docker-in-Docker (DinD) environment.

## Technology Stack

### MC Service
- **Web Framework**: FastAPI for high-performance, async API endpoints
- **Package Management**: uv (Astral) for dependency management
- **Database**: SQLite for development, PostgreSQL for production
- **Container Management**: Docker SDK for Python
- **Authentication**: OAuth 2.0 integration with Authentik

### MC CLI
- **Language**: Python
- **Package Management**: uv for dependency management
- **Distribution**: Standalone binary via PyInstaller or similar
- **Configuration**: YAML for configuration files

## System Architecture

### Components

1. **CLI Tool (`mc`)**: The command-line interface users interact with
2. **MC Service**: A web service that handles remote container execution
3. **Container Drivers**: Predefined container templates for various AI tools

### Architecture Diagram

```
┌─────────────┐           ┌─────────────────────────┐
│             │           │                         │
│   MC CLI    │◄─────────►│   Local Docker Daemon   │
│    (mc)     │           │                         │
│             │           └─────────────────────────┘
└──────┬──────┘
       │
       │ REST API
       │
┌──────▼──────┐           ┌─────────────────────────┐
│             │           │                         │
│ MC Service  │◄─────────►│    Docker-in-Docker     │
│  (Web API)  │           │                         │
│             │           └─────────────────────────┘
└─────────────┘
       │
       ├──────────────┬──────────────┐
       │              │              │
┌──────▼──────┐ ┌─────▼─────┐ ┌──────▼──────┐
│             │ │           │ │             │
│   Fluentd   │ │ Langfuse  │ │    Other    │
│   Logging   │ │  Logging  │ │  Services   │
│             │ │           │ │             │
└─────────────┘ └───────────┘ └─────────────┘
```

## Core Concepts

- **Session**: An active container instance with a specific driver
- **Driver**: A predefined container template with specific AI tools installed
- **Remote**: A configured MC service instance

## CLI Tool Commands

### Basic Commands

```bash
# Create a new session locally (shorthand)
mc

# List active sessions on local system
mc session list

# Create a new session locally
mc session create [OPTIONS]

# Create a session with a specific driver
mc session create --driver goose

# Create a session with a specific project repository
mc session create --driver goose --project github.com/hello/private

# Create a session with a project (shorthand)
mc git@github.com:hello/private

# Close a specific session
mc session close <id>

# Connect to an existing session
mc session connect <id>

# Stop the current session (from inside the container)
mc stop
```

### Remote Management

```bash
# Add a remote MC service
mc remote add <name> <url>

# List configured remote services
mc remote list

# Remove a remote service
mc remote remove <name>

# Authenticate with a remote service
mc -r <remote_name> auth

# Create a session on a remote service
mc -r <remote_name> [session create]

# List sessions on a remote service
mc -r <remote_name> session list
```

### Environment Variables

```bash
# Set environment variables for a session
mc session create -e VAR1=value1 -e VAR2=value2

# Set environment variables for a remote session
mc -r <remote_name> session create -e VAR1=value1
```

### Logging

```bash
# Stream logs from a session
mc session logs <id>

# Stream logs with follow option
mc session logs <id> -f
```

## MC Service Specification

### Overview

The MC Service is a web service that manages ephemeral containers in a Docker-in-Docker environment. It provides a REST API for container lifecycle management, authentication, and real-time log streaming.

### API Endpoints

#### Authentication

```
POST /auth/login - Initiate Authentik authentication flow
POST /auth/callback - Handle Authentik OAuth callback
POST /auth/refresh - Refresh an existing token
POST /auth/logout - Invalidate current token
```

### Authentik Integration

The MC Service integrates with Authentik at https://authentik.monadical.io using OAuth 2.0:

1. **Application Registration**:
   - MC Service is registered as an OAuth application in Authentik
   - Configured with redirect URI to `/auth/callback`
   - Assigned appropriate scopes for user identification

2. **Authentication Flow**:
   - User initiates authentication via CLI
   - MC CLI opens browser to Authentik authorization URL
   - User logs in through Authentik's interface
   - Authentik redirects to callback URL with authorization code
   - MC Service exchanges code for access and refresh tokens
   - CLI receives and securely stores tokens

3. **Token Management**:
   - Access tokens used for API authorization
   - Refresh tokens used to obtain new access tokens
   - Tokens are encrypted at rest in CLI configuration

#### Sessions

```
GET /sessions - List all sessions
POST /sessions - Create a new session
GET /sessions/{id} - Get session details
DELETE /sessions/{id} - Terminate a session
POST /sessions/{id}/connect - Establish connection to session
GET /sessions/{id}/logs - Stream session logs
```

#### Drivers

```
GET /drivers - List available drivers
GET /drivers/{name} - Get driver details
```

#### Projects

```
GET /projects - List all projects
POST /projects - Add a new project
GET /projects/{id} - Get project details
PUT /projects/{id} - Update project details
DELETE /projects/{id} - Remove a project
```

### Service Configuration

```yaml
# mc-service.yaml
server:
  port: 3000
  host: 0.0.0.0

docker:
  socket: /var/run/docker.sock
  network: mc-network

auth:
  provider: authentik
  url: https://authentik.monadical.io
  clientId: mc-service

logging:
  providers:
    - type: fluentd
      url: http://fluentd.example.com:24224
    - type: langfuse
      url: https://api.langfuse.com
      apiKey: ${LANGFUSE_API_KEY}

drivers:
  - name: goose
    image: monadical/mc-goose:latest
    env:
      MCP_HOST: http://mcp:8000
  - name: aider
    image: monadical/mc-aider:latest
  - name: claude-code
    image: monadical/mc-claude-code:latest

projects:
  storage:
    type: encrypted
    key: ${PROJECT_ENCRYPTION_KEY}
  default_ssh_scan:
    - github.com
    - gitlab.com
    - bitbucket.org
```

### Docker-in-Docker Implementation

The MC Service runs in a container with access to the host's Docker socket, allowing it to create and manage sibling containers. This approach provides:

1. Isolation between containers
2. Simple lifecycle management
3. Resource constraints for security

### Connection Handling

For remote connections to containers, the service provides two methods:

1. **WebSocket Terminal**: Browser-based terminal access
2. **SSH Server**: Each container runs an SSH server for CLI access

### Logging Implementation

The MC Service implements log collection and forwarding:

1. Container logs are captured using Docker's logging drivers
2. Logs are forwarded to configured providers (Fluentd, Langfuse)
3. Real-time log streaming is available via WebSockets

## Project Management

### Adding Projects

Users can add projects with associated credentials:

```bash
# Add a project with SSH key
mc project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519

# Add a project with token authentication
mc project add github.com/hello/private --token ghp_123456789

# List all projects
mc project list

# Remove a project
mc project remove github.com/hello/private
```

### Project Configuration

Projects are stored in the MC service and referenced by their repository URL. The configuration includes:

```yaml
# Project configuration
id: github.com/hello/private
url: git@github.com:hello/private.git
type: git
auth:
  type: ssh
  key: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...encrypted key data...
    -----END OPENSSH PRIVATE KEY-----
  public_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI...
```

## Driver Implementation

### Driver Structure

Each driver is a Docker image with a standardized structure:

```
/
├── entrypoint.sh    # Container initialization
├── mc-init.sh       # Standardized initialization script
├── mc-driver.yaml   # Driver metadata and configuration
├── tool/            # AI tool installation
└── ssh/             # SSH server configuration
```

### Standardized Initialization Script

All drivers include a standardized `mc-init.sh` script that handles common initialization tasks:

```bash
#!/bin/bash

# Project initialization
if [ -n "$MC_PROJECT_URL" ]; then
    echo "Initializing project: $MC_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$MC_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$MC_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$MC_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$MC_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone $MC_PROJECT_URL /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.mc/init.sh" ]; then
        bash /app/.mc/init.sh
    fi
fi

# Driver-specific initialization continues...
```

### Driver Configuration (mc-driver.yaml)

```yaml
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com

init:
  pre_command: /mc-init.sh
  command: /entrypoint.sh

environment:
  - name: MCP_HOST
    description: MCP server host
    required: true
    default: http://localhost:8000

  - name: GOOSE_ID
    description: Goose instance ID
    required: false

  # Project environment variables
  - name: MC_PROJECT_URL
    description: Project repository URL
    required: false

  - name: MC_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: MC_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: MC_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000  # Main application
  - 22    # SSH server

volumes:
  - mountPath: /app
    description: Application directory
```

### Example Built-in Drivers

1. **goose**: Goose with MCP servers
2. **aider**: Aider coding assistant
3. **claude-code**: Claude Code environment
4. **custom**: Custom Dockerfile support

## Security Considerations

1. **Container Isolation**: Each session runs in an isolated container
2. **Authentication**: Integration with Authentik for secure authentication
3. **Resource Limits**: Configurable CPU, memory, and storage limits
4. **Network Isolation**: Internal Docker network for container-to-container communication
5. **Encrypted Connections**: TLS for API connections and SSH for terminal access

## Deployment

### MC Service Deployment

```yaml
# docker-compose.yml for MC Service
version: '3.8'

services:
  mc-service:
    image: monadical/mc-service:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./config:/app/config
    ports:
      - "3000:3000"
    environment:
      - AUTH_URL=https://authentik.monadical.io
      - LANGFUSE_API_KEY=your_api_key
    networks:
      - mc-network

networks:
  mc-network:
    driver: bridge
```

## Project Repository Integration Workflow

### Adding a Project Repository

1. User adds project repository with authentication:
   ```bash
   mc project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519
   ```

2. MC CLI reads the SSH key, encrypts it, and sends it to the MC Service

3. MC Service stores the project configuration securely

### Using a Project in a Session

1. User creates a session with a project:
   ```bash
   mc -r monadical git@github.com:hello/private
   ```

2. MC Service:
   - Identifies the project from the URL
   - Retrieves project authentication details
   - Sets up environment variables:
     ```
     MC_PROJECT_URL=git@github.com:hello/private
     MC_PROJECT_TYPE=git
     MC_GIT_SSH_KEY=<contents of the SSH key>
     ```
   - Creates container with these environment variables

3. Container initialization:
   - The standardized `mc-init.sh` script detects the project environment variables
   - Sets up SSH key or token authentication
   - Clones the repository to `/app`
   - Runs any project-specific initialization scripts

4. User can immediately begin working with the repository

## Implementation Roadmap

1. **Phase 1**: Local CLI tool with Docker integration
2. **Phase 2**: MC Service REST API with basic container management
3. **Phase 3**: Authentication and secure connections
4. **Phase 4**: Project management functionality
5. **Phase 5**: Driver implementation (Goose, Aider, Claude Code)
6. **Phase 6**: Logging integration with Fluentd and Langfuse
7. **Phase 7**: CLI remote connectivity improvements
8. **Phase 8**: Additional drivers and extensibility features

@@ -1,5 +1,5 @@
"""
MC - Monadical Container Tool
Cubbi - Cubbi Container Tool
"""

__version__ = "0.1.0"
2443 cubbi/cli.py (Normal file)
File diff suppressed because it is too large

183 cubbi/config.py (Normal file)
@@ -0,0 +1,183 @@
from pathlib import Path
from typing import Dict, Optional

import yaml

from .models import Config, Image

DEFAULT_CONFIG_DIR = Path.home() / ".config" / "cubbi"
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.yaml"
DEFAULT_IMAGES_DIR = Path.home() / ".config" / "cubbi" / "images"
PROJECT_ROOT = Path(__file__).parent.parent
BUILTIN_IMAGES_DIR = Path(__file__).parent / "images"

# Dynamically loaded from images directory at runtime
DEFAULT_IMAGES = {}

# Default API URLs for standard providers
PROVIDER_DEFAULT_URLS = {
    "openai": "https://api.openai.com",
    "anthropic": "https://api.anthropic.com",
    "google": "https://generativelanguage.googleapis.com",
    "openrouter": "https://openrouter.ai/api",
}


class ConfigManager:
    def __init__(self, config_path: Optional[Path] = None):
        self.config_path = config_path or DEFAULT_CONFIG_FILE
        self.config_dir = self.config_path.parent
        self.images_dir = DEFAULT_IMAGES_DIR
        self.config = self._load_or_create_config()

        # Always load package images on initialization
        # These are separate from the user config
        self.builtin_images = self._load_package_images()

    def _load_or_create_config(self) -> Config:
        """Load existing config or create a new one with defaults"""
        if self.config_path.exists():
            try:
                with open(self.config_path, "r") as f:
                    config_data = yaml.safe_load(f) or {}

                # Create a new config from scratch, then update with data from file
                config = Config(
                    docker=config_data.get("docker", {}),
                    defaults=config_data.get("defaults", {}),
                )

                # Add images
                if "images" in config_data:
                    for image_name, image_data in config_data["images"].items():
                        config.images[image_name] = Image.model_validate(image_data)

                return config
            except Exception as e:
                print(f"Error loading config: {e}")
                return self._create_default_config()
        else:
            return self._create_default_config()

    def _create_default_config(self) -> Config:
        """Create a default configuration"""
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.images_dir.mkdir(parents=True, exist_ok=True)

        # Initial config without images
        config = Config(
            docker={
                "socket": "/var/run/docker.sock",
                "network": "cubbi-network",
            },
            defaults={
                "image": "goose",
                "domains": [],
            },
        )

        self.save_config(config)
        return config

    def save_config(self, config: Optional[Config] = None) -> None:
        """Save the current config to disk"""
        if config:
            self.config = config

        self.config_dir.mkdir(parents=True, exist_ok=True)

        # Use model_dump with mode="json" for proper serialization of enums
        config_dict = self.config.model_dump(mode="json")

        # Write to file
        with open(self.config_path, "w") as f:
            yaml.dump(config_dict, f)

    def get_image(self, name: str) -> Optional[Image]:
        """Get an image by name, checking builtin images first, then user-configured ones"""
        # Check builtin images first (package images take precedence)
        if name in self.builtin_images:
            return self.builtin_images[name]
        # If not found, check user-configured images
        return self.config.images.get(name)

    def list_images(self) -> Dict[str, Image]:
        """List all available images (both builtin and user-configured)"""
        # Start with user config images
        all_images = dict(self.config.images)

        # Add builtin images, overriding any user images with the same name
        # This ensures that package-provided images always take precedence
        all_images.update(self.builtin_images)

        return all_images

    # Session management has been moved to SessionManager in session.py

    def load_image_from_dir(self, image_dir: Path) -> Optional[Image]:
        """Load an image configuration from a directory"""
        # Check for image config file
        yaml_path = image_dir / "cubbi_image.yaml"
        if not yaml_path.exists():
            return None

        try:
            with open(yaml_path, "r") as f:
                image_data = yaml.safe_load(f)

            # Extract required fields
            if not all(
                k in image_data
                for k in ["name", "description", "version", "maintainer"]
            ):
                print(f"Image config {yaml_path} missing required fields")
                return None

            # Use Image.model_validate to handle all fields from YAML
            # This will map all fields according to the Image model structure
            try:
                # Ensure image field is set if not in YAML
                if "image" not in image_data:
                    image_data["image"] = f"monadical/cubbi-{image_data['name']}:latest"

                image = Image.model_validate(image_data)
                return image
            except Exception as validation_error:
                print(
                    f"Error validating image data from {yaml_path}: {validation_error}"
                )
                return None

        except Exception as e:
            print(f"Error loading image from {yaml_path}: {e}")
            return None

    def _load_package_images(self) -> Dict[str, Image]:
        """Load all package images from the cubbi/images directory"""
        images = {}

        if not BUILTIN_IMAGES_DIR.exists():
            return images

        # Search for cubbi_image.yaml files in each subdirectory
        for image_dir in BUILTIN_IMAGES_DIR.iterdir():
            if image_dir.is_dir():
                image = self.load_image_from_dir(image_dir)
                if image:
                    images[image.name] = image

        return images

    def get_image_path(self, image_name: str) -> Optional[Path]:
        """Get the directory path for an image"""
        # Check package images first (these are the bundled ones)
        package_path = BUILTIN_IMAGES_DIR / image_name
        if package_path.exists() and package_path.is_dir():
            return package_path

        # Then check user images
        user_path = self.images_dir / image_name
        if user_path.exists() and user_path.is_dir():
            return user_path

        return None
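
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of config.py). Assumes the cubbi
# package is importable; attribute names follow the cubbi_image.yaml keys
# validated above.
#
#     from cubbi.config import ConfigManager
#
#     manager = ConfigManager()  # loads or creates ~/.config/cubbi/config.yaml
#     for name, image in manager.list_images().items():
#         print(f"{name}: {image.description} -> {image.image}")
#
#     print(manager.get_image_path("goose"))  # bundled image directory, or None
# ---------------------------------------------------------------------------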
1125 cubbi/configure.py (Normal file)
File diff suppressed because it is too large

1206 cubbi/container.py (Normal file)
File diff suppressed because it is too large

36 cubbi/cubbi_inspector_entrypoint.sh (Executable file)
@@ -0,0 +1,36 @@
#!/bin/sh
# This script modifies the Express server to bind to all interfaces

# Try to find the CLI script
CLI_FILE=$(find /app -name "cli.js" | grep -v node_modules | head -1)

if [ -z "$CLI_FILE" ]; then
    echo "Could not find CLI file. Trying common locations..."
    for path in "/app/client/bin/cli.js" "/app/bin/cli.js" "./client/bin/cli.js" "./bin/cli.js"; do
        if [ -f "$path" ]; then
            CLI_FILE="$path"
            break
        fi
    done
fi

if [ -z "$CLI_FILE" ]; then
    echo "ERROR: Could not find the MCP Inspector CLI file."
    exit 1
fi

echo "Found CLI file at: $CLI_FILE"

# Make a backup of the original file
cp "$CLI_FILE" "$CLI_FILE.bak"

# Modify the file to use 0.0.0.0 as the host
sed -i 's/app.listen(PORT/app.listen(PORT, "0.0.0.0"/g' "$CLI_FILE"
sed -i 's/server.listen(port/server.listen(port, "0.0.0.0"/g' "$CLI_FILE"
sed -i 's/listen(PORT/listen(PORT, "0.0.0.0"/g' "$CLI_FILE"

echo "Modified server to listen on all interfaces (0.0.0.0)"

# Start the MCP Inspector
echo "Starting MCP Inspector on all interfaces..."
exec npm start
68 cubbi/images/aider/Dockerfile (Normal file)
@@ -0,0 +1,68 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Aider AI pair programming for Cubbi"

# Install system dependencies including gosu for user switching
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    sudo \
    passwd \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    tmux \
    git-core \
    ripgrep \
    openssh-client \
    vim \
    && rm -rf /var/lib/apt/lists/*

# Install uv (Python package manager)
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh

# Install Aider using pip in system Python (more compatible with user switching)
RUN python -m pip install aider-chat

# Make sure aider is in PATH
ENV PATH="/root/.local/bin:$PATH"

# Create app directory
WORKDIR /app

# Copy initialization system
COPY cubbi_init.py /cubbi/cubbi_init.py
COPY aider_plugin.py /cubbi/aider_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml
COPY init-status.sh /cubbi/init-status.sh

# Make scripts executable
RUN chmod +x /cubbi/cubbi_init.py /cubbi/init-status.sh

# Add aider to PATH in bashrc and init status check
RUN echo 'PATH="/root/.local/bin:$PATH"' >> /etc/bash.bashrc
RUN echo '[ -x /cubbi/init-status.sh ] && /cubbi/init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_LINK_MODE=copy

# Pre-install the cubbi_init
RUN /cubbi/cubbi_init.py --help

# Set WORKDIR to /app
WORKDIR /app

ENTRYPOINT ["/cubbi/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
277 cubbi/images/aider/README.md (Normal file)
@@ -0,0 +1,277 @@
# Aider for Cubbi

This image provides Aider (AI pair programming) in a Cubbi container environment.

## Overview

Aider is an AI pair programming tool that works in your terminal. This Cubbi image integrates Aider with secure API key management, persistent configuration, and support for multiple LLM providers.

## Features

- **Multiple LLM Support**: Works with OpenAI, Anthropic, DeepSeek, Gemini, OpenRouter, and more
- **Secure Authentication**: API key management through Cubbi's secure environment system
- **Persistent Configuration**: Settings and history preserved across container restarts
- **Git Integration**: Automatic commits and git awareness
- **Multi-Language Support**: Works with 100+ programming languages

## Quick Start

### 1. Set up API Key

```bash
# For OpenAI (GPT models)
uv run -m cubbi.cli config set services.openai.api_key "your-openai-key"

# For Anthropic (Claude models)
uv run -m cubbi.cli config set services.anthropic.api_key "your-anthropic-key"

# For DeepSeek (recommended for cost-effectiveness)
uv run -m cubbi.cli config set services.deepseek.api_key "your-deepseek-key"
```

### 2. Run Aider Environment

```bash
# Start Aider container with your project
uv run -m cubbi.cli session create --image aider /path/to/your/project

# Or without a project
uv run -m cubbi.cli session create --image aider
```

### 3. Use Aider

```bash
# Basic usage
aider

# With specific model
aider --model sonnet

# With specific files
aider main.py utils.py

# One-shot request
aider --message "Add error handling to the login function"
```

## Configuration

### Supported API Keys

- `OPENAI_API_KEY`: OpenAI GPT models (GPT-4, GPT-4o, etc.)
- `ANTHROPIC_API_KEY`: Anthropic Claude models (Sonnet, Haiku, etc.)
- `DEEPSEEK_API_KEY`: DeepSeek models (cost-effective option)
- `GEMINI_API_KEY`: Google Gemini models
- `OPENROUTER_API_KEY`: OpenRouter (access to many models)

### Additional Configuration

- `AIDER_MODEL`: Default model to use (e.g., "sonnet", "o3-mini", "deepseek")
- `AIDER_AUTO_COMMITS`: Enable automatic git commits (default: true)
- `AIDER_DARK_MODE`: Enable dark mode interface (default: false)
- `AIDER_API_KEYS`: Additional API keys in format "provider1=key1,provider2=key2"

### Network Configuration

- `HTTP_PROXY`: HTTP proxy server URL
- `HTTPS_PROXY`: HTTPS proxy server URL

## Usage Examples

### Basic AI Pair Programming

```bash
# Start Aider with your project
uv run -m cubbi.cli session create --image aider /path/to/project

# Inside the container:
aider                        # Start interactive session
aider main.py                # Work on specific file
aider --message "Add tests"  # One-shot request
```

### Model Selection

```bash
# Use Claude Sonnet
aider --model sonnet

# Use GPT-4o
aider --model gpt-4o

# Use DeepSeek (cost-effective)
aider --model deepseek

# Use OpenRouter
aider --model openrouter/anthropic/claude-3.5-sonnet
```

### Advanced Features

```bash
# Work with multiple files
aider src/main.py tests/test_main.py

# Auto-commit changes
aider --auto-commits

# Read-only mode (won't edit files)
aider --read

# Apply a specific change
aider --message "Refactor the database connection code to use connection pooling"
```

### Enterprise/Proxy Setup

```bash
# With proxy
uv run -m cubbi.cli session create --image aider \
    --env HTTPS_PROXY="https://proxy.company.com:8080" \
    /path/to/project

# With custom model
uv run -m cubbi.cli session create --image aider \
    --env AIDER_MODEL="sonnet" \
    /path/to/project
```

## Persistent Configuration

The following directories are automatically persisted:

- `~/.aider/`: Aider configuration and chat history
- `~/.cache/aider/`: Model cache and temporary files

Configuration files are maintained across container restarts, ensuring your preferences and chat history are preserved.

## Model Recommendations

### Best Overall Performance
- **Claude 3.5 Sonnet**: Excellent code understanding and generation
- **OpenAI GPT-4o**: Strong performance across languages
- **Gemini 2.5 Pro**: Good balance of quality and speed

### Cost-Effective Options
- **DeepSeek V3**: Very cost-effective, good quality
- **OpenRouter**: Access to multiple models with competitive pricing

### Free Options
- **Gemini 2.5 Pro Exp**: Free tier available
- **OpenRouter**: Some free models available

## File Structure

```
cubbi/images/aider/
├── Dockerfile        # Container image definition
├── cubbi_image.yaml  # Cubbi image configuration
├── aider_plugin.py   # Authentication and setup plugin
└── README.md         # This documentation
```

## Authentication Flow

1. **Environment Variables**: API keys passed from Cubbi configuration
2. **Plugin Setup**: `aider_plugin.py` creates environment configuration
3. **Environment File**: Creates `~/.aider/.env` with API keys
4. **Ready**: Aider is ready for use with configured authentication
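
For illustration, the generated `~/.aider/.env` is a plain `KEY=value` file. With an Anthropic provider and default model configured, it might look like this (values and model name are placeholders):

```
AIDER_MODEL=claude-sonnet-4-20250514
AIDER_ANTHROPIC_API_KEY=sk-ant-...
AIDER_AUTO_COMMITS=true
AIDER_DARK_MODE=false
```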

## Troubleshooting

### Common Issues

**No API Key Found**
```
ℹ️ No API keys found - Aider will run without pre-configuration
```
**Solution**: Set API key in Cubbi configuration:
```bash
uv run -m cubbi.cli config set services.openai.api_key "your-key"
```

**Model Not Available**
```
Error: Model 'xyz' not found
```
**Solution**: Check available models for your provider:
```bash
aider --models  # List available models
```

**Git Issues**
```
Git repository not found
```
**Solution**: Initialize git in your project or mount a git repository:
```bash
git init
# or
uv run -m cubbi.cli session create --image aider /path/to/git/project
```

**Network/Proxy Issues**
```
Connection timeout or proxy errors
```
**Solution**: Configure proxy settings:
```bash
uv run -m cubbi.cli config set network.https_proxy "your-proxy-url"
```

### Debug Mode

```bash
# Check Aider version
aider --version

# List available models
aider --models

# Check configuration
cat ~/.aider/.env

# Verbose output
aider --verbose
```

## Security Considerations

- **API Keys**: Stored securely with 0o600 permissions
- **Environment**: Isolated container environment
- **Git Integration**: Respects .gitignore and git configurations
- **Code Safety**: Always review changes before accepting

## Advanced Configuration

### Custom Model Configuration

```bash
# Use with custom API endpoint
uv run -m cubbi.cli session create --image aider \
    --env OPENAI_API_BASE="https://api.custom-provider.com/v1" \
    --env OPENAI_API_KEY="your-key"
```

### Multiple API Keys

```bash
# Configure multiple providers
uv run -m cubbi.cli session create --image aider \
    --env OPENAI_API_KEY="openai-key" \
    --env ANTHROPIC_API_KEY="anthropic-key" \
    --env AIDER_API_KEYS="provider1=key1,provider2=key2"
```

## Support

For issues related to:
- **Cubbi Integration**: Check Cubbi documentation or open an issue
- **Aider Functionality**: Visit [Aider documentation](https://aider.chat/)
- **Model Configuration**: Check [LLM documentation](https://aider.chat/docs/llms.html)
- **API Keys**: Visit provider documentation (OpenAI, Anthropic, etc.)

## License

This image configuration is provided under the same license as the Cubbi project. Aider is licensed separately under Apache 2.0.
183 cubbi/images/aider/aider_plugin.py (Executable file)
@@ -0,0 +1,183 @@
#!/usr/bin/env python3

import os
import stat
from pathlib import Path

from cubbi_init import ToolPlugin, cubbi_config, set_ownership


class AiderPlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "aider"

    def _get_aider_config_dir(self) -> Path:
        return Path("/home/cubbi/.aider")

    def _get_aider_cache_dir(self) -> Path:
        return Path("/home/cubbi/.cache/aider")

    def _ensure_aider_dirs(self) -> tuple[Path, Path]:
        config_dir = self._get_aider_config_dir()
        cache_dir = self._get_aider_cache_dir()

        self.create_directory_with_ownership(config_dir)
        self.create_directory_with_ownership(cache_dir)

        return config_dir, cache_dir

    def is_already_configured(self) -> bool:
        config_dir = self._get_aider_config_dir()
        env_file = config_dir / ".env"
        return env_file.exists()

    def configure(self) -> bool:
        self.status.log("Setting up Aider configuration...")

        config_dir, cache_dir = self._ensure_aider_dirs()

        env_vars = self._create_environment_config()

        if env_vars:
            env_file = config_dir / ".env"
            success = self._write_env_file(env_file, env_vars)
            if success:
                self.status.log("✅ Aider environment configured successfully")
            else:
                self.status.log("⚠️ Failed to write Aider environment file", "WARNING")
        else:
            self.status.log(
                "ℹ️ No API keys found - Aider will run without pre-configuration", "INFO"
            )
            self.status.log(
                "   You can configure API keys later using environment variables",
                "INFO",
            )

        if not cubbi_config.mcps:
            self.status.log("No MCP servers to integrate")
            return True

        self.status.log(
            f"Found {len(cubbi_config.mcps)} MCP server(s) - no direct integration available for Aider"
        )

        return True

    def _create_environment_config(self) -> dict[str, str]:
        env_vars = {}

        provider_config = cubbi_config.get_provider_for_default_model()
        if provider_config and cubbi_config.defaults.model:
            _, model_name = cubbi_config.defaults.model.split("/", 1)

            env_vars["AIDER_MODEL"] = model_name
            self.status.log(f"Set Aider model to {model_name}")

            if provider_config.type == "anthropic":
                env_vars["AIDER_ANTHROPIC_API_KEY"] = provider_config.api_key
                self.status.log("Configured Anthropic API key for Aider")

            elif provider_config.type == "openai":
                env_vars["AIDER_OPENAI_API_KEY"] = provider_config.api_key
                if provider_config.base_url:
                    env_vars["AIDER_OPENAI_API_BASE"] = provider_config.base_url
                    self.status.log(
                        f"Set Aider OpenAI API base to {provider_config.base_url}"
                    )
                self.status.log("Configured OpenAI API key for Aider")

            elif provider_config.type == "google":
                env_vars["GEMINI_API_KEY"] = provider_config.api_key
                self.status.log("Configured Google/Gemini API key for Aider")

            elif provider_config.type == "openrouter":
                env_vars["OPENROUTER_API_KEY"] = provider_config.api_key
                self.status.log("Configured OpenRouter API key for Aider")

            else:
                self.status.log(
                    f"Provider type '{provider_config.type}' not directly supported by Aider plugin",
                    "WARNING",
                )
        else:
            self.status.log(
                "No default model or provider configured - checking legacy environment variables",
                "WARNING",
            )

            api_key_mappings = {
                "OPENAI_API_KEY": "AIDER_OPENAI_API_KEY",
                "ANTHROPIC_API_KEY": "AIDER_ANTHROPIC_API_KEY",
                "DEEPSEEK_API_KEY": "DEEPSEEK_API_KEY",
                "GEMINI_API_KEY": "GEMINI_API_KEY",
                "OPENROUTER_API_KEY": "OPENROUTER_API_KEY",
            }

            for env_var, aider_var in api_key_mappings.items():
                value = os.environ.get(env_var)
                if value:
                    env_vars[aider_var] = value
                    provider = env_var.replace("_API_KEY", "").lower()
                    self.status.log(f"Added {provider} API key from environment")

            openai_url = os.environ.get("OPENAI_URL")
            if openai_url:
                env_vars["AIDER_OPENAI_API_BASE"] = openai_url
                self.status.log(
                    f"Set OpenAI API base URL to {openai_url} from environment"
                )

            model = os.environ.get("AIDER_MODEL")
            if model:
                env_vars["AIDER_MODEL"] = model
                self.status.log(f"Set model to {model} from environment")

        additional_keys = os.environ.get("AIDER_API_KEYS")
        if additional_keys:
            try:
                for pair in additional_keys.split(","):
                    if "=" in pair:
                        provider, key = pair.strip().split("=", 1)
                        env_var_name = f"{provider.upper()}_API_KEY"
                        env_vars[env_var_name] = key
                        self.status.log(f"Added {provider} API key from AIDER_API_KEYS")
            except Exception as e:
                self.status.log(f"Failed to parse AIDER_API_KEYS: {e}", "WARNING")

        auto_commits = os.environ.get("AIDER_AUTO_COMMITS", "true")
        if auto_commits.lower() in ["true", "false"]:
            env_vars["AIDER_AUTO_COMMITS"] = auto_commits

        dark_mode = os.environ.get("AIDER_DARK_MODE", "false")
        if dark_mode.lower() in ["true", "false"]:
            env_vars["AIDER_DARK_MODE"] = dark_mode

        for proxy_var in ["HTTP_PROXY", "HTTPS_PROXY"]:
            value = os.environ.get(proxy_var)
            if value:
                env_vars[proxy_var] = value
                self.status.log(f"Added proxy configuration: {proxy_var}")

        return env_vars

    def _write_env_file(self, env_file: Path, env_vars: dict[str, str]) -> bool:
        try:
            content = "\n".join(f"{key}={value}" for key, value in env_vars.items())

            with open(env_file, "w") as f:
                f.write(content)
                f.write("\n")

            set_ownership(env_file)
            os.chmod(env_file, stat.S_IRUSR | stat.S_IWUSR)

            self.status.log(f"Created Aider environment file at {env_file}")
            return True
        except Exception as e:
            self.status.log(f"Failed to write Aider environment file: {e}", "ERROR")
            return False


PLUGIN_CLASS = AiderPlugin
42 cubbi/images/aider/cubbi_image.yaml (Normal file)
@@ -0,0 +1,42 @@
name: aider
description: Aider AI pair programming environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-aider:latest
persistent_configs: []
environments_to_forward:
  # API Keys
  - OPENAI_API_KEY
  - ANTHROPIC_API_KEY
  - ANTHROPIC_AUTH_TOKEN
  - ANTHROPIC_CUSTOM_HEADERS
  - DEEPSEEK_API_KEY
  - GEMINI_API_KEY
  - OPENROUTER_API_KEY
  - AIDER_API_KEYS

  # Model Configuration
  - AIDER_MODEL
  - CUBBI_MODEL
  - CUBBI_PROVIDER

  # Git Configuration
  - AIDER_AUTO_COMMITS
  - AIDER_DARK_MODE
  - GIT_AUTHOR_NAME
  - GIT_AUTHOR_EMAIL
  - GIT_COMMITTER_NAME
  - GIT_COMMITTER_EMAIL

  # Proxy Configuration
  - HTTP_PROXY
  - HTTPS_PROXY
  - NO_PROXY

  # OpenAI Configuration
  - OPENAI_URL
  - OPENAI_API_BASE
  - AIDER_OPENAI_API_BASE

  # Timezone (useful for logs and timestamps)
  - TZ
274 cubbi/images/aider/test_aider.py (Executable file)
@@ -0,0 +1,274 @@
#!/usr/bin/env python3
"""
Comprehensive test script for Aider Cubbi image
Tests Docker image build, API key configuration, and Cubbi CLI integration
"""

import subprocess
import sys
import tempfile
import re


def run_command(cmd, description="", check=True):
    """Run a shell command and return result"""
    print(f"\n🔍 {description}")
    print(f"Running: {cmd}")

    try:
        result = subprocess.run(
            cmd, shell=True, capture_output=True, text=True, check=check
        )

        if result.stdout:
            print("STDOUT:")
            print(result.stdout)

        if result.stderr:
            print("STDERR:")
            print(result.stderr)

        return result
    except subprocess.CalledProcessError as e:
        print(f"❌ Command failed with exit code {e.returncode}")
        if e.stdout:
            print("STDOUT:")
            print(e.stdout)
        if e.stderr:
            print("STDERR:")
            print(e.stderr)
        if check:
            raise
        return e


def test_docker_image_exists():
    """Test if the Aider Docker image exists"""
    print("\n" + "=" * 60)
    print("🧪 Testing Docker Image Existence")
    print("=" * 60)

    result = run_command(
        "docker images monadical/cubbi-aider:latest --format 'table {{.Repository}}\t{{.Tag}}\t{{.Size}}'",
        "Checking if Aider Docker image exists",
    )

    if "monadical/cubbi-aider" in result.stdout:
        print("✅ Aider Docker image exists")
    else:
        print("❌ Aider Docker image not found")
        assert False, "Aider Docker image not found"


def test_aider_version():
    """Test basic Aider functionality in container"""
    print("\n" + "=" * 60)
    print("🧪 Testing Aider Version")
    print("=" * 60)

    result = run_command(
        "docker run --rm monadical/cubbi-aider:latest bash -c 'aider --version'",
        "Testing Aider version command",
    )

    assert (
        "aider" in result.stdout and result.returncode == 0
    ), "Aider version command failed"
    print("✅ Aider version command works")


def test_api_key_configuration():
    """Test API key configuration and environment setup"""
    print("\n" + "=" * 60)
    print("🧪 Testing API Key Configuration")
    print("=" * 60)

    # Test with multiple API keys
    test_keys = {
        "OPENAI_API_KEY": "test-openai-key",
        "ANTHROPIC_API_KEY": "test-anthropic-key",
        "DEEPSEEK_API_KEY": "test-deepseek-key",
        "GEMINI_API_KEY": "test-gemini-key",
        "OPENROUTER_API_KEY": "test-openrouter-key",
    }

    env_flags = " ".join([f'-e {key}="{value}"' for key, value in test_keys.items()])

    result = run_command(
        f"docker run --rm {env_flags} monadical/cubbi-aider:latest bash -c 'cat ~/.aider/.env'",
        "Testing API key configuration in .env file",
    )

    success = True
    for key, value in test_keys.items():
        if f"{key}={value}" not in result.stdout:
            print(f"❌ {key} not found in .env file")
            success = False
        else:
            print(f"✅ {key} configured correctly")

    # Test default configuration values
    if "AIDER_AUTO_COMMITS=true" in result.stdout:
        print("✅ Default AIDER_AUTO_COMMITS configured")
    else:
        print("❌ Default AIDER_AUTO_COMMITS not found")
        success = False

    if "AIDER_DARK_MODE=false" in result.stdout:
        print("✅ Default AIDER_DARK_MODE configured")
    else:
        print("❌ Default AIDER_DARK_MODE not found")
        success = False

    assert success, "API key configuration test failed"


def test_cubbi_cli_integration():
    """Test Cubbi CLI integration"""
    print("\n" + "=" * 60)
    print("🧪 Testing Cubbi CLI Integration")
    print("=" * 60)

    # Test image listing
    result = run_command(
        "uv run -m cubbi.cli image list | grep aider",
        "Testing Cubbi CLI can see Aider image",
    )

    if "aider" in result.stdout and "Aider AI pair" in result.stdout:
        print("✅ Cubbi CLI can list Aider image")
    else:
        print("❌ Cubbi CLI cannot see Aider image")
        # Fail the test explicitly; a bare `return False` would be silently
        # ignored by pytest
        assert False, "Cubbi CLI cannot see Aider image"

    # Test session creation with test command
    with tempfile.TemporaryDirectory() as temp_dir:
        test_env = {
            "OPENAI_API_KEY": "test-session-key",
            "ANTHROPIC_API_KEY": "test-anthropic-session-key",
        }

        env_vars = " ".join([f"{k}={v}" for k, v in test_env.items()])

        result = run_command(
            f"{env_vars} uv run -m cubbi.cli session create --image aider {temp_dir} --no-shell --run \"aider --version && echo 'Cubbi CLI test successful'\"",
            "Testing Cubbi CLI session creation with Aider",
        )

        assert (
            result.returncode == 0
            and re.search(r"aider \d+\.\d+\.\d+", result.stdout)
            and "Cubbi CLI test successful" in result.stdout
        ), "Cubbi CLI session creation failed"
        print("✅ Cubbi CLI session creation works")
||||
|
||||
def test_persistent_configuration():
|
||||
"""Test persistent configuration directories"""
|
||||
print("\n" + "=" * 60)
|
||||
print("🧪 Testing Persistent Configuration")
|
||||
print("=" * 60)
|
||||
|
||||
# Test that persistent directories are created
|
||||
result = run_command(
|
||||
"docker run --rm -e OPENAI_API_KEY='test-key' monadical/cubbi-aider:latest bash -c 'ls -la /home/cubbi/.aider/ && ls -la /home/cubbi/.cache/'",
|
||||
"Testing persistent configuration directories",
|
||||
)
|
||||
|
||||
success = True
|
||||
|
||||
if ".env" in result.stdout:
|
||||
print("✅ .env file created in ~/.aider/")
|
||||
else:
|
||||
print("❌ .env file not found in ~/.aider/")
|
||||
success = False
|
||||
|
||||
if "aider" in result.stdout:
|
||||
print("✅ ~/.cache/aider directory exists")
|
||||
else:
|
||||
print("❌ ~/.cache/aider directory not found")
|
||||
success = False
|
||||
|
||||
assert success, "API key configuration test failed"
|
||||
|
||||
|
||||
def test_plugin_functionality():
|
||||
"""Test the Aider plugin functionality"""
|
||||
print("\n" + "=" * 60)
|
||||
print("🧪 Testing Plugin Functionality")
|
||||
print("=" * 60)
|
||||
|
||||
# Test plugin without API keys (should still work)
|
||||
result = run_command(
|
||||
"docker run --rm monadical/cubbi-aider:latest bash -c 'echo \"Plugin test without API keys\"'",
|
||||
"Testing plugin functionality without API keys",
|
||||
)
|
||||
|
||||
if "No API keys found - Aider will run without pre-configuration" in result.stdout:
|
||||
print("✅ Plugin handles missing API keys gracefully")
|
||||
else:
|
||||
# This might be in stderr or initialization might have changed
|
||||
print("ℹ️ Plugin API key handling test - check output above")
|
||||
|
||||
# Test plugin with API keys
|
||||
result = run_command(
|
||||
"docker run --rm -e OPENAI_API_KEY='test-plugin-key' monadical/cubbi-aider:latest bash -c 'echo \"Plugin test with API keys\"'",
|
||||
"Testing plugin functionality with API keys",
|
||||
)
|
||||
|
||||
if "Aider environment configured successfully" in result.stdout:
|
||||
print("✅ Plugin configures environment successfully")
|
||||
else:
|
||||
print("❌ Plugin environment configuration failed")
|
||||
assert False, "Plugin environment configuration failed"
|
||||
|
||||
|
||||
def main():
|
||||
"""Run all tests"""
|
||||
print("🚀 Starting Aider Cubbi Image Tests")
|
||||
print("=" * 60)
|
||||
|
||||
tests = [
|
||||
("Docker Image Exists", test_docker_image_exists),
|
||||
("Aider Version", test_aider_version),
|
||||
("API Key Configuration", test_api_key_configuration),
|
||||
("Persistent Configuration", test_persistent_configuration),
|
||||
("Plugin Functionality", test_plugin_functionality),
|
||||
("Cubbi CLI Integration", test_cubbi_cli_integration),
|
||||
]
|
||||
|
||||
results = {}
|
||||
|
||||
for test_name, test_func in tests:
|
||||
try:
|
||||
test_func()
|
||||
results[test_name] = True
|
||||
except Exception as e:
|
||||
print(f"❌ Test '{test_name}' failed with exception: {e}")
|
||||
results[test_name] = False
|
||||
|
||||
# Print summary
|
||||
print("\n" + "=" * 60)
|
||||
print("📊 TEST SUMMARY")
|
||||
print("=" * 60)
|
||||
|
||||
total_tests = len(tests)
|
||||
passed_tests = sum(1 for result in results.values() if result)
|
||||
failed_tests = total_tests - passed_tests
|
||||
|
||||
for test_name, result in results.items():
|
||||
status = "✅ PASS" if result else "❌ FAIL"
|
||||
print(f"{status} {test_name}")
|
||||
|
||||
print(f"\nTotal: {total_tests} | Passed: {passed_tests} | Failed: {failed_tests}")
|
||||
|
||||
if failed_tests == 0:
|
||||
print("\n🎉 All tests passed! Aider image is ready for use.")
|
||||
return 0
|
||||
else:
|
||||
print(f"\n⚠️ {failed_tests} test(s) failed. Please check the output above.")
|
||||
return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
28
cubbi/images/base.py
Normal file
@@ -0,0 +1,28 @@
"""
Base image implementation for MAI
"""

from typing import Dict, Optional

from ..models import Image


class ImageManager:
    """Manager for MAI images"""

    @staticmethod
    def get_default_images() -> Dict[str, Image]:
        """Get the default built-in images"""
        from ..config import DEFAULT_IMAGES

        return DEFAULT_IMAGES

    @staticmethod
    def get_image_metadata(image_name: str) -> Optional[Dict]:
        """Get metadata for a specific image"""
        from ..config import DEFAULT_IMAGES

        if image_name in DEFAULT_IMAGES:
            return DEFAULT_IMAGES[image_name].model_dump()

        return None
82
cubbi/images/claudecode/Dockerfile
Normal file
@@ -0,0 +1,82 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Claude Code for Cubbi"

# Install system dependencies including gosu for user switching
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    sudo \
    passwd \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    tmux \
    git-core \
    ripgrep \
    openssh-client \
    vim \
    && rm -rf /var/lib/apt/lists/*

# Install uv (Python package manager)
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh

# Install Node.js (for Claude Code NPM package)
ARG NODE_VERSION=v22.16.0
RUN mkdir -p /opt/node && \
    ARCH=$(uname -m) && \
    if [ "$ARCH" = "x86_64" ]; then \
        NODE_ARCH=linux-x64; \
    elif [ "$ARCH" = "aarch64" ]; then \
        NODE_ARCH=linux-arm64; \
    else \
        echo "Unsupported architecture"; exit 1; \
    fi && \
    curl -fsSL https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-$NODE_ARCH.tar.gz -o node.tar.gz && \
    tar -xf node.tar.gz -C /opt/node --strip-components=1 && \
    rm node.tar.gz

ENV PATH="/opt/node/bin:$PATH"

# Install Claude Code globally
RUN npm install -g @anthropic-ai/claude-code

# Create app directory
WORKDIR /app

# Copy initialization system
COPY cubbi_init.py /cubbi/cubbi_init.py
COPY claudecode_plugin.py /cubbi/claudecode_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml
COPY init-status.sh /cubbi/init-status.sh

# Make scripts executable
RUN chmod +x /cubbi/cubbi_init.py /cubbi/init-status.sh

# Add Node.js to PATH in bashrc and init status check
RUN echo 'PATH="/opt/node/bin:$PATH"' >> /etc/bash.bashrc
RUN echo '[ -x /cubbi/init-status.sh ] && /cubbi/init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_LINK_MODE=copy

# Pre-install the cubbi_init
RUN /cubbi/cubbi_init.py --help

# Set WORKDIR to /app
WORKDIR /app

ENTRYPOINT ["/cubbi/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
222
cubbi/images/claudecode/README.md
Normal file
@@ -0,0 +1,222 @@
# Claude Code for Cubbi

This image provides Claude Code (Anthropic's official CLI for Claude) in a Cubbi container environment.

## Overview

Claude Code is an interactive CLI tool that helps with software engineering tasks. This Cubbi image integrates Claude Code with secure API key management, persistent configuration, and enterprise features.

## Features

- **Claude Code CLI**: Full access to Claude's coding capabilities
- **Secure Authentication**: API key management through Cubbi's secure environment system
- **Persistent Configuration**: Settings and cache preserved across container restarts
- **Enterprise Support**: Bedrock and Vertex AI integration
- **Network Support**: Proxy configuration for corporate environments
- **Tool Permissions**: Pre-configured permissions for all Claude Code tools

## Quick Start

### 1. Set up API Key

```bash
# Set your Anthropic API key in Cubbi configuration
cubbi config set services.anthropic.api_key "your-api-key-here"
```

### 2. Run Claude Code Environment

```bash
# Start Claude Code container
cubbi run claudecode

# Execute Claude Code commands
cubbi exec claudecode "claude 'help me write a Python function'"

# Start interactive session
cubbi exec claudecode "claude"
```

## Configuration

### Required Environment Variables

- `ANTHROPIC_API_KEY`: Your Anthropic API key (required)

### Optional Environment Variables

- `ANTHROPIC_AUTH_TOKEN`: Custom authorization token for enterprise deployments
- `ANTHROPIC_CUSTOM_HEADERS`: Additional HTTP headers (JSON format)
- `CLAUDE_CODE_USE_BEDROCK`: Set to "true" to use Amazon Bedrock
- `CLAUDE_CODE_USE_VERTEX`: Set to "true" to use Google Vertex AI
- `HTTP_PROXY`: HTTP proxy server URL
- `HTTPS_PROXY`: HTTPS proxy server URL
- `DISABLE_TELEMETRY`: Set to "true" to disable telemetry

### Advanced Configuration

```bash
# Enterprise deployment with Bedrock
cubbi config set environment.claude_code_use_bedrock true
cubbi run claudecode

# With custom proxy
cubbi config set network.https_proxy "https://proxy.company.com:8080"
cubbi run claudecode

# Disable telemetry
cubbi config set environment.disable_telemetry true
cubbi run claudecode
```

## Usage Examples

### Basic Usage

```bash
# Get help
cubbi exec claudecode "claude --help"

# One-time task
cubbi exec claudecode "claude 'write a unit test for this function'"

# Interactive mode
cubbi exec claudecode "claude"
```

### Working with Projects

```bash
# Start Claude Code in your project directory
cubbi run claudecode --mount /path/to/your/project:/app
cubbi exec claudecode "cd /app && claude"

# Create a commit
cubbi exec claudecode "cd /app && claude commit"
```

### Advanced Features

```bash
# Run with specific model configuration
cubbi exec claudecode "claude -m claude-3-5-sonnet-20241022 'analyze this code'"

# Use with plan mode
cubbi exec claudecode "claude -p 'refactor this function'"
```

## Persistent Configuration

The following directories are automatically persisted:

- `~/.claude/`: Claude Code settings and configuration
- `~/.cache/claude/`: Claude Code cache and temporary files

Configuration files are maintained across container restarts, ensuring your settings and preferences are preserved.
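
For example, a marker file written under `~/.claude/` should survive a container restart (a rough check; the container name matches the examples above):

```bash
# Write a marker file, restart the container, then read it back
cubbi exec claudecode "echo persisted > ~/.claude/marker.txt"
cubbi stop claudecode && cubbi run claudecode
cubbi exec claudecode "cat ~/.claude/marker.txt"   # expected output: persisted
```
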
## File Structure

```
cubbi/images/claudecode/
├── Dockerfile              # Container image definition
├── cubbi_image.yaml        # Cubbi image configuration
├── claudecode_plugin.py    # Authentication and setup plugin
├── cubbi_init.py           # Initialization script (shared)
├── init-status.sh          # Status check script (shared)
└── README.md               # This documentation
```

## Authentication Flow

1. **Environment Variables**: API key passed from Cubbi configuration
2. **Plugin Setup**: `claudecode_plugin.py` creates `~/.claude/settings.json` (example below)
3. **Verification**: Plugin verifies Claude Code installation and configuration
4. **Ready**: Claude Code is ready for use with configured authentication
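
For reference, a generated `~/.claude/settings.json` looks roughly like this (a sketch based on the plugin's defaults; which keys appear depends on the environment variables that are set, and the key value shown is a placeholder):

```json
{
  "apiKey": "sk-ant-...",
  "permissions": {
    "tools": {
      "read": { "allowed": true },
      "write": { "allowed": true },
      "edit": { "allowed": true },
      "bash": { "allowed": true },
      "webfetch": { "allowed": true },
      "websearch": { "allowed": true }
    }
  }
}
```
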
## Troubleshooting

### Common Issues

**API Key Not Set**
```
⚠️ No authentication configuration found
Please set ANTHROPIC_API_KEY environment variable
```
**Solution**: Set API key in Cubbi configuration:
```bash
cubbi config set services.anthropic.api_key "your-api-key-here"
```

**Claude Code Not Found**
```
❌ Claude Code not properly installed
```
**Solution**: Rebuild the container image:
```bash
docker build -t cubbi-claudecode:latest cubbi/images/claudecode/
```

**Network Issues**
```
Connection timeout or proxy errors
```
**Solution**: Configure proxy settings:
```bash
cubbi config set network.https_proxy "your-proxy-url"
```

### Debug Mode

Enable verbose output for debugging:

```bash
# Check configuration
cubbi exec claudecode "cat ~/.claude/settings.json"

# Verify installation
cubbi exec claudecode "claude --version"
cubbi exec claudecode "which claude"
cubbi exec claudecode "node --version"
```

## Security Considerations

- **API Keys**: Stored securely with 0o600 permissions
- **Configuration**: Settings files have restricted access
- **Environment**: Isolated container environment
- **Telemetry**: Can be disabled for privacy

## Development

### Building the Image

```bash
# Build locally
docker build -t cubbi-claudecode:test cubbi/images/claudecode/

# Test basic functionality
docker run --rm -it \
    -e ANTHROPIC_API_KEY="your-api-key" \
    cubbi-claudecode:test \
    bash -c "claude --version"
```

### Testing

```bash
# Run through Cubbi
cubbi run claudecode --name test-claude
cubbi exec test-claude "claude --version"
cubbi stop test-claude
```

## Support

For issues related to:
- **Cubbi Integration**: Check Cubbi documentation or open an issue
- **Claude Code**: Visit [Claude Code documentation](https://docs.anthropic.com/en/docs/claude-code)
- **API Keys**: Visit [Anthropic Console](https://console.anthropic.com/)

## License

This image configuration is provided under the same license as the Cubbi project. Claude Code is licensed separately by Anthropic.
132
cubbi/images/claudecode/claudecode_plugin.py
Executable file
@@ -0,0 +1,132 @@
#!/usr/bin/env python3

import json
import os
import stat
from pathlib import Path

from cubbi_init import ToolPlugin, cubbi_config, set_ownership


class ClaudeCodePlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "claudecode"

    def _get_claude_dir(self) -> Path:
        return Path("/home/cubbi/.claude")

    def is_already_configured(self) -> bool:
        settings_file = self._get_claude_dir() / "settings.json"
        return settings_file.exists()

    def configure(self) -> bool:
        self.status.log("Setting up Claude Code authentication...")

        claude_dir = self.create_directory_with_ownership(self._get_claude_dir())
        claude_dir.chmod(0o700)

        settings = self._create_settings()

        if settings:
            settings_file = claude_dir / "settings.json"
            success = self._write_settings(settings_file, settings)
            if success:
                self.status.log("✅ Claude Code authentication configured successfully")
                self._integrate_mcp_servers()
                return True
            else:
                return False
        else:
            self.status.log("⚠️ No authentication configuration found", "WARNING")
            self.status.log(
                "   Please set ANTHROPIC_API_KEY environment variable", "WARNING"
            )
            self.status.log("   Claude Code will run without authentication", "INFO")
            self._integrate_mcp_servers()
            return True

    def _integrate_mcp_servers(self) -> None:
        if not cubbi_config.mcps:
            self.status.log("No MCP servers to integrate")
            return

        self.status.log("MCP server integration available for Claude Code")

    def _create_settings(self) -> dict | None:
        settings = {}

        anthropic_provider = None
        for provider_name, provider_config in cubbi_config.providers.items():
            if provider_config.type == "anthropic":
                anthropic_provider = provider_config
                break

        if not anthropic_provider or not anthropic_provider.api_key:
            api_key = os.environ.get("ANTHROPIC_API_KEY")
            if not api_key:
                return None
            settings["apiKey"] = api_key
        else:
            settings["apiKey"] = anthropic_provider.api_key

        auth_token = os.environ.get("ANTHROPIC_AUTH_TOKEN")
        if auth_token:
            settings["authToken"] = auth_token

        custom_headers = os.environ.get("ANTHROPIC_CUSTOM_HEADERS")
        if custom_headers:
            try:
                settings["customHeaders"] = json.loads(custom_headers)
            except json.JSONDecodeError:
                self.status.log(
                    "⚠️ Invalid ANTHROPIC_CUSTOM_HEADERS format, skipping", "WARNING"
                )

        if os.environ.get("CLAUDE_CODE_USE_BEDROCK") == "true":
            settings["provider"] = "bedrock"

        if os.environ.get("CLAUDE_CODE_USE_VERTEX") == "true":
            settings["provider"] = "vertex"

        http_proxy = os.environ.get("HTTP_PROXY")
        https_proxy = os.environ.get("HTTPS_PROXY")
        if http_proxy or https_proxy:
            settings["proxy"] = {}
            if http_proxy:
                settings["proxy"]["http"] = http_proxy
            if https_proxy:
                settings["proxy"]["https"] = https_proxy

        if os.environ.get("DISABLE_TELEMETRY") == "true":
            settings["telemetry"] = {"enabled": False}

        settings["permissions"] = {
            "tools": {
                "read": {"allowed": True},
                "write": {"allowed": True},
                "edit": {"allowed": True},
                "bash": {"allowed": True},
                "webfetch": {"allowed": True},
                "websearch": {"allowed": True},
            }
        }

        return settings

    def _write_settings(self, settings_file: Path, settings: dict) -> bool:
        try:
            with open(settings_file, "w") as f:
                json.dump(settings, f, indent=2)

            set_ownership(settings_file)
            os.chmod(settings_file, stat.S_IRUSR | stat.S_IWUSR)

            self.status.log(f"Created Claude Code settings at {settings_file}")
            return True
        except Exception as e:
            self.status.log(f"Failed to write Claude Code settings: {e}", "ERROR")
            return False


PLUGIN_CLASS = ClaudeCodePlugin
15
cubbi/images/claudecode/cubbi_image.yaml
Normal file
@@ -0,0 +1,15 @@
name: claudecode
description: Claude Code AI environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-claudecode:latest
persistent_configs: []
environments_to_forward:
  - ANTHROPIC_API_KEY
  - ANTHROPIC_AUTH_TOKEN
  - ANTHROPIC_CUSTOM_HEADERS
  - CLAUDE_CODE_USE_BEDROCK
  - CLAUDE_CODE_USE_VERTEX
  - HTTP_PROXY
  - HTTPS_PROXY
  - DISABLE_TELEMETRY
251
cubbi/images/claudecode/test_claudecode.py
Executable file
@@ -0,0 +1,251 @@
#!/usr/bin/env python3
"""
Automated test suite for Claude Code Cubbi integration
"""

import subprocess


def run_test(description: str, command: list, timeout: int = 30) -> bool:
    """Run a test command and return success status"""
    print(f"🧪 Testing: {description}")
    try:
        result = subprocess.run(
            command, capture_output=True, text=True, timeout=timeout
        )
        if result.returncode == 0:
            print("  ✅ PASS")
            return True
        else:
            print(f"  ❌ FAIL: {result.stderr}")
            if result.stdout:
                print(f"  📋 stdout: {result.stdout}")
            return False
    except subprocess.TimeoutExpired:
        print(f"  ⏰ TIMEOUT: Command exceeded {timeout}s")
        return False
    except Exception as e:
        print(f"  ❌ ERROR: {e}")
        return False


def test_suite():
    """Run complete test suite"""
    tests_passed = 0
    total_tests = 0

    print("🚀 Starting Claude Code Cubbi Integration Test Suite")
    print("=" * 60)

    # Test 1: Build image
    total_tests += 1
    if run_test(
        "Build Claude Code image",
        ["docker", "build", "-t", "cubbi-claudecode:test", "cubbi/images/claudecode/"],
        timeout=180,
    ):
        tests_passed += 1

    # Test 2: Tag image for Cubbi
    total_tests += 1
    if run_test(
        "Tag image for Cubbi",
        ["docker", "tag", "cubbi-claudecode:test", "monadical/cubbi-claudecode:latest"],
    ):
        tests_passed += 1

    # Test 3: Basic container startup
    total_tests += 1
    if run_test(
        "Container startup with test API key",
        [
            "docker",
            "run",
            "--rm",
            "-e",
            "ANTHROPIC_API_KEY=test-key",
            "cubbi-claudecode:test",
            "bash",
            "-c",
            "claude --version",
        ],
    ):
        tests_passed += 1

    # Test 4: Cubbi image list
    total_tests += 1
    if run_test(
        "Cubbi image list includes claudecode",
        ["uv", "run", "-m", "cubbi.cli", "image", "list"],
    ):
        tests_passed += 1

    # Test 5: Cubbi session creation
    total_tests += 1
    session_result = subprocess.run(
        [
            "uv",
            "run",
            "-m",
            "cubbi.cli",
            "session",
            "create",
            "--image",
            "claudecode",
            "--name",
            "test-automation",
            "--no-connect",
            "--env",
            "ANTHROPIC_API_KEY=test-key",
            "--run",
            "claude --version",
        ],
        capture_output=True,
        text=True,
        timeout=60,
    )

    if session_result.returncode == 0:
        print("🧪 Testing: Cubbi session creation")
        print("  ✅ PASS")
        tests_passed += 1

        # Extract session ID for cleanup
        session_id = None
        for line in session_result.stdout.split("\n"):
            if "Session ID:" in line:
                session_id = line.split("Session ID: ")[1].strip()
                break

        if session_id:
            # Test 6: Session cleanup
            total_tests += 1
            if run_test(
                "Clean up test session",
                ["uv", "run", "-m", "cubbi.cli", "session", "close", session_id],
            ):
                tests_passed += 1
        else:
            print("🧪 Testing: Clean up test session")
            print("  ⚠️ SKIP: Could not extract session ID")
            total_tests += 1
    else:
        print("🧪 Testing: Cubbi session creation")
        print(f"  ❌ FAIL: {session_result.stderr}")
        total_tests += 1  # The dependent cleanup test also counts as failed

    # Test 7: Session without API key
    total_tests += 1
    no_key_result = subprocess.run(
        [
            "uv",
            "run",
            "-m",
            "cubbi.cli",
            "session",
            "create",
            "--image",
            "claudecode",
            "--name",
            "test-no-key",
            "--no-connect",
            "--run",
            "claude --version",
        ],
        capture_output=True,
        text=True,
        timeout=60,
    )

    if no_key_result.returncode == 0:
        print("🧪 Testing: Session without API key")
        print("  ✅ PASS")
        tests_passed += 1

        # Extract session ID and close
        session_id = None
        for line in no_key_result.stdout.split("\n"):
            if "Session ID:" in line:
                session_id = line.split("Session ID: ")[1].strip()
                break

        if session_id:
            subprocess.run(
                ["uv", "run", "-m", "cubbi.cli", "session", "close", session_id],
                capture_output=True,
                timeout=30,
            )
    else:
        print("🧪 Testing: Session without API key")
        print(f"  ❌ FAIL: {no_key_result.stderr}")

    # Test 8: Persistent configuration test
    total_tests += 1
    persist_result = subprocess.run(
        [
            "uv",
            "run",
            "-m",
            "cubbi.cli",
            "session",
            "create",
            "--image",
            "claudecode",
            "--name",
            "test-persist-auto",
            "--project",
            "test-automation",
            "--no-connect",
            "--env",
            "ANTHROPIC_API_KEY=test-key",
            "--run",
            "echo 'automation test' > ~/.claude/automation.txt && cat ~/.claude/automation.txt",
        ],
        capture_output=True,
        text=True,
        timeout=60,
    )

    if persist_result.returncode == 0:
        print("🧪 Testing: Persistent configuration")
        print("  ✅ PASS")
        tests_passed += 1

        # Extract session ID and close
        session_id = None
        for line in persist_result.stdout.split("\n"):
            if "Session ID:" in line:
                session_id = line.split("Session ID: ")[1].strip()
                break

        if session_id:
            subprocess.run(
                ["uv", "run", "-m", "cubbi.cli", "session", "close", session_id],
                capture_output=True,
                timeout=30,
            )
    else:
        print("🧪 Testing: Persistent configuration")
        print(f"  ❌ FAIL: {persist_result.stderr}")

    print("=" * 60)
    print(f"📊 Test Results: {tests_passed}/{total_tests} tests passed")

    if tests_passed == total_tests:
        print("🎉 All tests passed! Claude Code integration is working correctly.")
        return True
    else:
        print(
            f"❌ {total_tests - tests_passed} test(s) failed. Please check the output above."
        )
        return False


def main():
    """Main test entry point"""
    success = test_suite()
    exit(0 if success else 1)


if __name__ == "__main__":
    main()
62
cubbi/images/crush/Dockerfile
Normal file
@@ -0,0 +1,62 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Crush AI coding assistant for Cubbi"

# Install system dependencies including gosu for user switching and shadow for useradd/groupadd
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    sudo \
    passwd \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    tmux \
    git-core \
    ripgrep \
    openssh-client \
    vim \
    nodejs \
    npm \
    && rm -rf /var/lib/apt/lists/*

# Install deps
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh

# Install crush via npm
RUN npm install -g @charmland/crush

# Create app directory
WORKDIR /app

# Copy initialization system
COPY cubbi_init.py /cubbi/cubbi_init.py
COPY crush_plugin.py /cubbi/crush_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml
COPY init-status.sh /cubbi/init-status.sh
RUN chmod +x /cubbi/cubbi_init.py /cubbi/init-status.sh
RUN echo '[ -x /cubbi/init-status.sh ] && /cubbi/init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_LINK_MODE=copy

# Pre-install the cubbi_init
RUN /cubbi/cubbi_init.py --help

# Set WORKDIR to /app, common practice and expected by cubbi-init.sh
WORKDIR /app

ENTRYPOINT ["/cubbi/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
77
cubbi/images/crush/README.md
Normal file
@@ -0,0 +1,77 @@
# Crush Image for Cubbi

This image provides a containerized environment for running [Crush](https://github.com/charmbracelet/crush), a terminal-based AI coding assistant.

## Features

- Pre-configured environment for Crush AI coding assistant
- Multi-model support (OpenAI, Anthropic, Groq)
- JSON-based configuration
- MCP server integration support
- Session preservation across runs

## Environment Variables

### AI Provider Configuration

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `OPENAI_API_KEY` | OpenAI API key for crush | No | - |
| `ANTHROPIC_API_KEY` | Anthropic API key for crush | No | - |
| `GROQ_API_KEY` | Groq API key for crush | No | - |
| `OPENAI_URL` | Custom OpenAI-compatible API URL | No | - |
| `CUBBI_MODEL` | AI model to use with crush | No | - |
| `CUBBI_PROVIDER` | AI provider to use with crush | No | - |

### Cubbi Core Variables

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `CUBBI_USER_ID` | UID for the container user | No | `1000` |
| `CUBBI_GROUP_ID` | GID for the container user | No | `1000` |
| `CUBBI_RUN_COMMAND` | Command to execute after initialization | No | - |
| `CUBBI_NO_SHELL` | Exit after command execution | No | `false` |
| `CUBBI_CONFIG_DIR` | Directory for persistent configurations | No | `/cubbi-config` |
| `CUBBI_PERSISTENT_LINKS` | Semicolon-separated list of source:target symlinks | No | - |

### MCP Integration Variables

| Variable | Description | Required |
|----------|-------------|----------|
| `MCP_COUNT` | Number of available MCP servers | No |
| `MCP_NAMES` | JSON array of MCP server names | No |
| `MCP_{idx}_NAME` | Name of MCP server at index | No |
| `MCP_{idx}_TYPE` | Type of MCP server | No |
| `MCP_{idx}_HOST` | Hostname of MCP server | No |
| `MCP_{idx}_URL` | Full URL for remote MCP servers | No |
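
For illustration, a single remote MCP server (index 0) could be wired up like this (hypothetical values; only the variable names come from the table above):

```bash
export MCP_COUNT=1
export MCP_NAMES='["docs"]'
export MCP_0_NAME=docs
export MCP_0_TYPE=remote
export MCP_0_URL=https://mcp.example.com/sse
```
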
## Build

To build this image:

```bash
cd cubbi/images/crush
docker build -t monadical/cubbi-crush:latest .
```

## Usage

```bash
# Create a new session with this image
cubbix -i crush

# Run crush with specific provider
cubbix -i crush -e CUBBI_PROVIDER=openai -e CUBBI_MODEL=gpt-4

# Test crush installation
cubbix -i crush --no-shell --run "crush --help"
```

## Configuration

Crush uses a JSON configuration file stored at `/home/cubbi/.config/crush/crush.json`. The plugin automatically configures the following (a sketch of the generated file appears after this list):

- AI providers based on available API keys
- Default models and providers from environment variables
- Session preservation settings
- MCP server integrations
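
A generated `crush.json` might look roughly like this (a sketch assuming one OpenAI provider, a default model, and one remote MCP server; all values are illustrative):

```json
{
  "$schema": "https://charm.land/crush.json",
  "providers": {
    "openai": {
      "api_key": "sk-...",
      "models": [{ "id": "gpt-4", "name": "gpt-4" }]
    }
  },
  "models": {
    "large": { "provider": "openai", "model": "gpt-4" },
    "small": { "provider": "openai", "model": "gpt-4" }
  },
  "mcps": {
    "docs": {
      "transport": { "type": "sse", "url": "https://mcp.example.com/sse" },
      "enabled": true
    }
  }
}
```
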
230
cubbi/images/crush/crush_plugin.py
Normal file
@@ -0,0 +1,230 @@
#!/usr/bin/env python3

import json
from pathlib import Path
from typing import Any

from cubbi_init import ToolPlugin, cubbi_config, set_ownership

STANDARD_PROVIDERS = ["anthropic", "openai", "google", "openrouter"]


class CrushPlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "crush"

    def _get_user_config_path(self) -> Path:
        return Path("/home/cubbi/.config/crush")

    def is_already_configured(self) -> bool:
        config_file = self._get_user_config_path() / "crush.json"
        return config_file.exists()

    def configure(self) -> bool:
        return self._setup_tool_configuration() and self._integrate_mcp_servers()

    def _map_provider_to_crush_format(
        self, provider_name: str, provider_config, is_default_provider: bool = False
    ) -> dict[str, Any] | None:
        # Handle standard providers without base_url
        if not provider_config.base_url:
            if provider_config.type in STANDARD_PROVIDERS:
                # Populate models for any standard provider that has models
                models_list = []
                if provider_config.models:
                    for model in provider_config.models:
                        model_id = model.get("id", "")
                        if model_id:
                            models_list.append({"id": model_id, "name": model_id})

                provider_entry = {
                    "api_key": provider_config.api_key,
                    "models": models_list,
                }
                return provider_entry

        # Handle custom providers with base_url
        models_list = []

        # Add all models for any provider type that has models
        if provider_config.models:
            for model in provider_config.models:
                model_id = model.get("id", "")
                if model_id:
                    models_list.append({"id": model_id, "name": model_id})

        provider_entry = {
            "api_key": provider_config.api_key,
            "base_url": provider_config.base_url,
            "models": models_list,
        }

        if provider_config.type in STANDARD_PROVIDERS:
            if provider_config.type == "anthropic":
                provider_entry["type"] = "anthropic"
            elif provider_config.type == "openai":
                provider_entry["type"] = "openai"
            elif provider_config.type == "google":
                provider_entry["type"] = "gemini"
            elif provider_config.type == "openrouter":
                provider_entry["type"] = "openai"
                provider_entry["name"] = f"{provider_name} ({provider_config.type})"
        else:
            provider_entry["type"] = "openai"
            provider_entry["name"] = f"{provider_name} ({provider_config.type})"

        return provider_entry

    def _setup_tool_configuration(self) -> bool:
        config_dir = self.create_directory_with_ownership(self._get_user_config_path())
        if not config_dir.exists():
            self.status.log(
                f"Config directory {config_dir} does not exist and could not be created",
                "ERROR",
            )
            return False

        config_file = config_dir / "crush.json"

        config_data = {"$schema": "https://charm.land/crush.json", "providers": {}}

        default_provider_name = None
        if cubbi_config.defaults.model:
            default_provider_name = cubbi_config.defaults.model.split("/", 1)[0]

        self.status.log(
            f"Found {len(cubbi_config.providers)} configured providers for Crush"
        )

        for provider_name, provider_config in cubbi_config.providers.items():
            is_default_provider = provider_name == default_provider_name
            crush_provider = self._map_provider_to_crush_format(
                provider_name, provider_config, is_default_provider
            )
            if crush_provider:
                crush_provider_name = (
                    "gemini" if provider_config.type == "google" else provider_name
                )
                config_data["providers"][crush_provider_name] = crush_provider
                self.status.log(
                    f"Added {crush_provider_name} provider to Crush configuration{' (default)' if is_default_provider else ''}"
                )

        if cubbi_config.defaults.model:
            provider_part, model_part = cubbi_config.defaults.model.split("/", 1)
            config_data["models"] = {
                "large": {"provider": provider_part, "model": model_part},
                "small": {"provider": provider_part, "model": model_part},
            }
            self.status.log(f"Set default model to {cubbi_config.defaults.model}")

            provider = cubbi_config.providers.get(provider_part)
            if provider and provider.base_url:
                config_data["providers"][provider_part]["models"].append(
                    {"id": model_part, "name": model_part}
                )

        if not config_data["providers"]:
            self.status.log(
                "No providers configured, skipping Crush configuration file creation"
            )
            return True

        try:
            with config_file.open("w") as f:
                json.dump(config_data, f, indent=2)

            set_ownership(config_file)

            self.status.log(
                f"Created Crush configuration at {config_file} with {len(config_data['providers'])} providers"
            )
            return True
        except Exception as e:
            self.status.log(f"Failed to write Crush configuration: {e}", "ERROR")
            return False

    def _integrate_mcp_servers(self) -> bool:
        if not cubbi_config.mcps:
            self.status.log("No MCP servers to integrate")
            return True

        config_dir = self.create_directory_with_ownership(self._get_user_config_path())
        if not config_dir.exists():
            self.status.log(
                f"Config directory {config_dir} does not exist and could not be created",
                "ERROR",
            )
            return False

        config_file = config_dir / "crush.json"

        if config_file.exists():
            try:
                with config_file.open("r") as f:
                    config_data = json.load(f)
            except (json.JSONDecodeError, OSError) as e:
                self.status.log(f"Failed to load existing config: {e}", "WARNING")
                config_data = {
                    "$schema": "https://charm.land/crush.json",
                    "providers": {},
                }
        else:
            config_data = {"$schema": "https://charm.land/crush.json", "providers": {}}

        if "mcps" not in config_data:
            config_data["mcps"] = {}

        for mcp in cubbi_config.mcps:
            if mcp.type == "remote":
                if mcp.name and mcp.url:
                    self.status.log(f"Adding remote MCP server: {mcp.name} - {mcp.url}")
                    config_data["mcps"][mcp.name] = {
                        "transport": {"type": "sse", "url": mcp.url},
                        "enabled": True,
                    }
            elif mcp.type == "local":
                if mcp.name and mcp.command:
                    self.status.log(
                        f"Adding local MCP server: {mcp.name} - {mcp.command}"
                    )
                    # Crush uses stdio type for local MCPs
                    transport_config = {
                        "type": "stdio",
                        "command": mcp.command,
                    }
                    if mcp.args:
                        transport_config["args"] = mcp.args
                    if mcp.env:
                        transport_config["env"] = mcp.env
                    config_data["mcps"][mcp.name] = {
                        "transport": transport_config,
                        "enabled": True,
                    }
            elif mcp.type in ["docker", "proxy"]:
                if mcp.name and mcp.host:
                    mcp_port = mcp.port or 8080
                    mcp_url = f"http://{mcp.host}:{mcp_port}/sse"
                    self.status.log(f"Adding MCP server: {mcp.name} - {mcp_url}")
                    config_data["mcps"][mcp.name] = {
                        "transport": {"type": "sse", "url": mcp_url},
                        "enabled": True,
                    }

        try:
            with config_file.open("w") as f:
                json.dump(config_data, f, indent=2)

            set_ownership(config_file)

            self.status.log(
                f"Integrated {len(cubbi_config.mcps)} MCP servers into Crush configuration"
            )
            return True
        except Exception as e:
            self.status.log(f"Failed to integrate MCP servers: {e}", "ERROR")
            return False


PLUGIN_CLASS = CrushPlugin
16
cubbi/images/crush/cubbi_image.yaml
Normal file
@@ -0,0 +1,16 @@
name: crush
description: Crush AI coding assistant environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-crush:latest
persistent_configs: []
environments_to_forward:
  # API Keys
  - OPENAI_API_KEY
  - ANTHROPIC_API_KEY
  - ANTHROPIC_AUTH_TOKEN
  - ANTHROPIC_CUSTOM_HEADERS
  - DEEPSEEK_API_KEY
  - GEMINI_API_KEY
  - OPENROUTER_API_KEY
  - AIDER_API_KEYS
799
cubbi/images/cubbi_init.py
Executable file
@@ -0,0 +1,799 @@
#!/usr/bin/env -S uv run --script
# /// script
# dependencies = ["ruamel.yaml", "pydantic"]
# ///
"""
Standalone Cubbi initialization script

This is a self-contained script that includes all the necessary initialization
logic without requiring the full cubbi package to be installed.
"""

import grp
import importlib.util
import os
import pwd
import shutil
import subprocess
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

from pydantic import BaseModel
from ruamel.yaml import YAML


class UserConfig(BaseModel):
    uid: int = 1000
    gid: int = 1000


class ProjectConfig(BaseModel):
    url: str | None = None
    config_dir: str | None = None
    image_config_dir: str | None = None


class PersistentLink(BaseModel):
    source: str
    target: str
    type: str


class ProviderConfig(BaseModel):
    type: str
    api_key: str
    base_url: str | None = None
    models: list[dict[str, str]] = []


class MCPConfig(BaseModel):
    name: str
    type: str
    host: str | None = None
    port: int | None = None
    url: str | None = None
    headers: dict[str, str] | None = None
    command: str | None = None
    args: list[str] = []
    env: dict[str, str] = {}


class DefaultsConfig(BaseModel):
    model: str | None = None


class SSHConfig(BaseModel):
    enabled: bool = False


class CubbiConfig(BaseModel):
    version: str = "1.0"
    user: UserConfig = UserConfig()
    providers: dict[str, ProviderConfig] = {}
    mcps: list[MCPConfig] = []
    project: ProjectConfig = ProjectConfig()
    persistent_links: list[PersistentLink] = []
    defaults: DefaultsConfig = DefaultsConfig()
    ssh: SSHConfig = SSHConfig()
    run_command: str | None = None
    no_shell: bool = False

    def get_provider_for_default_model(self) -> ProviderConfig | None:
        if not self.defaults.model or "/" not in self.defaults.model:
            return None

        provider_name = self.defaults.model.split("/")[0]
        return self.providers.get(provider_name)


def load_cubbi_config() -> CubbiConfig:
    config_path = Path("/cubbi/config.yaml")
    if not config_path.exists():
        return CubbiConfig()

    yaml = YAML(typ="safe")
    with open(config_path, "r") as f:
        config_data = yaml.load(f) or {}

    return CubbiConfig(**config_data)


cubbi_config = load_cubbi_config()


def get_user_ids() -> tuple[int, int]:
    return cubbi_config.user.uid, cubbi_config.user.gid


def set_ownership(path: Path) -> None:
    user_id, group_id = get_user_ids()
    try:
        os.chown(path, user_id, group_id)
    except OSError:
        pass


class StatusManager:
    def __init__(
        self, log_file: str = "/cubbi/init.log", status_file: str = "/cubbi/init.status"
    ):
        self.log_file = Path(log_file)
        self.status_file = Path(status_file)
        self._setup_logging()

    def _setup_logging(self) -> None:
        self.log_file.touch(exist_ok=True)
        self.set_status(False)

    def log(self, message: str, level: str = "INFO") -> None:
        print(message)
        sys.stdout.flush()

        with open(self.log_file, "a") as f:
            f.write(message + "\n")
            f.flush()

    def set_status(self, complete: bool) -> None:
        status = "true" if complete else "false"
        with open(self.status_file, "w") as f:
            f.write(f"INIT_COMPLETE={status}\n")

    def start_initialization(self) -> None:
        self.set_status(False)

    def complete_initialization(self) -> None:
        self.set_status(True)


@dataclass
class PersistentConfig:
    source: str
    target: str
    type: str = "directory"
    description: str = ""


@dataclass
class ImageConfig:
    name: str
    description: str
    version: str
    maintainer: str
    image: str
    persistent_configs: list[PersistentConfig] = field(default_factory=list)
    environments_to_forward: list[str] = field(default_factory=list)


class ConfigParser:
    def __init__(self, config_file: str = "/cubbi/cubbi_image.yaml"):
        self.config_file = Path(config_file)
        self.environment: dict[str, str] = dict(os.environ)

    def load_image_config(self) -> ImageConfig:
        if not self.config_file.exists():
            raise FileNotFoundError(f"Configuration file not found: {self.config_file}")

        yaml = YAML(typ="safe")
        with open(self.config_file, "r") as f:
            config_data = yaml.load(f)

        persistent_configs = []
        for pc_data in config_data.get("persistent_configs", []):
            persistent_configs.append(PersistentConfig(**pc_data))

        return ImageConfig(
            name=config_data["name"],
            description=config_data["description"],
            version=config_data["version"],
            maintainer=config_data["maintainer"],
            image=config_data["image"],
            persistent_configs=persistent_configs,
            environments_to_forward=config_data.get("environments_to_forward", []),
        )


class UserManager:
    def __init__(self, status: StatusManager):
        self.status = status
        self.username = "cubbi"

    def _run_command(self, cmd: list[str]) -> bool:
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            if result.stdout:
                self.status.log(f"Command output: {result.stdout.strip()}")
            return True
        except subprocess.CalledProcessError as e:
            self.status.log(f"Command failed: {' '.join(cmd)}", "ERROR")
            self.status.log(f"Error: {e.stderr}", "ERROR")
            return False

    def setup_user_and_group(self, user_id: int, group_id: int) -> bool:
        self.status.log(
            f"Setting up user '{self.username}' with UID: {user_id}, GID: {group_id}"
        )

        try:
            existing_group = grp.getgrnam(self.username)
            if existing_group.gr_gid != group_id:
                self.status.log(
                    f"Modifying group '{self.username}' GID from {existing_group.gr_gid} to {group_id}"
                )
                if not self._run_command(
                    ["groupmod", "-g", str(group_id), self.username]
                ):
                    return False
        except KeyError:
            self._run_command(["groupadd", "-g", str(group_id), self.username])
        try:
            existing_user = pwd.getpwnam(self.username)
            if existing_user.pw_uid != user_id or existing_user.pw_gid != group_id:
                self.status.log(
                    f"Modifying user '{self.username}' UID from {existing_user.pw_uid} to {user_id}, GID from {existing_user.pw_gid} to {group_id}"
                )
                if not self._run_command(
                    [
                        "usermod",
                        "--uid",
                        str(user_id),
                        "--gid",
                        str(group_id),
                        self.username,
                    ]
                ):
                    return False
        except KeyError:
            if not self._run_command(
                [
                    "useradd",
                    "--shell",
                    "/bin/bash",
                    "--uid",
                    str(user_id),
                    "--gid",
                    str(group_id),
                    "--no-create-home",
                    self.username,
                ]
            ):
                return False

        sudoers_command = [
            "sh",
            "-c",
            "echo 'cubbi ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/cubbi && chmod 0440 /etc/sudoers.d/cubbi",
        ]
        if not self._run_command(sudoers_command):
            self.status.log("Failed to create sudoers entry for cubbi", "ERROR")
            return False

        return True


class DirectoryManager:
    def __init__(self, status: StatusManager):
        self.status = status

    def create_directory(
        self, path: str, user_id: int, group_id: int, mode: int = 0o755
    ) -> bool:
        dir_path = Path(path)

        try:
            dir_path.mkdir(parents=True, exist_ok=True)
            os.chown(path, user_id, group_id)
            dir_path.chmod(mode)
            self.status.log(f"Created directory: {path}")
            return True
        except Exception as e:
            self.status.log(
                f"Failed to create/configure directory {path}: {e}", "ERROR"
            )
            return False

    def setup_standard_directories(self, user_id: int, group_id: int) -> bool:
        directories = [
            ("/app", 0o755),
            ("/cubbi-config", 0o755),
            ("/cubbi-config/home", 0o755),
        ]

        self.status.log("Setting up standard directories")

        success = True
        for dir_path, mode in directories:
            if not self.create_directory(dir_path, user_id, group_id, mode):
                success = False

        # Create /home/cubbi as a symlink to /cubbi-config/home
        try:
            home_cubbi = Path("/home/cubbi")
            if home_cubbi.exists() or home_cubbi.is_symlink():
                home_cubbi.unlink()

            self.status.log("Creating /home/cubbi as symlink to /cubbi-config/home")
            home_cubbi.symlink_to("/cubbi-config/home")
            os.lchown("/home/cubbi", user_id, group_id)
        except Exception as e:
            self.status.log(f"Failed to create home directory symlink: {e}", "ERROR")
            success = False

        # Create .local directory in the persistent home
        local_dir = Path("/cubbi-config/home/.local")
        if not self.create_directory(str(local_dir), user_id, group_id, 0o755):
            success = False

        # Copy /root/.local/bin to user's home if it exists
        root_local_bin = Path("/root/.local/bin")
        if root_local_bin.exists():
            user_local_bin = Path("/cubbi-config/home/.local/bin")
            try:
                user_local_bin.mkdir(parents=True, exist_ok=True)

                for item in root_local_bin.iterdir():
                    if item.is_file():
                        shutil.copy2(item, user_local_bin / item.name)
                    elif item.is_dir():
                        shutil.copytree(
                            item, user_local_bin / item.name, dirs_exist_ok=True
                        )

                self._chown_recursive(user_local_bin, user_id, group_id)
                self.status.log("Copied /root/.local/bin to user directory")

            except Exception as e:
                self.status.log(f"Failed to copy /root/.local/bin: {e}", "ERROR")
                success = False

        return success

    def _chown_recursive(self, path: Path, user_id: int, group_id: int) -> None:
        try:
            os.chown(path, user_id, group_id)
            for item in path.iterdir():
                if item.is_dir():
                    self._chown_recursive(item, user_id, group_id)
                else:
                    os.chown(item, user_id, group_id)
        except Exception as e:
            self.status.log(
                f"Warning: Could not change ownership of {path}: {e}", "WARNING"
            )


class ConfigManager:
    def __init__(self, status: StatusManager):
        self.status = status

    def create_symlink(
        self, source_path: str, target_path: str, user_id: int, group_id: int
    ) -> bool:
        try:
            source = Path(source_path)

            parent_dir = source.parent
            if not parent_dir.exists():
                self.status.log(f"Creating parent directory: {parent_dir}")
                parent_dir.mkdir(parents=True, exist_ok=True)
                os.chown(parent_dir, user_id, group_id)

            self.status.log(f"Creating symlink: {source_path} -> {target_path}")
            if source.is_symlink() or source.exists():
                source.unlink()

            source.symlink_to(target_path)
            os.lchown(source_path, user_id, group_id)

            return True
        except Exception as e:
            self.status.log(
                f"Failed to create symlink {source_path} -> {target_path}: {e}", "ERROR"
            )
            return False

    def _ensure_target_directory(
        self, target_path: str, user_id: int, group_id: int
    ) -> bool:
        try:
            target_dir = Path(target_path)
            if not target_dir.exists():
                self.status.log(f"Creating target directory: {target_path}")
                target_dir.mkdir(parents=True, exist_ok=True)

            # Set ownership of the target directory to cubbi user
            os.chown(target_path, user_id, group_id)
            self.status.log(f"Set ownership of {target_path} to {user_id}:{group_id}")
            return True
        except Exception as e:
            self.status.log(
                f"Failed to ensure target directory {target_path}: {e}", "ERROR"
            )
            return False

    def setup_persistent_configs(
        self, persistent_configs: list[PersistentConfig], user_id: int, group_id: int
    ) -> bool:
        if not persistent_configs:
            self.status.log("No persistent configurations defined in image config")
            return True

        success = True
        for config in persistent_configs:
            # Ensure target directory exists with proper ownership
            if not self._ensure_target_directory(config.target, user_id, group_id):
                success = False
                continue

            if not self.create_symlink(config.source, config.target, user_id, group_id):
                success = False

        return success

    def setup_persistent_link(
        self, source: str, target: str, link_type: str, user_id: int, group_id: int
    ) -> bool:
        """Setup a single persistent link"""
        if not self._ensure_target_directory(target, user_id, group_id):
            return False

        return self.create_symlink(source, target, user_id, group_id)


class CommandManager:
    def __init__(self, status: StatusManager):
        self.status = status
        self.username = "cubbi"

    def run_as_user(self, command: list[str], user: str = None) -> int:
        if user is None:
            user = self.username

        full_command = ["gosu", user] + command
        self.status.log(f"Executing as {user}: {' '.join(command)}")

        try:
            result = subprocess.run(full_command, check=False)
            return result.returncode
        except Exception as e:
            self.status.log(f"Failed to execute command: {e}", "ERROR")
            return 1

    def run_user_command(self, command: str) -> int:
        if not command:
            return 0

        self.status.log(f"Executing user command: {command}")
        return self.run_as_user(["sh", "-c", command])

    def exec_as_user(self, args: list[str]) -> None:
        if not args:
            args = ["tail", "-f", "/dev/null"]

        self.status.log(
            f"Switching to user '{self.username}' and executing: {' '.join(args)}"
        )

        try:
            os.execvp("gosu", ["gosu", self.username] + args)
        except Exception as e:
            self.status.log(f"Failed to exec as user: {e}", "ERROR")
            sys.exit(1)


class ToolPlugin(ABC):
    def __init__(self, status: StatusManager, config: dict[str, Any]):
        self.status = status
        self.config = config

    @property
    @abstractmethod
    def tool_name(self) -> str:
        pass

    def create_directory_with_ownership(self, path: Path) -> Path:
        try:
            path.mkdir(parents=True, exist_ok=True)
            set_ownership(path)

            # Also set ownership on parent directories if they were created
            parent = path.parent
            if parent.exists() and parent != Path("/"):
                set_ownership(parent)

        except OSError as e:
            self.status.log(f"Failed to create directory {path}: {e}", "ERROR")

        return path

    @abstractmethod
    def is_already_configured(self) -> bool:
        pass

    @abstractmethod
|
||||
def configure(self) -> bool:
|
||||
pass
|
||||
|
||||
def get_resolved_model(self) -> dict[str, Any] | None:
|
||||
model_spec = os.environ.get("CUBBI_MODEL_SPEC", "")
|
||||
if not model_spec:
|
||||
return None
|
||||
|
||||
# Parse provider/model format
|
||||
if "/" in model_spec:
|
||||
provider_name, model_name = model_spec.split("/", 1)
|
||||
else:
|
||||
# Legacy format - treat as provider name
|
||||
provider_name = model_spec
|
||||
model_name = ""
|
||||
|
||||
# Get provider type from CUBBI_PROVIDER env var
|
||||
provider_type = os.environ.get("CUBBI_PROVIDER", provider_name)
|
||||
|
||||
# Get base URL if available (for OpenAI-compatible providers)
|
||||
base_url = None
|
||||
if provider_type == "openai":
|
||||
base_url = os.environ.get("OPENAI_URL")
|
||||
|
||||
return {
|
||||
"provider_name": provider_name,
|
||||
"provider_type": provider_type,
|
||||
"model_name": model_name,
|
||||
"base_url": base_url,
|
||||
"model_spec": model_spec,
|
||||
}
|
||||
|
||||
def get_provider_config(self, provider_name: str) -> dict[str, str]:
|
||||
provider_config = {}
|
||||
|
||||
# Map provider names to their environment variables
|
||||
if provider_name == "anthropic" or provider_name.startswith("anthropic"):
|
||||
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
||||
if api_key:
|
||||
provider_config["ANTHROPIC_API_KEY"] = api_key
|
||||
|
||||
elif provider_name == "openai" or provider_name.startswith("openai"):
|
||||
api_key = os.environ.get("OPENAI_API_KEY")
|
||||
base_url = os.environ.get("OPENAI_URL")
|
||||
if api_key:
|
||||
provider_config["OPENAI_API_KEY"] = api_key
|
||||
if base_url:
|
||||
provider_config["OPENAI_URL"] = base_url
|
||||
|
||||
elif provider_name == "google" or provider_name.startswith("google"):
|
||||
api_key = os.environ.get("GOOGLE_API_KEY")
|
||||
if api_key:
|
||||
provider_config["GOOGLE_API_KEY"] = api_key
|
||||
|
||||
elif provider_name == "openrouter" or provider_name.startswith("openrouter"):
|
||||
api_key = os.environ.get("OPENROUTER_API_KEY")
|
||||
if api_key:
|
||||
provider_config["OPENROUTER_API_KEY"] = api_key
|
||||
|
||||
return provider_config
|
||||
|
||||
def get_all_providers_config(self) -> dict[str, dict[str, str]]:
|
||||
all_providers = {}
|
||||
|
||||
# Check for each standard provider
|
||||
standard_providers = ["anthropic", "openai", "google", "openrouter"]
|
||||
|
||||
for provider_name in standard_providers:
|
||||
provider_config = self.get_provider_config(provider_name)
|
||||
if provider_config: # Only include providers with API keys
|
||||
all_providers[provider_name] = provider_config
|
||||
|
||||
# Also check for custom OpenAI-compatible providers
|
||||
# These would have been set up with custom names but use OpenAI env vars
|
||||
openai_config = self.get_provider_config("openai")
|
||||
if openai_config and "OPENAI_URL" in openai_config:
|
||||
# This might be a custom provider - we could check for custom naming
|
||||
# but for now, we'll just include it as openai
|
||||
pass
|
||||
|
||||
return all_providers
|
||||
|
||||
|
||||
class CubbiInitializer:
|
||||
"""Main Cubbi initialization orchestrator"""
|
||||
|
||||
def __init__(self):
|
||||
self.status = StatusManager()
|
||||
self.config_parser = ConfigParser()
|
||||
self.user_manager = UserManager(self.status)
|
||||
self.directory_manager = DirectoryManager(self.status)
|
||||
self.config_manager = ConfigManager(self.status)
|
||||
self.command_manager = CommandManager(self.status)
|
||||
|
||||
def run_initialization(self, final_args: list[str]) -> None:
|
||||
"""Run the complete initialization process"""
|
||||
try:
|
||||
self.status.start_initialization()
|
||||
|
||||
# Load configuration
|
||||
image_config = self.config_parser.load_image_config()
|
||||
|
||||
self.status.log(f"Initializing {image_config.name} v{image_config.version}")
|
||||
|
||||
# Core initialization
|
||||
success = self._run_core_initialization(image_config)
|
||||
if not success:
|
||||
self.status.log("Core initialization failed", "ERROR")
|
||||
sys.exit(1)
|
||||
|
||||
# Tool-specific initialization
|
||||
success = self._run_tool_initialization(image_config)
|
||||
if not success:
|
||||
self.status.log("Tool initialization failed", "ERROR")
|
||||
sys.exit(1)
|
||||
|
||||
# Mark complete
|
||||
self.status.complete_initialization()
|
||||
|
||||
# Handle commands
|
||||
self._handle_command_execution(final_args)
|
||||
|
||||
except Exception as e:
|
||||
self.status.log(f"Initialization failed with error: {e}", "ERROR")
|
||||
sys.exit(1)
|
||||
|
||||
def _run_core_initialization(self, image_config) -> bool:
|
||||
user_id = cubbi_config.user.uid
|
||||
group_id = cubbi_config.user.gid
|
||||
|
||||
if not self.user_manager.setup_user_and_group(user_id, group_id):
|
||||
return False
|
||||
|
||||
if not self.directory_manager.setup_standard_directories(user_id, group_id):
|
||||
return False
|
||||
|
||||
if cubbi_config.project.config_dir:
|
||||
config_path = Path(cubbi_config.project.config_dir)
|
||||
if not config_path.exists():
|
||||
self.status.log(
|
||||
f"Creating config directory: {cubbi_config.project.config_dir}"
|
||||
)
|
||||
try:
|
||||
config_path.mkdir(parents=True, exist_ok=True)
|
||||
os.chown(cubbi_config.project.config_dir, user_id, group_id)
|
||||
except Exception as e:
|
||||
self.status.log(f"Failed to create config directory: {e}", "ERROR")
|
||||
return False
|
||||
|
||||
# Setup persistent configs
|
||||
for link in cubbi_config.persistent_links:
|
||||
if not self.config_manager.setup_persistent_link(
|
||||
link.source, link.target, link.type, user_id, group_id
|
||||
):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _run_tool_initialization(self, image_config) -> bool:
|
||||
# Look for a tool-specific plugin file in the same directory
|
||||
plugin_name = image_config.name.lower().replace("-", "_")
|
||||
plugin_file = Path(__file__).parent / f"{plugin_name}_plugin.py"
|
||||
|
||||
if not plugin_file.exists():
|
||||
self.status.log(
|
||||
f"No tool-specific plugin found at {plugin_file}, skipping tool initialization"
|
||||
)
|
||||
return True
|
||||
|
||||
try:
|
||||
# Dynamically load the plugin module
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
f"{image_config.name.lower()}_plugin", plugin_file
|
||||
)
|
||||
plugin_module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(plugin_module)
|
||||
|
||||
# Get the plugin class from the standard export variable
|
||||
if not hasattr(plugin_module, "PLUGIN_CLASS"):
|
||||
self.status.log(
|
||||
f"No PLUGIN_CLASS variable found in {plugin_file}", "ERROR"
|
||||
)
|
||||
return False
|
||||
|
||||
plugin_class = plugin_module.PLUGIN_CLASS
|
||||
|
||||
# Instantiate and run the plugin
|
||||
plugin = plugin_class(self.status, {"image_config": image_config})
|
||||
|
||||
self.status.log(f"Running {plugin.tool_name}-specific initialization")
|
||||
|
||||
if not plugin.is_already_configured():
|
||||
if not plugin.configure():
|
||||
self.status.log(f"{plugin.tool_name} configuration failed", "ERROR")
|
||||
return False
|
||||
else:
|
||||
self.status.log(f"{plugin.tool_name} is already configured, skipping")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.status.log(
|
||||
f"Failed to load or execute plugin {plugin_file}: {e}", "ERROR"
|
||||
)
|
||||
return False
|
||||
|
||||
def _handle_command_execution(self, final_args):
|
||||
exit_code = 0
|
||||
|
||||
if cubbi_config.run_command:
|
||||
self.status.log("--- Executing initial command ---")
|
||||
exit_code = self.command_manager.run_user_command(cubbi_config.run_command)
|
||||
self.status.log(
|
||||
f"--- Initial command finished (exit code: {exit_code}) ---"
|
||||
)
|
||||
|
||||
if cubbi_config.no_shell:
|
||||
self.status.log(
|
||||
"--- no_shell=true, exiting container without starting shell ---"
|
||||
)
|
||||
sys.exit(exit_code)
|
||||
|
||||
self.command_manager.exec_as_user(final_args)
|
||||
|
||||
|
||||
def main() -> int:
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Cubbi container initialization script",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
This script initializes a Cubbi container environment by:
|
||||
1. Setting up user and group with proper IDs
|
||||
2. Creating standard directories with correct permissions
|
||||
3. Setting up persistent configuration symlinks
|
||||
4. Running tool-specific initialization if available
|
||||
5. Executing user commands or starting an interactive shell
|
||||
|
||||
Environment Variables:
|
||||
CUBBI_USER_ID User ID for the cubbi user (default: 1000)
|
||||
CUBBI_GROUP_ID Group ID for the cubbi user (default: 1000)
|
||||
CUBBI_RUN_COMMAND Initial command to run before shell
|
||||
CUBBI_NO_SHELL Exit after run command instead of starting shell
|
||||
CUBBI_CONFIG_DIR Configuration directory path (default: /cubbi-config)
|
||||
MCP_COUNT Number of MCP servers to configure
|
||||
MCP_<N>_NAME Name of MCP server N
|
||||
MCP_<N>_TYPE Type of MCP server N
|
||||
MCP_<N>_HOST Host of MCP server N
|
||||
MCP_<N>_URL URL of MCP server N
|
||||
|
||||
Examples:
|
||||
cubbi_init.py # Initialize and start bash shell
|
||||
cubbi_init.py --help # Show this help message
|
||||
cubbi_init.py /bin/zsh # Initialize and start zsh shell
|
||||
cubbi_init.py python script.py # Initialize and run python script
|
||||
""",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"command",
|
||||
nargs="*",
|
||||
help="Command to execute after initialization (default: interactive shell)",
|
||||
)
|
||||
|
||||
# Parse known args to handle cases where the command might have its own arguments
|
||||
args, unknown = parser.parse_known_args()
|
||||
|
||||
# Combine parsed command with unknown args
|
||||
final_args = args.command + unknown
|
||||
|
||||
# Handle the common case where docker CMD passes ["tail", "-f", "/dev/null"]
|
||||
# This should be treated as "no specific command" (empty args)
|
||||
if final_args == ["tail", "-f", "/dev/null"]:
|
||||
final_args = []
|
||||
|
||||
initializer = CubbiInitializer()
|
||||
initializer.run_initialization(final_args)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
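The loader in `_run_tool_initialization` only requires that a `<name>_plugin.py` module sit next to `cubbi_init.py` and expose a module-level `PLUGIN_CLASS` pointing at a `ToolPlugin` subclass. A minimal sketch of such a module (the tool name `mytool` is hypothetical, not part of the repository):

```python
# mytool_plugin.py - hypothetical minimal plugin, discovered by _run_tool_initialization
from cubbi_init import ToolPlugin


class MyToolPlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "mytool"

    def is_already_configured(self) -> bool:
        # Returning False forces configure() to run on every container start
        return False

    def configure(self) -> bool:
        self.status.log("Configuring mytool")
        return True


# cubbi_init looks up the plugin class through this standard export variable
PLUGIN_CLASS = MyToolPlugin
```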
cubbi/images/goose/Dockerfile (Normal file, 62 lines)
@@ -0,0 +1,62 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Goose for Cubbi"

# Install system dependencies including gosu for user switching and shadow for useradd/groupadd
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    sudo \
    passwd \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    tmux \
    git-core \
    ripgrep \
    openssh-client \
    vim \
    && rm -rf /var/lib/apt/lists/*

# Install deps
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh
RUN curl -fsSL https://github.com/block/goose/releases/download/stable/download_cli.sh -o download_cli.sh && \
    chmod +x download_cli.sh && \
    ./download_cli.sh && \
    mv /root/.local/bin/goose /usr/local/bin/goose && \
    rm -rf download_cli.sh /tmp/goose-*

# Create app directory
WORKDIR /app

# Copy initialization system
COPY cubbi_init.py /cubbi/cubbi_init.py
COPY goose_plugin.py /cubbi/goose_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml
COPY init-status.sh /cubbi/init-status.sh
RUN chmod +x /cubbi/cubbi_init.py /cubbi/init-status.sh
RUN echo '[ -x /cubbi/init-status.sh ] && /cubbi/init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_LINK_MODE=copy

# Pre-run cubbi_init once to verify it works and warm the bytecode cache
RUN /cubbi/cubbi_init.py --help

# Set WORKDIR to /app, common practice and expected by cubbi-init.sh
WORKDIR /app

ENTRYPOINT ["/cubbi/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
cubbi/images/goose/README.md (Normal file, 63 lines)
@@ -0,0 +1,63 @@
# Goose Image for Cubbi

This image provides a containerized environment for running [Goose](https://github.com/block/goose).

## Features

- Pre-configured environment for Goose AI
- Langfuse logging support

## Environment Variables

### Goose Configuration

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `CUBBI_MODEL` | Model to use with Goose | No | - |
| `CUBBI_PROVIDER` | Provider to use with Goose | No | - |

### Langfuse Integration

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` | Langfuse public key | No | - |
| `LANGFUSE_INIT_PROJECT_SECRET_KEY` | Langfuse secret key | No | - |
| `LANGFUSE_URL` | Langfuse API URL | No | `https://cloud.langfuse.com` |

### Cubbi Core Variables

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `CUBBI_USER_ID` | UID for the container user | No | `1000` |
| `CUBBI_GROUP_ID` | GID for the container user | No | `1000` |
| `CUBBI_RUN_COMMAND` | Command to execute after initialization | No | - |
| `CUBBI_NO_SHELL` | Exit after command execution | No | `false` |
| `CUBBI_CONFIG_DIR` | Directory for persistent configurations | No | `/cubbi-config` |
| `CUBBI_PERSISTENT_LINKS` | Semicolon-separated list of source:target symlinks | No | - |

### MCP Integration Variables

| Variable | Description | Required |
|----------|-------------|----------|
| `MCP_COUNT` | Number of available MCP servers | No |
| `MCP_NAMES` | JSON array of MCP server names | No |
| `MCP_{idx}_NAME` | Name of MCP server at index | No |
| `MCP_{idx}_TYPE` | Type of MCP server | No |
| `MCP_{idx}_HOST` | Hostname of MCP server | No |
| `MCP_{idx}_URL` | Full URL for remote MCP servers | No |

## Build

To build this image:

```bash
cd cubbi/images/goose
docker build -t monadical/cubbi-goose:latest .
```

## Usage

```bash
# Create a new session with this image
cubbix -i goose
```
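To experiment with the initialization outside of Cubbi, the image can also be started directly with Docker. A minimal sketch, assuming an Anthropic key is exported in your shell (only variables documented above are used):

```bash
docker run --rm -it \
  -e CUBBI_USER_ID="$(id -u)" \
  -e CUBBI_GROUP_ID="$(id -g)" \
  -e ANTHROPIC_API_KEY \
  monadical/cubbi-goose:latest
```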
cubbi/images/goose/cubbi_image.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
name: goose
description: Goose AI environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-goose:latest
persistent_configs: []
environments_to_forward:
  # API Keys
  - OPENAI_API_KEY
  - ANTHROPIC_API_KEY
  - ANTHROPIC_AUTH_TOKEN
  - ANTHROPIC_CUSTOM_HEADERS
  - DEEPSEEK_API_KEY
  - GEMINI_API_KEY
  - OPENROUTER_API_KEY
  - AIDER_API_KEYS
cubbi/images/goose/goose_plugin.py (Normal file, 246 lines)
@@ -0,0 +1,246 @@
#!/usr/bin/env python3

import os
from pathlib import Path

from cubbi_init import ToolPlugin, cubbi_config, set_ownership
from ruamel.yaml import YAML


class GoosePlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "goose"

    def is_already_configured(self) -> bool:
        config_file = Path("/home/cubbi/.config/goose/config.yaml")
        return config_file.exists()

    def configure(self) -> bool:
        self._ensure_user_config_dir()
        if not self.setup_tool_configuration():
            return False
        return self.integrate_mcp_servers()

    def _get_user_config_path(self) -> Path:
        return Path("/home/cubbi/.config/goose")

    def _ensure_user_config_dir(self) -> Path:
        config_dir = self._get_user_config_path()
        return self.create_directory_with_ownership(config_dir)

    def _write_env_vars_to_profile(self, env_vars: dict) -> None:
        try:
            profile_path = Path("/home/cubbi/.bashrc")

            env_section_start = "# CUBBI GOOSE ENVIRONMENT VARIABLES"
            env_section_end = "# END CUBBI GOOSE ENVIRONMENT VARIABLES"

            if profile_path.exists():
                with open(profile_path, "r") as f:
                    lines = f.readlines()
            else:
                lines = []

            new_lines = []
            skip_section = False
            for line in lines:
                if env_section_start in line:
                    skip_section = True
                elif env_section_end in line:
                    skip_section = False
                    continue
                elif not skip_section:
                    new_lines.append(line)

            if env_vars:
                new_lines.append(f"\n{env_section_start}\n")
                for key, value in env_vars.items():
                    new_lines.append(f'export {key}="{value}"\n')
                new_lines.append(f"{env_section_end}\n")

            profile_path.parent.mkdir(parents=True, exist_ok=True)
            with open(profile_path, "w") as f:
                f.writelines(new_lines)

            set_ownership(profile_path)

            self.status.log(
                f"Updated shell profile with {len(env_vars)} environment variables"
            )

        except Exception as e:
            self.status.log(
                f"Failed to write environment variables to profile: {e}", "ERROR"
            )

    def setup_tool_configuration(self) -> bool:
        config_dir = self._ensure_user_config_dir()
        if not config_dir.exists():
            self.status.log(
                f"Config directory {config_dir} does not exist and could not be created",
                "ERROR",
            )
            return False

        config_file = config_dir / "config.yaml"
        yaml = YAML(typ="safe")

        # Load or initialize configuration
        if config_file.exists():
            with config_file.open("r") as f:
                config_data = yaml.load(f) or {}
        else:
            config_data = {}

        if "extensions" not in config_data:
            config_data["extensions"] = {}

        # Add default developer extension
        config_data["extensions"]["developer"] = {
            "enabled": True,
            "name": "developer",
            "timeout": 300,
            "type": "builtin",
        }

        # Configure Goose with the default model
        provider_config = cubbi_config.get_provider_for_default_model()
        if provider_config and cubbi_config.defaults.model:
            _, model_name = cubbi_config.defaults.model.split("/", 1)

            # Set Goose model and provider
            config_data["GOOSE_MODEL"] = model_name
            config_data["GOOSE_PROVIDER"] = provider_config.type

            # Set ONLY the specific API key for the selected provider
            # Set both in current process AND in shell environment file
            env_vars_to_set = {}

            if provider_config.type == "anthropic" and provider_config.api_key:
                env_vars_to_set["ANTHROPIC_API_KEY"] = provider_config.api_key
                self.status.log("Set Anthropic API key for goose")
            elif provider_config.type == "openai" and provider_config.api_key:
                # For OpenAI-compatible providers (including litellm), goose expects OPENAI_API_KEY
                env_vars_to_set["OPENAI_API_KEY"] = provider_config.api_key
                self.status.log("Set OpenAI API key for goose")
                # Set base URL for OpenAI-compatible providers in both env and config
                if provider_config.base_url:
                    env_vars_to_set["OPENAI_BASE_URL"] = provider_config.base_url
                    config_data["OPENAI_HOST"] = provider_config.base_url
                    self.status.log(
                        f"Set OPENAI_BASE_URL and OPENAI_HOST to {provider_config.base_url}"
                    )
            elif provider_config.type == "google" and provider_config.api_key:
                env_vars_to_set["GOOGLE_API_KEY"] = provider_config.api_key
                self.status.log("Set Google API key for goose")
            elif provider_config.type == "openrouter" and provider_config.api_key:
                env_vars_to_set["OPENROUTER_API_KEY"] = provider_config.api_key
                self.status.log("Set OpenRouter API key for goose")

            # Set environment variables for current process (for --run commands)
            for key, value in env_vars_to_set.items():
                os.environ[key] = value

            # Write environment variables to shell profile for interactive sessions
            self._write_env_vars_to_profile(env_vars_to_set)

            self.status.log(
                f"Configured Goose: model={model_name}, provider={provider_config.type}"
            )
        else:
            self.status.log("No default model or provider configured", "WARNING")

        try:
            with config_file.open("w") as f:
                yaml.dump(config_data, f)

            set_ownership(config_file)

            self.status.log(f"Updated Goose configuration at {config_file}")
            return True
        except Exception as e:
            self.status.log(f"Failed to write Goose configuration: {e}", "ERROR")
            return False

    def integrate_mcp_servers(self) -> bool:
        if not cubbi_config.mcps:
            self.status.log("No MCP servers to integrate")
            return True

        config_dir = self._ensure_user_config_dir()
        if not config_dir.exists():
            self.status.log(
                f"Config directory {config_dir} does not exist and could not be created",
                "ERROR",
            )
            return False

        config_file = config_dir / "config.yaml"
        yaml = YAML(typ="safe")

        if config_file.exists():
            with config_file.open("r") as f:
                config_data = yaml.load(f) or {}
        else:
            config_data = {"extensions": {}}

        if "extensions" not in config_data:
            config_data["extensions"] = {}

        for mcp in cubbi_config.mcps:
            if mcp.type == "remote":
                if mcp.name and mcp.url:
                    self.status.log(
                        f"Adding remote MCP extension: {mcp.name} - {mcp.url}"
                    )
                    config_data["extensions"][mcp.name] = {
                        "enabled": True,
                        "name": mcp.name,
                        "timeout": 60,
                        "type": "sse",
                        "uri": mcp.url,
                        "envs": {},
                    }
            elif mcp.type == "local":
                if mcp.name and mcp.command:
                    self.status.log(
                        f"Adding local MCP extension: {mcp.name} - {mcp.command}"
                    )
                    # Goose uses stdio type for local MCPs
                    config_data["extensions"][mcp.name] = {
                        "enabled": True,
                        "name": mcp.name,
                        "timeout": 60,
                        "type": "stdio",
                        "command": mcp.command,
                        "args": mcp.args if mcp.args else [],
                        "envs": mcp.env if mcp.env else {},
                    }
            elif mcp.type in ["docker", "proxy"]:
                if mcp.name and mcp.host:
                    mcp_port = mcp.port or 8080
                    mcp_url = f"http://{mcp.host}:{mcp_port}/sse"
                    self.status.log(f"Adding MCP extension: {mcp.name} - {mcp_url}")
                    config_data["extensions"][mcp.name] = {
                        "enabled": True,
                        "name": mcp.name,
                        "timeout": 60,
                        "type": "sse",
                        "uri": mcp_url,
                        "envs": {},
                    }

        try:
            with config_file.open("w") as f:
                yaml.dump(config_data, f)

            set_ownership(config_file)

            return True
        except Exception as e:
            self.status.log(f"Failed to integrate MCP servers: {e}", "ERROR")
            return False


PLUGIN_CLASS = GoosePlugin
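For reference, a run with a default Anthropic model and one proxy MCP named `fetch` (both hypothetical values, not from the repository) would leave a `~/.config/goose/config.yaml` shaped roughly like this:

```yaml
# Illustrative output of GoosePlugin; exact values depend on your cubbi config
GOOSE_MODEL: claude-sonnet-4
GOOSE_PROVIDER: anthropic
extensions:
  developer:
    enabled: true
    name: developer
    timeout: 300
    type: builtin
  fetch:
    enabled: true
    name: fetch
    timeout: 60
    type: sse
    uri: http://fetch:8080/sse
    envs: {}
```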
cubbi/images/init-status.sh (Executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/bash
# Script to check and display initialization status

# Only proceed if running as root
if [ "$(id -u)" != "0" ]; then
    exit 0
fi

# Ensure files exist before checking them
touch /cubbi/init.status /cubbi/init.log

# Quick check instead of full logic
if ! grep -q "INIT_COMPLETE=true" "/cubbi/init.status" 2>/dev/null; then
    # Only follow logs if initialization is incomplete
    if [ -f "/cubbi/init.log" ]; then
        echo "----------------------------------------"
        tail -f /cubbi/init.log &
        tail_pid=$!

        # Check every second if initialization has completed
        while true; do
            if grep -q "INIT_COMPLETE=true" "/cubbi/init.status" 2>/dev/null; then
                kill $tail_pid 2>/dev/null
                echo "----------------------------------------"
                break
            fi
            sleep 1
        done
    else
        echo "No initialization logs found."
    fi
fi

exec gosu cubbi /bin/bash -i
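Because the status file is a simple flag file containing `INIT_COMPLETE=true` once initialization finishes, readiness can also be probed from the host. A sketch (the container name `my-cubbi-session` is hypothetical):

```bash
# Returns "initialized" once cubbi_init has written INIT_COMPLETE=true
docker exec my-cubbi-session grep -q "INIT_COMPLETE=true" /cubbi/init.status \
  && echo "initialized" || echo "still initializing"
```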
cubbi/images/opencode/Dockerfile (Normal file, 79 lines)
@@ -0,0 +1,79 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Opencode for Cubbi"

# Install system dependencies including gosu for user switching and shadow for useradd/groupadd
RUN apt-get update && apt-get install -y --no-install-recommends \
    gosu \
    sudo \
    passwd \
    bash \
    curl \
    bzip2 \
    iputils-ping \
    iproute2 \
    libxcb1 \
    libdbus-1-3 \
    nano \
    tmux \
    git-core \
    ripgrep \
    openssh-client \
    vim \
    && rm -rf /var/lib/apt/lists/*

# Install deps
WORKDIR /tmp
RUN curl -fsSL https://astral.sh/uv/install.sh -o install.sh && \
    sh install.sh && \
    mv /root/.local/bin/uv /usr/local/bin/uv && \
    mv /root/.local/bin/uvx /usr/local/bin/uvx && \
    rm install.sh

# Install Node.js
ARG NODE_VERSION=v22.16.0
RUN mkdir -p /opt/node && \
    ARCH=$(uname -m) && \
    if [ "$ARCH" = "x86_64" ]; then \
        NODE_ARCH=linux-x64; \
    elif [ "$ARCH" = "aarch64" ]; then \
        NODE_ARCH=linux-arm64; \
    else \
        echo "Unsupported architecture"; exit 1; \
    fi && \
    curl -fsSL https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-$NODE_ARCH.tar.gz -o node.tar.gz && \
    tar -xf node.tar.gz -C /opt/node --strip-components=1 && \
    rm node.tar.gz

ENV PATH="/opt/node/bin:$PATH"
RUN npm i -g yarn
RUN npm i -g opencode-ai

# Create app directory
WORKDIR /app

# Copy initialization system
COPY cubbi_init.py /cubbi/cubbi_init.py
COPY opencode_plugin.py /cubbi/opencode_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml
COPY init-status.sh /cubbi/init-status.sh
RUN chmod +x /cubbi/cubbi_init.py /cubbi/init-status.sh
RUN echo 'PATH="/opt/node/bin:$PATH"' >> /etc/bash.bashrc
RUN echo '[ -x /cubbi/init-status.sh ] && /cubbi/init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV UV_LINK_MODE=copy
ENV COLORTERM=truecolor

# Pre-run cubbi_init once to verify it works and warm the bytecode cache
RUN /cubbi/cubbi_init.py --help

# Set WORKDIR to /app, common practice and expected by cubbi-init.sh
WORKDIR /app

ENTRYPOINT ["/cubbi/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
cubbi/images/opencode/README.md (Normal file, 55 lines)
@@ -0,0 +1,55 @@
# Opencode Image for Cubbi

This image provides a containerized environment for running [Opencode](https://opencode.ai).

## Features

- Pre-configured environment for Opencode AI
- Langfuse logging support

## Environment Variables

### Opencode Configuration

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `CUBBI_MODEL` | Model to use with Opencode | No | - |
| `CUBBI_PROVIDER` | Provider to use with Opencode | No | - |

### Cubbi Core Variables

| Variable | Description | Required | Default |
|----------|-------------|----------|---------|
| `CUBBI_USER_ID` | UID for the container user | No | `1000` |
| `CUBBI_GROUP_ID` | GID for the container user | No | `1000` |
| `CUBBI_RUN_COMMAND` | Command to execute after initialization | No | - |
| `CUBBI_NO_SHELL` | Exit after command execution | No | `false` |
| `CUBBI_CONFIG_DIR` | Directory for persistent configurations | No | `/cubbi-config` |
| `CUBBI_PERSISTENT_LINKS` | Semicolon-separated list of source:target symlinks | No | - |

### MCP Integration Variables

| Variable | Description | Required |
|----------|-------------|----------|
| `MCP_COUNT` | Number of available MCP servers | No |
| `MCP_NAMES` | JSON array of MCP server names | No |
| `MCP_{idx}_NAME` | Name of MCP server at index | No |
| `MCP_{idx}_TYPE` | Type of MCP server | No |
| `MCP_{idx}_HOST` | Hostname of MCP server | No |
| `MCP_{idx}_URL` | Full URL for remote MCP servers | No |

## Build

To build this image:

```bash
cd cubbi/images/opencode
docker build -t monadical/cubbi-opencode:latest .
```

## Usage

```bash
# Create a new session with this image
cubbix -i opencode
```
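The core variables compose with the image entrypoint; for example, a one-shot run that executes a command and exits instead of dropping into a shell. A sketch using only variables documented above (the command string is illustrative):

```bash
docker run --rm \
  -e CUBBI_RUN_COMMAND="opencode --version" \
  -e CUBBI_NO_SHELL=true \
  monadical/cubbi-opencode:latest
```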
cubbi/images/opencode/cubbi_image.yaml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
name: opencode
description: Opencode AI environment
version: 1.0.0
maintainer: team@monadical.com
image: monadical/cubbi-opencode:latest
persistent_configs: []
environments_to_forward:
  # API Keys
  - OPENAI_API_KEY
  - ANTHROPIC_API_KEY
  - ANTHROPIC_AUTH_TOKEN
  - ANTHROPIC_CUSTOM_HEADERS
  - DEEPSEEK_API_KEY
  - GEMINI_API_KEY
  - OPENROUTER_API_KEY
  - AIDER_API_KEYS
cubbi/images/opencode/opencode_plugin.py (Normal file, 253 lines)
@@ -0,0 +1,253 @@
#!/usr/bin/env python3

import json
import os
from pathlib import Path

from cubbi_init import ToolPlugin, cubbi_config, set_ownership

# Standard providers that OpenCode supports natively
STANDARD_PROVIDERS: list[str] = ["anthropic", "openai", "google", "openrouter"]


class OpencodePlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "opencode"

    def _get_user_config_path(self) -> Path:
        return Path("/home/cubbi/.config/opencode")

    def is_already_configured(self) -> bool:
        config_file = self._get_user_config_path() / "config.json"
        return config_file.exists()

    def configure(self) -> bool:
        self.create_directory_with_ownership(self._get_user_config_path())

        config_success = self.setup_tool_configuration()
        if not config_success:
            return False

        return self.integrate_mcp_servers()

    def setup_tool_configuration(self) -> bool:
        config_dir = self._get_user_config_path()
        config_file = config_dir / "config.json"

        # Initialize configuration with schema
        config_data: dict[str, str | dict[str, dict[str, str | dict[str, str]]]] = {
            "$schema": "https://opencode.ai/config.json"
        }

        # Set default theme to system
        config_data["theme"] = "system"

        # Add providers configuration
        config_data["provider"] = {}

        # Configure all available providers
        for provider_name, provider_config in cubbi_config.providers.items():
            # Check if this is a custom provider (has baseURL)
            if provider_config.base_url:
                # Custom provider - include baseURL and name
                models_dict = {}

                # Add all models for any provider type that has models
                if provider_config.models:
                    for model in provider_config.models:
                        model_id = model.get("id", "")
                        if model_id:
                            models_dict[model_id] = {"name": model_id}

                provider_entry: dict[str, str | dict[str, str]] = {
                    "options": {
                        "apiKey": provider_config.api_key,
                        "baseURL": provider_config.base_url,
                    },
                    "models": models_dict,
                }

                # Add npm package and name for custom providers
                if provider_config.type in STANDARD_PROVIDERS:
                    # Standard provider with custom URL - determine npm package
                    if provider_config.type == "anthropic":
                        provider_entry["npm"] = "@ai-sdk/anthropic"
                        provider_entry["name"] = f"Anthropic ({provider_name})"
                    elif provider_config.type == "openai":
                        provider_entry["npm"] = "@ai-sdk/openai-compatible"
                        provider_entry["name"] = f"OpenAI Compatible ({provider_name})"
                    elif provider_config.type == "google":
                        provider_entry["npm"] = "@ai-sdk/google"
                        provider_entry["name"] = f"Google ({provider_name})"
                    elif provider_config.type == "openrouter":
                        provider_entry["npm"] = "@ai-sdk/openai-compatible"
                        provider_entry["name"] = f"OpenRouter ({provider_name})"
                else:
                    # Non-standard provider with custom URL
                    provider_entry["npm"] = "@ai-sdk/openai-compatible"
                    provider_entry["name"] = provider_name.title()

                config_data["provider"][provider_name] = provider_entry
                if models_dict:
                    self.status.log(
                        f"Added {provider_name} custom provider with {len(models_dict)} models to OpenCode configuration"
                    )
                else:
                    self.status.log(
                        f"Added {provider_name} custom provider to OpenCode configuration"
                    )
            else:
                # Standard provider without custom URL
                if provider_config.type in STANDARD_PROVIDERS:
                    # Populate models for any provider that has models
                    models_dict = {}
                    if provider_config.models:
                        for model in provider_config.models:
                            model_id = model.get("id", "")
                            if model_id:
                                models_dict[model_id] = {"name": model_id}

                    config_data["provider"][provider_name] = {
                        "options": {"apiKey": provider_config.api_key},
                        "models": models_dict,
                    }

                    if models_dict:
                        self.status.log(
                            f"Added {provider_name} standard provider with {len(models_dict)} models to OpenCode configuration"
                        )
                    else:
                        self.status.log(
                            f"Added {provider_name} standard provider to OpenCode configuration"
                        )

        # Set default model
        if cubbi_config.defaults.model:
            config_data["model"] = cubbi_config.defaults.model
            self.status.log(f"Set default model to {config_data['model']}")

            # Add the default model to provider if it doesn't already have models
            provider_name: str
            model_name: str
            provider_name, model_name = cubbi_config.defaults.model.split("/", 1)
            if provider_name in config_data["provider"]:
                provider_config = cubbi_config.providers.get(provider_name)
                # Only add default model if provider doesn't already have models populated
                if not (provider_config and provider_config.models):
                    config_data["provider"][provider_name]["models"] = {
                        model_name: {"name": model_name}
                    }
                    self.status.log(
                        f"Added default model {model_name} to {provider_name} provider"
                    )
        else:
            # Fallback to legacy environment variables
            opencode_model: str | None = os.environ.get("CUBBI_MODEL")
            opencode_provider: str | None = os.environ.get("CUBBI_PROVIDER")

            if opencode_model and opencode_provider:
                config_data["model"] = f"{opencode_provider}/{opencode_model}"
                self.status.log(f"Set model to {config_data['model']} (legacy)")

                # Add the legacy model to the provider if it exists
                if opencode_provider in config_data["provider"]:
                    config_data["provider"][opencode_provider]["models"] = {
                        opencode_model: {"name": opencode_model}
                    }

        # Only write config if we have providers configured
        if not config_data["provider"]:
            self.status.log(
                "No providers configured, using minimal OpenCode configuration"
            )
            config_data = {
                "$schema": "https://opencode.ai/config.json",
                "theme": "system",
            }

        try:
            with config_file.open("w") as f:
                json.dump(config_data, f, indent=2)

            set_ownership(config_file)

            self.status.log(
                f"Updated OpenCode configuration at {config_file} with {len(config_data.get('provider', {}))} providers"
            )
            return True
        except Exception as e:
            self.status.log(f"Failed to write OpenCode configuration: {e}", "ERROR")
            return False

    def integrate_mcp_servers(self) -> bool:
        if not cubbi_config.mcps:
            self.status.log("No MCP servers to integrate")
            return True

        config_dir = self._get_user_config_path()
        config_file = config_dir / "config.json"

        if config_file.exists():
            with config_file.open("r") as f:
                config_data: dict[str, str | dict[str, dict[str, str]]] = (
                    json.load(f) or {}
                )
        else:
            config_data = {}

        if "mcp" not in config_data:
            config_data["mcp"] = {}

        for mcp in cubbi_config.mcps:
            if mcp.type == "remote":
                if mcp.name and mcp.url:
                    self.status.log(
                        f"Adding remote MCP extension: {mcp.name} - {mcp.url}"
                    )
                    config_data["mcp"][mcp.name] = {
                        "type": "remote",
                        "url": mcp.url,
                    }
            elif mcp.type == "local":
                if mcp.name and mcp.command:
                    self.status.log(
                        f"Adding local MCP extension: {mcp.name} - {mcp.command}"
                    )
                    # OpenCode expects command as an array with command and args combined
                    command_array = [mcp.command]
                    if mcp.args:
                        command_array.extend(mcp.args)

                    mcp_entry: dict[str, str | list[str] | bool | dict[str, str]] = {
                        "type": "local",
                        "command": command_array,
                        "enabled": True,
                    }
                    if mcp.env:
                        # OpenCode expects environment (not env)
                        mcp_entry["environment"] = mcp.env
                    config_data["mcp"][mcp.name] = mcp_entry
            elif mcp.type in ["docker", "proxy"]:
                if mcp.name and mcp.host:
                    mcp_port: int = mcp.port or 8080
                    mcp_url: str = f"http://{mcp.host}:{mcp_port}/sse"
                    self.status.log(f"Adding MCP extension: {mcp.name} - {mcp_url}")
                    config_data["mcp"][mcp.name] = {
                        "type": "remote",
                        "url": mcp_url,
                    }

        try:
            with config_file.open("w") as f:
                json.dump(config_data, f, indent=2)

            set_ownership(config_file)

            return True
        except Exception as e:
            self.status.log(f"Failed to integrate MCP servers: {e}", "ERROR")
            return False


PLUGIN_CLASS = OpencodePlugin
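For reference, with a single standard Anthropic provider and one docker MCP named `fetch` (all values hypothetical), the generated `~/.config/opencode/config.json` would look roughly like:

```json
{
  "$schema": "https://opencode.ai/config.json",
  "theme": "system",
  "provider": {
    "anthropic": {
      "options": { "apiKey": "sk-ant-..." },
      "models": { "claude-sonnet-4": { "name": "claude-sonnet-4" } }
    }
  },
  "model": "anthropic/claude-sonnet-4",
  "mcp": {
    "fetch": { "type": "remote", "url": "http://fetch:8080/sse" }
  }
}
```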
cubbi/mcp.py (Normal file, 946 lines)
@@ -0,0 +1,946 @@
|
||||
"""
|
||||
MCP (Model Control Protocol) server management for Cubbi Container.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
import docker
|
||||
from docker.errors import DockerException, ImageNotFound, NotFound
|
||||
|
||||
from .models import DockerMCP, LocalMCP, MCPContainer, MCPStatus, ProxyMCP, RemoteMCP
|
||||
from .user_config import UserConfigManager
|
||||
|
||||
# Configure logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MCPManager:
|
||||
"""Manager for MCP (Model Control Protocol) servers."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config_manager: Optional[UserConfigManager] = None,
|
||||
):
|
||||
"""Initialize the MCP manager."""
|
||||
self.config_manager = config_manager or UserConfigManager()
|
||||
try:
|
||||
self.client = docker.from_env()
|
||||
# Test connection
|
||||
self.client.ping()
|
||||
except DockerException as e:
|
||||
logger.error(f"Error connecting to Docker: {e}")
|
||||
self.client = None
|
||||
|
||||
def _ensure_mcp_network(self) -> str:
|
||||
"""Ensure the MCP network exists and return its name.
|
||||
Note: This is used only by the inspector, not for session-to-MCP connections.
|
||||
"""
|
||||
network_name = "cubbi-mcp-network"
|
||||
if self.client:
|
||||
networks = self.client.networks.list(names=[network_name])
|
||||
if not networks:
|
||||
self.client.networks.create(network_name, driver="bridge")
|
||||
return network_name
|
||||
|
||||
def _get_mcp_dedicated_network(self, mcp_name: str) -> str:
|
||||
"""Get or create a dedicated network for direct session-to-MCP connections.
|
||||
|
||||
Args:
|
||||
mcp_name: The name of the MCP server
|
||||
|
||||
Returns:
|
||||
The name of the dedicated network
|
||||
"""
|
||||
network_name = f"cubbi-mcp-{mcp_name}-network"
|
||||
if self.client:
|
||||
networks = self.client.networks.list(names=[network_name])
|
||||
if not networks:
|
||||
self.client.networks.create(network_name, driver="bridge")
|
||||
return network_name
|
||||
|
||||
def list_mcps(self) -> List[Dict[str, Any]]:
|
||||
"""List all configured MCP servers."""
|
||||
mcps = self.config_manager.get("mcps", [])
|
||||
return mcps
|
||||
|
||||
def get_mcp(self, name: str) -> Optional[Dict[str, Any]]:
|
||||
"""Get an MCP configuration by name."""
|
||||
mcps = self.list_mcps()
|
||||
for mcp in mcps:
|
||||
if mcp.get("name") == name:
|
||||
return mcp
|
||||
return None
|
||||
|
||||
def add_remote_mcp(
|
||||
self,
|
||||
name: str,
|
||||
url: str,
|
||||
headers: Dict[str, str] = None,
|
||||
mcp_type: Optional[str] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a remote MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
url: URL of the remote MCP server
|
||||
headers: HTTP headers to use when connecting
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# Create the remote MCP configuration
|
||||
remote_mcp = RemoteMCP(
|
||||
name=name,
|
||||
url=url,
|
||||
headers=headers or {},
|
||||
mcp_type=mcp_type,
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = remote_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def add_docker_mcp(
|
||||
self,
|
||||
name: str,
|
||||
image: str,
|
||||
command: str,
|
||||
env: Dict[str, str] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a Docker-based MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
image: Docker image for the MCP server
|
||||
command: Command to run in the container
|
||||
env: Environment variables to set in the container
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# Create the Docker MCP configuration
|
||||
docker_mcp = DockerMCP(
|
||||
name=name,
|
||||
image=image,
|
||||
command=command,
|
||||
env=env or {},
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = docker_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def add_proxy_mcp(
|
||||
self,
|
||||
name: str,
|
||||
base_image: str,
|
||||
proxy_image: str,
|
||||
command: str,
|
||||
proxy_options: Dict[str, Any] = None,
|
||||
env: Dict[str, str] = None,
|
||||
host_port: Optional[int] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a proxy-based MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
base_image: Base Docker image running the actual MCP server
|
||||
proxy_image: Docker image for the MCP proxy
|
||||
command: Command to run in the container
|
||||
proxy_options: Options for the MCP proxy
|
||||
env: Environment variables to set in the container
|
||||
host_port: Host port to bind the MCP server to (auto-assigned if not specified)
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# If no host port specified, find the next available port starting from 5101
|
||||
if host_port is None:
|
||||
# Get current MCPs and find highest assigned port
|
||||
mcps = self.list_mcps()
|
||||
highest_port = 5100 # Start at 5100, so next will be 5101
|
||||
|
||||
for mcp in mcps:
|
||||
if mcp.get("type") == "proxy" and mcp.get("host_port"):
|
||||
try:
|
||||
port = int(mcp.get("host_port"))
|
||||
if port > highest_port:
|
||||
highest_port = port
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
|
||||
# Next port will be highest + 1
|
||||
host_port = highest_port + 1
|
||||
|
||||
# Create the Proxy MCP configuration
|
||||
proxy_mcp = ProxyMCP(
|
||||
name=name,
|
||||
base_image=base_image,
|
||||
proxy_image=proxy_image,
|
||||
command=command,
|
||||
proxy_options=proxy_options or {},
|
||||
env=env or {},
|
||||
host_port=host_port,
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = proxy_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def add_local_mcp(
|
||||
self,
|
||||
name: str,
|
||||
command: str,
|
||||
args: List[str] = None,
|
||||
env: Dict[str, str] = None,
|
||||
add_as_default: bool = True,
|
||||
) -> Dict[str, Any]:
|
||||
"""Add a local MCP server.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server
|
||||
command: Path to executable
|
||||
args: Command arguments
|
||||
env: Environment variables to set for the command
|
||||
add_as_default: Whether to add this MCP to the default MCPs list
|
||||
|
||||
Returns:
|
||||
The MCP configuration dictionary
|
||||
"""
|
||||
# Create the Local MCP configuration
|
||||
local_mcp = LocalMCP(
|
||||
name=name,
|
||||
command=command,
|
||||
args=args or [],
|
||||
env=env or {},
|
||||
)
|
||||
|
||||
# Add to the configuration
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Remove existing MCP with the same name if it exists
|
||||
mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# Add the new MCP
|
||||
mcp_config = local_mcp.model_dump()
|
||||
mcps.append(mcp_config)
|
||||
|
||||
# Save the configuration
|
||||
self.config_manager.set("mcps", mcps)
|
||||
|
||||
# Add to default MCPs if requested
|
||||
if add_as_default:
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name not in default_mcps:
|
||||
default_mcps.append(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
return mcp_config
|
||||
|
||||
def remove_mcp(self, name: str) -> bool:
|
||||
"""Remove an MCP server configuration.
|
||||
|
||||
Args:
|
||||
name: Name of the MCP server to remove
|
||||
|
||||
Returns:
|
||||
True if the MCP was successfully removed, False otherwise
|
||||
"""
|
||||
mcps = self.list_mcps()
|
||||
|
||||
# Filter out the MCP with the specified name
|
||||
updated_mcps = [mcp for mcp in mcps if mcp.get("name") != name]
|
||||
|
||||
# If the length hasn't changed, the MCP wasn't found
|
||||
if len(mcps) == len(updated_mcps):
|
||||
return False
|
||||
|
||||
# Save the updated configuration
|
||||
self.config_manager.set("mcps", updated_mcps)
|
||||
|
||||
# Also remove from default MCPs if it's there
|
||||
default_mcps = self.config_manager.get("defaults.mcps", [])
|
||||
if name in default_mcps:
|
||||
default_mcps.remove(name)
|
||||
self.config_manager.set("defaults.mcps", default_mcps)
|
||||
|
||||
# Stop and remove the container if it exists
|
||||
self.stop_mcp(name)
|
||||
|
||||
return True
|
||||
|
||||
def get_mcp_container_name(self, mcp_name: str) -> str:
|
||||
"""Get the Docker container name for an MCP server."""
|
||||
return f"cubbi_mcp_{mcp_name}"
|
||||
|
||||
    def start_mcp(self, name: str) -> Dict[str, Any]:
        """Start an MCP server container."""
        if not self.client:
            raise Exception("Docker client is not available")

        # Get the MCP configuration
        mcp_config = self.get_mcp(name)
        if not mcp_config:
            raise ValueError(f"MCP server '{name}' not found")

        # Get the container name
        container_name = self.get_mcp_container_name(name)

        # Check if the container already exists
        try:
            container = self.client.containers.get(container_name)
            # Check if we need to recreate the container due to port binding changes
            needs_recreate = False

            if mcp_config.get("type") == "proxy" and mcp_config.get("host_port"):
                # Get the current container port bindings
                port_bindings = container.attrs.get("HostConfig", {}).get(
                    "PortBindings", {}
                )
                sse_port = f"{mcp_config['proxy_options'].get('sse_port', 8080)}/tcp"

                # Check if the port binding matches the configured host port
                current_binding = port_bindings.get(sse_port, [])
                if not current_binding or int(
                    current_binding[0].get("HostPort", 0)
                ) != mcp_config.get("host_port"):
                    logger.info(
                        f"Port binding changed for MCP '{name}', recreating container"
                    )
                    needs_recreate = True

            # If we don't need to recreate, just start it if it's not running
            if not needs_recreate:
                if container.status != "running":
                    container.start()

                # Return the container status
                return {
                    "container_id": container.id,
                    "status": "running",
                    "name": name,
                }
            else:
                # We need to recreate the container with new port bindings
                logger.info(
                    f"Recreating container for MCP '{name}' with updated port bindings"
                )
                container.remove(force=True)
                # Fall through to the creation logic below
        except NotFound:
            # Container doesn't exist, we need to create it
            pass

        # Ensure the MCP network exists
        network_name = self._ensure_mcp_network()

        # Handle different MCP types
        mcp_type = mcp_config.get("type")

        if mcp_type == "remote":
            # Remote MCP servers don't need containers
            return {
                "status": "not_applicable",
                "name": name,
                "type": "remote",
            }

        elif mcp_type == "local":
            # Local MCP servers don't need containers
            return {
                "status": "not_applicable",
                "name": name,
                "type": "local",
            }

        elif mcp_type == "docker":
            # Pull the image if needed
            try:
                self.client.images.get(mcp_config["image"])
            except ImageNotFound:
                logger.info(f"Pulling image {mcp_config['image']}")
                self.client.images.pull(mcp_config["image"])

            # Create and start the container
            container = self.client.containers.run(
                image=mcp_config["image"],
                command=mcp_config.get("command"),
                name=container_name,
                detach=True,
                network=None,  # Start without network, we'll add it with aliases
                environment=mcp_config.get("env", {}),
                labels={
                    "cubbi.mcp": "true",
                    "cubbi.mcp.name": name,
                    "cubbi.mcp.type": "docker",
                },
            )

            # Connect to the inspector network
            network = self.client.networks.get(network_name)
            network.connect(container, aliases=[name])
            logger.info(
                f"Connected MCP server '{name}' to inspector network {network_name} with alias '{name}'"
            )

            # Create and connect to a dedicated network for session connections
            dedicated_network_name = self._get_mcp_dedicated_network(name)
            try:
                dedicated_network = self.client.networks.get(dedicated_network_name)
            except DockerException:
                dedicated_network = self.client.networks.create(
                    dedicated_network_name, driver="bridge"
                )

            dedicated_network.connect(container, aliases=[name])
            logger.info(
                f"Connected MCP server '{name}' to dedicated network {dedicated_network_name} with alias '{name}'"
            )

            return {
                "container_id": container.id,
                "status": "running",
                "name": name,
            }

        elif mcp_type == "proxy":
            # For proxy, we need to create a custom Dockerfile and build an image
            with tempfile.TemporaryDirectory() as tmp_dir:
                # Create entrypoint script for mcp-proxy that runs the base MCP image
                entrypoint_script = """#!/bin/sh
set -x
echo "Starting MCP proxy with base image $MCP_BASE_IMAGE (command: $MCP_COMMAND) on port $SSE_PORT"

# Verify if Docker socket is available
if [ ! -S /var/run/docker.sock ]; then
    echo "ERROR: Docker socket not available. Cannot run base MCP image."
    echo "Make sure the Docker socket is mounted from the host."

    # Create a minimal fallback server for testing
    cat > /tmp/fallback_server.py << 'EOF'
import json, sys, time
print(json.dumps({"type": "ready", "message": "Fallback server - Docker socket not available"}))
sys.stdout.flush()
while True:
    line = sys.stdin.readline().strip()
    if line:
        try:
            data = json.loads(line)
            if data.get("type") == "ping":
                print(json.dumps({"type": "pong", "id": data.get("id")}))
            else:
                print(json.dumps({"type": "error", "message": "Docker socket not available"}))
        except Exception:
            print(json.dumps({"type": "error"}))
        sys.stdout.flush()
    time.sleep(1)
EOF

    exec mcp-proxy \\
        --sse-port "$SSE_PORT" \\
        --sse-host "$SSE_HOST" \\
        --allow-origin "$ALLOW_ORIGIN" \\
        --pass-environment \\
        -- \\
        python /tmp/fallback_server.py
fi

# Pull the base MCP image
echo "Pulling base MCP image: $MCP_BASE_IMAGE"
docker pull "$MCP_BASE_IMAGE" || true

# Prepare the command to run the MCP server
if [ -n "$MCP_COMMAND" ]; then
    CMD="$MCP_COMMAND"
else
    # Default to empty if no command specified
    CMD=""
fi

echo "Running MCP server from image $MCP_BASE_IMAGE with command: $CMD"

# Run the actual MCP server in the base image and pipe its I/O to mcp-proxy
# Using docker run without -d to keep stdio connected

# Build env vars string to pass through to the inner container
ENV_ARGS=""

# Check if the environment variable names file exists
if [ -f "/mcp-envs.txt" ]; then
    # Read env var names from file and pass them to docker
    while read -r var_name; do
        # Skip empty lines
        if [ -n "$var_name" ]; then
            # Simply add the env var - Docker will only pass it if it exists
            ENV_ARGS="$ENV_ARGS -e $var_name"
        fi
    done < "/mcp-envs.txt"

    echo "Passing environment variables from mcp-envs.txt: $ENV_ARGS"
fi

exec mcp-proxy \\
    --sse-port "$SSE_PORT" \\
    --sse-host "$SSE_HOST" \\
    --allow-origin "$ALLOW_ORIGIN" \\
    --pass-environment \\
    -- \\
    docker run --rm -i $ENV_ARGS "$MCP_BASE_IMAGE" $CMD
"""
                # Write the entrypoint script
                entrypoint_path = os.path.join(tmp_dir, "entrypoint.sh")
                with open(entrypoint_path, "w") as f:
                    f.write(entrypoint_script)

                # Create a file with environment variable names (no values)
                env_names_path = os.path.join(tmp_dir, "mcp-envs.txt")
                with open(env_names_path, "w") as f:
                    # Write one env var name per line
                    for env_name in mcp_config.get("env", {}).keys():
                        f.write(f"{env_name}\n")

                # Create a Dockerfile for the proxy
                dockerfile_content = f"""
FROM {mcp_config["proxy_image"]}

# Install Docker CLI (trying multiple package managers to handle different base images)
USER root
RUN (apt-get update && apt-get install -y docker.io) || \\
    (apt-get update && apt-get install -y docker-ce-cli) || \\
    (apk add --no-cache docker-cli) || \\
    (yum install -y docker) || \\
    echo "WARNING: Could not install Docker CLI - will fall back to minimal MCP server"

# Set environment variables for the proxy
ENV MCP_BASE_IMAGE={mcp_config["base_image"]}
ENV MCP_COMMAND="{mcp_config.get("command", "")}"
ENV SSE_PORT={mcp_config["proxy_options"].get("sse_port", 8080)}
ENV SSE_HOST={mcp_config["proxy_options"].get("sse_host", "0.0.0.0")}
ENV ALLOW_ORIGIN={mcp_config["proxy_options"].get("allow_origin", "*")}
ENV DEBUG=1

# Add environment variables from the configuration
{chr(10).join([f'ENV {k}="{v}"' for k, v in mcp_config.get("env", {}).items()])}

# Add env names file and entrypoint script
COPY mcp-envs.txt /mcp-envs.txt
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
"""
                # Write the Dockerfile
                dockerfile_path = os.path.join(tmp_dir, "Dockerfile")
                with open(dockerfile_path, "w") as f:
                    f.write(dockerfile_content)

                # Build the image
                custom_image_name = f"cubbi_mcp_proxy_{name}"
                logger.info(f"Building custom proxy image: {custom_image_name}")
                self.client.images.build(
                    path=tmp_dir,
                    tag=custom_image_name,
                    rm=True,
                )

                # Format command for the Docker entrypoint arguments
                # The MCP proxy container will handle this internally based on
                # the MCP_BASE_IMAGE and MCP_COMMAND env vars we set
                logger.info(
                    f"Starting MCP proxy with base_image={mcp_config['base_image']}, command={mcp_config.get('command', '')}"
                )

                # Get the SSE port from the proxy options
                sse_port = mcp_config["proxy_options"].get("sse_port", 8080)

                # Check if we need to bind to a host port
                port_bindings = {}
                if mcp_config.get("host_port"):
                    host_port = mcp_config.get("host_port")
                    port_bindings = {f"{sse_port}/tcp": host_port}

                # Create and start the container
                container = self.client.containers.run(
                    image=custom_image_name,
                    name=container_name,
                    detach=True,
                    network=None,  # Start without network, we'll add it with aliases
                    volumes={
                        "/var/run/docker.sock": {
                            "bind": "/var/run/docker.sock",
                            "mode": "rw",
                        }
                    },
                    labels={
                        "cubbi.mcp": "true",
                        "cubbi.mcp.name": name,
                        "cubbi.mcp.type": "proxy",
                    },
                    ports=port_bindings,  # Bind the SSE port to the host if configured
                )

                # Connect to the inspector network
                network = self.client.networks.get(network_name)
                network.connect(container, aliases=[name])
                logger.info(
                    f"Connected MCP server '{name}' to inspector network {network_name} with alias '{name}'"
                )

                # Create and connect to a dedicated network for session connections
                dedicated_network_name = self._get_mcp_dedicated_network(name)
                try:
                    dedicated_network = self.client.networks.get(dedicated_network_name)
                except DockerException:
                    dedicated_network = self.client.networks.create(
                        dedicated_network_name, driver="bridge"
                    )

                dedicated_network.connect(container, aliases=[name])
                logger.info(
                    f"Connected MCP server '{name}' to dedicated network {dedicated_network_name} with alias '{name}'"
                )

                return {
                    "container_id": container.id,
                    "status": "running",
                    "name": name,
                }

        else:
            raise ValueError(f"Unsupported MCP type: {mcp_type}")

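The recreation check at the top of start_mcp reduces to comparing the configured host_port against what Docker reports under HostConfig.PortBindings. A minimal standalone sketch of that comparison, assuming a docker-py Container object (the helper name is hypothetical, not part of this diff):

def needs_port_rebind(container, sse_port: int, want_host_port: int) -> bool:
    # container.attrs mirrors `docker inspect`; PortBindings maps
    # "PORT/tcp" -> [{"HostIp": ..., "HostPort": "NNNN"}, ...]
    bindings = container.attrs.get("HostConfig", {}).get("PortBindings", {})
    current = bindings.get(f"{sse_port}/tcp", [])
    return not current or int(current[0].get("HostPort", 0)) != want_host_port
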
    def stop_mcp(self, name: str) -> bool:
        """Stop an MCP server container.

        Args:
            name: The name of the MCP server to stop

        Returns:
            True if the operation was successful (including cases where the container doesn't exist)
        """
        if not self.client:
            logger.warning("Docker client is not available")
            return False

        # Get the MCP configuration - don't raise an exception if not found
        mcp_config = self.get_mcp(name)
        if not mcp_config:
            logger.warning(
                f"MCP server '{name}' not found, but continuing with removal"
            )
            return True

        # Remote and Local MCPs don't have containers to stop
        if mcp_config.get("type") in ["remote", "local"]:
            return True

        # Get the container name
        container_name = self.get_mcp_container_name(name)

        # Try to get, stop, and remove the container
        try:
            container = self.client.containers.get(container_name)

            # Stop the container if it's running
            if container.status == "running":
                logger.info(f"Stopping MCP container '{name}'...")
                container.stop(timeout=10)

            # Remove the container regardless of its status
            logger.info(f"Removing MCP container '{name}'...")
            container.remove(force=True)
            return True

        except NotFound:
            # Container doesn't exist - this is fine when removing
            logger.info(f"MCP container '{name}' not found, nothing to stop or remove")
            return True
        except Exception as e:
            # Log the error but don't fail the removal operation
            logger.error(f"Error stopping/removing MCP container: {e}")
            return True  # Return True anyway so the removal can continue

    def restart_mcp(self, name: str) -> Dict[str, Any]:
        """Restart an MCP server container."""
        if not self.client:
            raise Exception("Docker client is not available")

        # Get the MCP configuration
        mcp_config = self.get_mcp(name)
        if not mcp_config:
            raise ValueError(f"MCP server '{name}' not found")

        # Remote and Local MCPs don't have containers to restart
        if mcp_config.get("type") in ["remote", "local"]:
            return {
                "status": "not_applicable",
                "name": name,
                "type": mcp_config.get("type"),
            }

        # Get the container name
        container_name = self.get_mcp_container_name(name)

        # Try to get and restart the container
        try:
            container = self.client.containers.get(container_name)
            container.restart(timeout=10)
            return {
                "container_id": container.id,
                "status": "running",
                "name": name,
            }
        except NotFound:
            # Container doesn't exist, start it
            return self.start_mcp(name)
        except Exception as e:
            logger.error(f"Error restarting MCP container: {e}")
            raise

    def get_mcp_status(self, name: str) -> Dict[str, Any]:
        """Get the status of an MCP server."""
        if not self.client:
            raise Exception("Docker client is not available")

        # Get the MCP configuration
        mcp_config = self.get_mcp(name)
        if not mcp_config:
            raise ValueError(f"MCP server '{name}' not found")

        # Remote MCPs don't have containers
        if mcp_config.get("type") == "remote":
            return {
                "status": "not_applicable",
                "name": name,
                "type": "remote",
                "url": mcp_config.get("url"),
            }

        # Local MCPs don't have containers
        if mcp_config.get("type") == "local":
            return {
                "status": "not_applicable",
                "name": name,
                "type": "local",
                "command": mcp_config.get("command"),
                "args": mcp_config.get("args", []),
            }

        # Get the container name
        container_name = self.get_mcp_container_name(name)

        # Try to get the container status
        try:
            container = self.client.containers.get(container_name)
            status = (
                MCPStatus.RUNNING
                if container.status == "running"
                else MCPStatus.STOPPED
            )

            # Get container details
            container_info = container.attrs

            # Extract exposed ports from config
            ports = {}
            if (
                "Config" in container_info
                and "ExposedPorts" in container_info["Config"]
            ):
                # Add all exposed ports
                for port in container_info["Config"]["ExposedPorts"].keys():
                    ports[port] = None

            # Add any ports that might be published
            if (
                "NetworkSettings" in container_info
                and "Ports" in container_info["NetworkSettings"]
            ):
                for port, mappings in container_info["NetworkSettings"][
                    "Ports"
                ].items():
                    if mappings:
                        # Port is bound to host
                        ports[port] = int(mappings[0]["HostPort"])

            return {
                "status": status.value,
                "container_id": container.id,
                "name": name,
                "type": mcp_config.get("type"),
                "image": container_info["Config"]["Image"],
                "ports": ports,
                "created": container_info["Created"],
            }
        except NotFound:
            # Container doesn't exist
            return {
                "status": MCPStatus.NOT_FOUND.value,
                "name": name,
                "type": mcp_config.get("type"),
            }
        except Exception as e:
            logger.error(f"Error getting MCP container status: {e}")
            return {
                "status": MCPStatus.FAILED.value,
                "name": name,
                "error": str(e),
            }

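The ExposedPorts/NetworkSettings traversal above is repeated verbatim in list_mcp_containers below; a shared helper could look like this (a sketch of a possible refactor, not part of the diff):

from typing import Dict, Optional

def extract_ports(container_info: dict) -> Dict[str, Optional[int]]:
    """Map "port/proto" -> bound host port, or None when unpublished."""
    ports: Dict[str, Optional[int]] = {}
    for port in container_info.get("Config", {}).get("ExposedPorts", {}):
        ports[port] = None
    port_map = container_info.get("NetworkSettings", {}).get("Ports", {}) or {}
    for port, mappings in port_map.items():
        if mappings:
            ports[port] = int(mappings[0]["HostPort"])
    return ports
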
    def get_mcp_logs(self, name: str, tail: int = 100) -> str:
        """Get logs from an MCP server container."""
        if not self.client:
            raise Exception("Docker client is not available")

        # Get the MCP configuration
        mcp_config = self.get_mcp(name)
        if not mcp_config:
            raise ValueError(f"MCP server '{name}' not found")

        # Remote and Local MCPs don't have logs
        if mcp_config.get("type") == "remote":
            return "Remote MCPs don't have local logs"
        if mcp_config.get("type") == "local":
            return "Local MCPs don't have container logs"

        # Get the container name
        container_name = self.get_mcp_container_name(name)

        # Try to get the container logs
        try:
            container = self.client.containers.get(container_name)
            logs = container.logs(tail=tail, timestamps=True).decode("utf-8")
            return logs
        except NotFound:
            # Container doesn't exist
            return f"MCP container '{name}' not found"
        except Exception as e:
            logger.error(f"Error getting MCP container logs: {e}")
            return f"Error getting logs: {str(e)}"

    def list_mcp_containers(self) -> List[MCPContainer]:
        """List all MCP containers."""
        if not self.client:
            raise Exception("Docker client is not available")

        # Get all containers with the cubbi.mcp label
        containers = self.client.containers.list(
            all=True, filters={"label": "cubbi.mcp"}
        )

        result = []
        for container in containers:
            # Get container details
            container_info = container.attrs

            # Extract labels
            labels = container_info["Config"]["Labels"]

            # Extract exposed ports from config
            ports = {}
            if (
                "Config" in container_info
                and "ExposedPorts" in container_info["Config"]
            ):
                # Add all exposed ports
                for port in container_info["Config"]["ExposedPorts"].keys():
                    ports[port] = None

            # Add any ports that might be published
            if (
                "NetworkSettings" in container_info
                and "Ports" in container_info["NetworkSettings"]
            ):
                for port, mappings in container_info["NetworkSettings"][
                    "Ports"
                ].items():
                    if mappings:
                        # Port is bound to host
                        ports[port] = int(mappings[0]["HostPort"])

            # Determine status
            status = (
                MCPStatus.RUNNING
                if container.status == "running"
                else MCPStatus.STOPPED
            )

            # Create MCPContainer object
            mcp_container = MCPContainer(
                name=labels.get("cubbi.mcp.name", "unknown"),
                container_id=container.id,
                status=status,
                image=container_info["Config"]["Image"],
                ports=ports,
                created_at=container_info["Created"],
                type=labels.get("cubbi.mcp.type", "unknown"),
            )

            result.append(mcp_container)

        return result

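Typical consumption of list_mcp_containers is a status listing; a quick usage sketch (the manager class name is assumed here, standing in for whichever class hosts these methods):

manager = ContainerManager()  # hypothetical name for the class defined above
for mcp in manager.list_mcp_containers():
    print(f"{mcp.name:20} {mcp.type:8} {mcp.status.value:8} ports={mcp.ports}")
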
248
cubbi/model_fetcher.py
Normal file
@@ -0,0 +1,248 @@
"""
|
||||
Model fetching utilities for OpenAI-compatible providers.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import requests
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ModelFetcher:
|
||||
"""Fetches model lists from OpenAI-compatible API endpoints."""
|
||||
|
||||
def __init__(self, timeout: int = 30):
|
||||
"""Initialize the model fetcher.
|
||||
|
||||
Args:
|
||||
timeout: Request timeout in seconds
|
||||
"""
|
||||
self.timeout = timeout
|
||||
|
||||
def fetch_models(
|
||||
self,
|
||||
base_url: str,
|
||||
api_key: Optional[str] = None,
|
||||
headers: Optional[Dict[str, str]] = None,
|
||||
provider_type: Optional[str] = None,
|
||||
) -> List[Dict[str, str]]:
|
||||
"""Fetch models from an OpenAI-compatible /v1/models endpoint.
|
||||
|
||||
Args:
|
||||
base_url: Base URL of the provider (e.g., "https://api.openai.com" or "https://api.litellm.com")
|
||||
api_key: Optional API key for authentication
|
||||
headers: Optional additional headers
|
||||
provider_type: Optional provider type for authentication handling
|
||||
|
||||
Returns:
|
||||
List of model dictionaries with 'id' and 'name' keys
|
||||
|
||||
Raises:
|
||||
requests.RequestException: If the request fails
|
||||
ValueError: If the response format is invalid
|
||||
"""
|
||||
# Construct the models endpoint URL
|
||||
models_url = self._build_models_url(base_url)
|
||||
|
||||
# Prepare headers
|
||||
request_headers = self._build_headers(api_key, headers, provider_type)
|
||||
|
||||
logger.info(f"Fetching models from {models_url}")
|
||||
|
||||
try:
|
||||
response = requests.get(
|
||||
models_url, headers=request_headers, timeout=self.timeout
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
# Parse JSON response
|
||||
data = response.json()
|
||||
|
||||
# Handle provider-specific response formats
|
||||
if provider_type == "google":
|
||||
# Google uses {"models": [...]} format
|
||||
if not isinstance(data, dict) or "models" not in data:
|
||||
raise ValueError(
|
||||
f"Invalid Google response format: expected dict with 'models' key, got {type(data)}"
|
||||
)
|
||||
models_data = data["models"]
|
||||
else:
|
||||
# OpenAI-compatible format uses {"data": [...]}
|
||||
if not isinstance(data, dict) or "data" not in data:
|
||||
raise ValueError(
|
||||
f"Invalid response format: expected dict with 'data' key, got {type(data)}"
|
||||
)
|
||||
models_data = data["data"]
|
||||
|
||||
if not isinstance(models_data, list):
|
||||
raise ValueError(
|
||||
f"Invalid models data: expected list, got {type(models_data)}"
|
||||
)
|
||||
|
||||
# Process models
|
||||
models = []
|
||||
for model_item in models_data:
|
||||
if not isinstance(model_item, dict):
|
||||
continue
|
||||
|
||||
# Handle provider-specific model ID fields
|
||||
if provider_type == "google":
|
||||
# Google uses "name" field (e.g., "models/gemini-1.5-pro")
|
||||
model_id = model_item.get("name", "")
|
||||
else:
|
||||
# OpenAI-compatible uses "id" field
|
||||
model_id = model_item.get("id", "")
|
||||
|
||||
if not model_id:
|
||||
continue
|
||||
|
||||
# Skip models with * in their ID as requested
|
||||
if "*" in model_id:
|
||||
logger.debug(f"Skipping model with wildcard: {model_id}")
|
||||
continue
|
||||
|
||||
# Create model entry
|
||||
model = {
|
||||
"id": model_id,
|
||||
}
|
||||
models.append(model)
|
||||
|
||||
logger.info(f"Successfully fetched {len(models)} models from {base_url}")
|
||||
return models
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
logger.error(f"Request timed out after {self.timeout} seconds")
|
||||
raise requests.RequestException(f"Request to {models_url} timed out")
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
logger.error(f"Connection error: {e}")
|
||||
raise requests.RequestException(f"Failed to connect to {models_url}")
|
||||
except requests.exceptions.HTTPError as e:
|
||||
logger.error(f"HTTP error {e.response.status_code}: {e}")
|
||||
if e.response.status_code == 401:
|
||||
raise requests.RequestException(
|
||||
"Authentication failed: invalid API key"
|
||||
)
|
||||
elif e.response.status_code == 403:
|
||||
raise requests.RequestException(
|
||||
"Access forbidden: check API key permissions"
|
||||
)
|
||||
else:
|
||||
raise requests.RequestException(
|
||||
f"HTTP {e.response.status_code} error from {models_url}"
|
||||
)
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"Failed to parse JSON response: {e}")
|
||||
raise ValueError(f"Invalid JSON response from {models_url}")
|
||||
|
||||
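The two response shapes the parser distinguishes look roughly like this (abbreviated; field values illustrative):

openai_style = {"object": "list", "data": [{"id": "gpt-4o", "object": "model"}]}
google_style = {"models": [{"name": "models/gemini-1.5-pro"}]}
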
    def _build_models_url(self, base_url: str) -> str:
        """Build the models endpoint URL from a base URL.

        Args:
            base_url: Base URL of the provider

        Returns:
            Complete URL for the /v1/models endpoint
        """
        # Remove trailing slash if present
        base_url = base_url.rstrip("/")

        # Add /v1/models if not already present
        if not base_url.endswith("/v1/models"):
            if base_url.endswith("/v1"):
                base_url += "/models"
            else:
                base_url += "/v1/models"

        return base_url

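A few concrete inputs and outputs for _build_models_url, which follow directly from the code above:

f = ModelFetcher()
assert f._build_models_url("https://api.openai.com") == "https://api.openai.com/v1/models"
assert f._build_models_url("https://proxy.example/v1/") == "https://proxy.example/v1/models"
assert f._build_models_url("https://proxy.example/v1/models") == "https://proxy.example/v1/models"
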
    def _build_headers(
        self,
        api_key: Optional[str] = None,
        additional_headers: Optional[Dict[str, str]] = None,
        provider_type: Optional[str] = None,
    ) -> Dict[str, str]:
        """Build request headers.

        Args:
            api_key: Optional API key for authentication
            additional_headers: Optional additional headers
            provider_type: Provider type for specific auth handling

        Returns:
            Dictionary of headers
        """
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
        }

        # Add authentication header if API key is provided
        if api_key:
            if provider_type == "anthropic":
                # Anthropic uses x-api-key header
                headers["x-api-key"] = api_key
            elif provider_type == "google":
                # Google uses x-goog-api-key header
                headers["x-goog-api-key"] = api_key
            else:
                # Standard Bearer token for OpenAI, OpenRouter, and custom providers
                headers["Authorization"] = f"Bearer {api_key}"

        # Add any additional headers
        if additional_headers:
            headers.update(additional_headers)

        return headers

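Per provider type, the resulting auth header is (derived from the branches above):

fetcher = ModelFetcher()
fetcher._build_headers("sk-test")                             # Authorization: Bearer sk-test
fetcher._build_headers("sk-test", provider_type="anthropic")  # x-api-key: sk-test
fetcher._build_headers("sk-test", provider_type="google")     # x-goog-api-key: sk-test
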
def fetch_provider_models(
    provider_config: Dict, timeout: int = 30
) -> List[Dict[str, str]]:
    """Convenience function to fetch models for a provider configuration.

    Args:
        provider_config: Provider configuration dictionary
        timeout: Request timeout in seconds

    Returns:
        List of model dictionaries

    Raises:
        ValueError: If provider is not supported or missing required fields
        requests.RequestException: If the request fails
    """
    import os

    from .config import PROVIDER_DEFAULT_URLS

    provider_type = provider_config.get("type", "")
    base_url = provider_config.get("base_url")
    api_key = provider_config.get("api_key", "")

    # Resolve environment variables in API key
    if api_key.startswith("${") and api_key.endswith("}"):
        env_var_name = api_key[2:-1]
        api_key = os.environ.get(env_var_name, "")

    # Determine base URL - use custom base_url or default for standard providers
    if base_url:
        # Custom provider with explicit base_url
        effective_base_url = base_url
    elif provider_type in PROVIDER_DEFAULT_URLS:
        # Standard provider - use default URL
        effective_base_url = PROVIDER_DEFAULT_URLS[provider_type]
    else:
        raise ValueError(
            f"Unsupported provider type '{provider_type}'. Must be one of: {list(PROVIDER_DEFAULT_URLS.keys())} or have a custom base_url"
        )

    # Prepare additional headers for specific providers
    headers = {}
    if provider_type == "anthropic":
        # Anthropic uses a different API version header
        headers["anthropic-version"] = "2023-06-01"

    fetcher = ModelFetcher(timeout=timeout)
    return fetcher.fetch_models(effective_base_url, api_key, headers, provider_type)

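End-to-end usage with a provider entry as stored in the user config (the ${...} reference is resolved from the environment before the request goes out; values illustrative):

provider = {"type": "openai", "api_key": "${OPENAI_API_KEY}"}
models = fetch_provider_models(provider, timeout=10)
print([m["id"] for m in models][:5])
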
111
cubbi/models.py
Normal file
@@ -0,0 +1,111 @@
from enum import Enum
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field


class SessionStatus(str, Enum):
    CREATING = "creating"
    RUNNING = "running"
    STOPPED = "stopped"
    FAILED = "failed"


class MCPStatus(str, Enum):
    RUNNING = "running"
    STOPPED = "stopped"
    NOT_FOUND = "not_found"
    FAILED = "failed"


class ImageEnvironmentVariable(BaseModel):
    name: str
    description: str
    required: bool = False
    default: Optional[str] = None
    sensitive: bool = False


class PersistentConfig(BaseModel):
    source: str
    target: str
    type: str  # "directory" or "file"
    description: str = ""


class Image(BaseModel):
    name: str
    description: str
    version: str
    maintainer: str
    image: str
    environment: List[ImageEnvironmentVariable] = []
    persistent_configs: List[PersistentConfig] = []
    environments_to_forward: List[str] = []


class RemoteMCP(BaseModel):
    name: str
    type: str = "remote"
    url: str
    headers: Dict[str, str] = Field(default_factory=dict)
    mcp_type: Optional[str] = None


class DockerMCP(BaseModel):
    name: str
    type: str = "docker"
    image: str
    command: str
    env: Dict[str, str] = Field(default_factory=dict)


class ProxyMCP(BaseModel):
    name: str
    type: str = "proxy"
    base_image: str
    proxy_image: str
    command: str
    proxy_options: Dict[str, Any] = Field(default_factory=dict)
    env: Dict[str, str] = Field(default_factory=dict)
    host_port: Optional[int] = None  # External port to bind the SSE port to on the host


class LocalMCP(BaseModel):
    name: str
    type: str = "local"
    command: str  # Path to executable
    args: List[str] = Field(default_factory=list)  # Command arguments
    env: Dict[str, str] = Field(default_factory=dict)  # Environment variables


MCP = Union[RemoteMCP, DockerMCP, ProxyMCP, LocalMCP]


class MCPContainer(BaseModel):
    name: str
    container_id: str
    status: MCPStatus
    image: str
    ports: Dict[str, Optional[int]] = Field(default_factory=dict)
    created_at: str
    type: str


class Session(BaseModel):
    id: str
    name: str
    image: str
    status: SessionStatus
    container_id: Optional[str] = None
    ports: Dict[int, int] = Field(default_factory=dict)
    mcps: List[str] = Field(default_factory=list)


class Config(BaseModel):
    docker: Dict[str, str] = Field(default_factory=dict)
    images: Dict[str, Image] = Field(default_factory=dict)
    defaults: Dict[str, object] = Field(
        default_factory=dict
    )  # Can store strings, booleans, lists, or other values
    mcps: List[Dict[str, Any]] = Field(default_factory=list)

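For illustration, a proxy-type MCP as these models would represent it (image names and ports are made up):

mcp = ProxyMCP(
    name="fetch",
    base_image="example/fetch-mcp:latest",   # illustrative image name
    proxy_image="example/mcp-proxy:latest",  # illustrative image name
    command="",
    proxy_options={"sse_port": 8080},
    host_port=18080,
)
assert mcp.type == "proxy"
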
156
cubbi/session.py
Normal file
@@ -0,0 +1,156 @@
"""
|
||||
Session storage management for Cubbi Container Tool.
|
||||
"""
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
DEFAULT_SESSIONS_FILE = Path.home() / ".config" / "cubbi" / "sessions.yaml"
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _file_lock(file_path: Path):
|
||||
"""Context manager for file locking.
|
||||
|
||||
Args:
|
||||
file_path: Path to the file to lock
|
||||
|
||||
Yields:
|
||||
File descriptor with exclusive lock
|
||||
"""
|
||||
# Ensure the file exists
|
||||
file_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
if not file_path.exists():
|
||||
file_path.touch(mode=0o600)
|
||||
|
||||
# Open file and acquire exclusive lock
|
||||
fd = open(file_path, "r+")
|
||||
try:
|
||||
fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
|
||||
yield fd
|
||||
finally:
|
||||
fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
|
||||
fd.close()
|
||||
|
||||
|
||||
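Every mutation in SessionManager below follows the same read-merge-write cycle under this lock; distilled to its essence (a sketch, not library code):

with _file_lock(DEFAULT_SESSIONS_FILE) as fd:
    fd.seek(0)
    sessions = yaml.safe_load(fd) or {}              # re-read latest state under the lock
    sessions["session-123"] = {"status": "running"}  # apply the change
    fd.seek(0)
    fd.truncate()
    yaml.safe_dump(sessions, fd)                     # write back while still holding the lock
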
class SessionManager:
    """Manager for container sessions."""

    def __init__(self, sessions_path: Optional[Path] = None):
        """Initialize the session manager.

        Args:
            sessions_path: Optional path to the sessions file.
                Defaults to ~/.config/cubbi/sessions.yaml.
        """
        self.sessions_path = sessions_path or DEFAULT_SESSIONS_FILE
        self.sessions = self._load_sessions()

    def _load_sessions(self) -> Dict[str, dict]:
        """Load sessions from file or create an empty sessions file if it doesn't exist."""
        if not self.sessions_path.exists():
            # Create directory if it doesn't exist
            self.sessions_path.parent.mkdir(parents=True, exist_ok=True)
            # Create empty sessions file
            with open(self.sessions_path, "w") as f:
                yaml.safe_dump({}, f)
            # Set secure permissions
            os.chmod(self.sessions_path, 0o600)
            return {}

        # Load existing sessions
        with open(self.sessions_path, "r") as f:
            sessions = yaml.safe_load(f) or {}
        return sessions

    def save(self) -> None:
        """Save the sessions to file.

        Note: This method acquires a file lock and merges with existing data
        to prevent concurrent write issues.
        """
        with _file_lock(self.sessions_path) as fd:
            # Reload sessions from disk to get latest state
            fd.seek(0)
            sessions = yaml.safe_load(fd) or {}

            # Merge current in-memory sessions with disk state
            sessions.update(self.sessions)

            # Write back to file
            fd.seek(0)
            fd.truncate()
            yaml.safe_dump(sessions, fd)

            # Update in-memory cache
            self.sessions = sessions

    def add_session(self, session_id: str, session_data: dict) -> None:
        """Add a session to storage.

        Args:
            session_id: The unique session ID
            session_data: The session data (Session model dump as dict)
        """
        with _file_lock(self.sessions_path) as fd:
            # Reload sessions from disk to get latest state
            fd.seek(0)
            sessions = yaml.safe_load(fd) or {}

            # Apply the modification
            sessions[session_id] = session_data

            # Write back to file
            fd.seek(0)
            fd.truncate()
            yaml.safe_dump(sessions, fd)

            # Update in-memory cache
            self.sessions = sessions

    def get_session(self, session_id: str) -> Optional[dict]:
        """Get a session by ID.

        Args:
            session_id: The session ID

        Returns:
            The session data or None if not found
        """
        return self.sessions.get(session_id)

    def list_sessions(self) -> Dict[str, dict]:
        """List all sessions.

        Returns:
            Dict of session ID to session data
        """
        return self.sessions

    def remove_session(self, session_id: str) -> None:
        """Remove a session from storage.

        Args:
            session_id: The session ID to remove
        """
        with _file_lock(self.sessions_path) as fd:
            # Reload sessions from disk to get latest state
            fd.seek(0)
            sessions = yaml.safe_load(fd) or {}

            # Apply the modification
            if session_id in sessions:
                del sessions[session_id]

            # Write back to file
            fd.seek(0)
            fd.truncate()
            yaml.safe_dump(sessions, fd)

            # Update in-memory cache
            self.sessions = sessions

774
cubbi/user_config.py
Normal file
@@ -0,0 +1,774 @@
"""
|
||||
User configuration manager for Cubbi Container Tool.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
import yaml
|
||||
|
||||
# Define the environment variable mappings for auto-discovery
|
||||
STANDARD_PROVIDERS = {
|
||||
"anthropic": {
|
||||
"type": "anthropic",
|
||||
"env_key": "ANTHROPIC_API_KEY",
|
||||
},
|
||||
"openai": {
|
||||
"type": "openai",
|
||||
"env_key": "OPENAI_API_KEY",
|
||||
},
|
||||
"google": {
|
||||
"type": "google",
|
||||
"env_key": "GOOGLE_API_KEY",
|
||||
},
|
||||
"openrouter": {
|
||||
"type": "openrouter",
|
||||
"env_key": "OPENROUTER_API_KEY",
|
||||
},
|
||||
}
|
||||
|
||||
# Legacy environment variable mappings (kept for backward compatibility)
|
||||
LEGACY_ENV_MAPPINGS = {
|
||||
"services.langfuse.url": "LANGFUSE_URL",
|
||||
"services.langfuse.public_key": "LANGFUSE_INIT_PROJECT_PUBLIC_KEY",
|
||||
"services.langfuse.secret_key": "LANGFUSE_INIT_PROJECT_SECRET_KEY",
|
||||
"services.openai.api_key": "OPENAI_API_KEY",
|
||||
"services.openai.url": "OPENAI_URL",
|
||||
"services.anthropic.api_key": "ANTHROPIC_API_KEY",
|
||||
"services.openrouter.api_key": "OPENROUTER_API_KEY",
|
||||
"services.google.api_key": "GOOGLE_API_KEY",
|
||||
}
|
||||
|
||||
|
||||
class UserConfigManager:
    """Manager for user-specific configuration."""

    def __init__(self, config_path: Optional[str] = None):
        """Initialize the user configuration manager.

        Args:
            config_path: Optional path to the configuration file.
                Defaults to ~/.config/cubbi/config.yaml.
        """
        # Default to ~/.config/cubbi/config.yaml
        self.config_path = Path(
            config_path or os.path.expanduser("~/.config/cubbi/config.yaml")
        )
        self.config = self._load_config()

    def _load_config(self) -> Dict[str, Any]:
        """Load configuration from file or create with defaults if it doesn't exist."""
        if not self.config_path.exists():
            # Create directory if it doesn't exist
            self.config_path.parent.mkdir(parents=True, exist_ok=True)
            # Create default config
            default_config = self._get_default_config()

            # Auto-discover and add providers from environment for new configs
            self._auto_discover_providers(default_config)

            # Save to file
            with open(self.config_path, "w") as f:
                yaml.safe_dump(default_config, f)
            # Set secure permissions
            os.chmod(self.config_path, 0o600)
            return default_config

        # Load existing config with error handling
        try:
            with open(self.config_path, "r") as f:
                config = yaml.safe_load(f) or {}

            # Check for backup file that might be newer
            backup_path = self.config_path.with_suffix(".yaml.bak")
            if backup_path.exists():
                # Check if backup is newer than main config
                if backup_path.stat().st_mtime > self.config_path.stat().st_mtime:
                    try:
                        with open(backup_path, "r") as f:
                            backup_config = yaml.safe_load(f) or {}
                        print("Found newer backup config, using that instead")
                        config = backup_config
                    except Exception as e:
                        print(f"Failed to load backup config: {e}")

        except Exception as e:
            print(f"Error loading configuration: {e}")
            # Try to load from backup if main config is corrupted
            backup_path = self.config_path.with_suffix(".yaml.bak")
            if backup_path.exists():
                try:
                    with open(backup_path, "r") as f:
                        config = yaml.safe_load(f) or {}
                    print("Loaded configuration from backup file")
                except Exception as backup_e:
                    print(f"Failed to load backup configuration: {backup_e}")
                    config = {}
            else:
                config = {}

        # Merge with defaults for any missing fields
        config = self._merge_with_defaults(config)

        # Auto-discover and add providers from environment
        self._auto_discover_providers(config)

        return config

    def _get_default_config(self) -> Dict[str, Any]:
        """Get the default configuration."""
        return {
            "defaults": {
                "image": "goose",
                "connect": True,
                "mount_local": True,
                "networks": [],  # Default networks to connect to (besides cubbi-network)
                "volumes": [],  # Default volumes to mount, format: "source:dest"
                "ports": [],  # Default ports to forward, format: list of integers
                "mcps": [],  # Default MCP servers to connect to
                "model": "anthropic/claude-3-5-sonnet-latest",  # Default LLM model (provider/model format)
            },
            "providers": {},  # LLM providers configuration
            "services": {
                "langfuse": {},  # Keep langfuse in services as it's not an LLM provider
            },
            "docker": {
                "network": "cubbi-network",
            },
            "ui": {
                "colors": True,
                "verbose": False,
            },
        }

    def _merge_with_defaults(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Merge user config with defaults for missing values."""
        defaults = self._get_default_config()

        # Deep merge of config with defaults
        def _deep_merge(source, destination):
            for key, value in source.items():
                if key not in destination:
                    destination[key] = value
                elif isinstance(value, dict) and isinstance(destination[key], dict):
                    _deep_merge(value, destination[key])
            return destination

        return _deep_merge(defaults, config)

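_deep_merge only fills in missing keys, so user values always win. For example:

defaults = {"defaults": {"image": "goose", "connect": True}}
user = {"defaults": {"image": "claude"}}
# after a _deep_merge(defaults, user)-style pass:
# user == {"defaults": {"image": "claude", "connect": True}}
# i.e. "image" keeps the user's value, "connect" is filled in from defaults
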
    def get(self, key_path: str, default: Any = None) -> Any:
        """Get a configuration value by dot-notation path.

        Args:
            key_path: The configuration path (e.g., "defaults.image")
            default: The default value to return if not found

        Returns:
            The configuration value or default if not found
        """
        # Handle shorthand service paths (e.g., "langfuse.url")
        if (
            "." in key_path
            and not key_path.startswith("services.")
            and not any(
                key_path.startswith(section + ".")
                for section in ["defaults", "docker", "remote", "ui", "providers"]
            )
        ):
            service, setting = key_path.split(".", 1)
            key_path = f"services.{service}.{setting}"

        parts = key_path.split(".")
        result = self.config

        for part in parts:
            if part not in result:
                return default
            result = result[part]

        return result

    def set(self, key_path: str, value: Any) -> None:
        """Set a configuration value by dot-notation path.

        Args:
            key_path: The configuration path (e.g., "defaults.image")
            value: The value to set
        """
        # Handle shorthand service paths (e.g., "langfuse.url")
        if (
            "." in key_path
            and not key_path.startswith("services.")
            and not any(
                key_path.startswith(section + ".")
                for section in ["defaults", "docker", "remote", "ui", "providers"]
            )
        ):
            service, setting = key_path.split(".", 1)
            key_path = f"services.{service}.{setting}"

        parts = key_path.split(".")
        config = self.config

        # Navigate to the containing dictionary
        for part in parts[:-1]:
            if part not in config:
                config[part] = {}
            config = config[part]

        # Set the value
        config[parts[-1]] = value
        self.save()

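The shorthand rewrite means a bare service path and its explicit form hit the same key, while known sections like defaults are left untouched:

cfg = UserConfigManager()
cfg.get("langfuse.url")              # rewritten to "services.langfuse.url"
cfg.get("services.langfuse.url")     # same lookup, spelled out
cfg.set("defaults.image", "claude")  # "defaults" is a known section, no rewrite
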
    def save(self) -> None:
        """Save the configuration to file with error handling and backup."""
        # Create backup of existing config file if it exists
        if self.config_path.exists():
            backup_path = self.config_path.with_suffix(".yaml.bak")
            try:
                import shutil

                shutil.copy2(self.config_path, backup_path)
            except Exception as e:
                print(f"Warning: Failed to create config backup: {e}")

        # Ensure parent directory exists
        self.config_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            # Write to a temporary file first
            temp_path = self.config_path.with_suffix(".yaml.tmp")
            with open(temp_path, "w") as f:
                yaml.safe_dump(self.config, f)

            # Set secure permissions on temp file
            os.chmod(temp_path, 0o600)

            # Rename temp file to actual config file (atomic operation)
            # Use os.replace which is atomic on Unix systems
            os.replace(temp_path, self.config_path)

        except Exception as e:
            print(f"Error saving configuration: {e}")
            # If we have a backup and the save failed, try to restore from backup
            backup_path = self.config_path.with_suffix(".yaml.bak")
            if backup_path.exists():
                try:
                    import shutil

                    shutil.copy2(backup_path, self.config_path)
                    print("Restored configuration from backup")
                except Exception as restore_error:
                    print(
                        f"Failed to restore configuration from backup: {restore_error}"
                    )

    def reset(self) -> None:
        """Reset the configuration to defaults."""
        self.config = self._get_default_config()
        self.save()

    def get_environment_variables(self) -> Dict[str, str]:
        """Get environment variables from the configuration.

        NOTE: API keys are now handled by cubbi_init plugins, not passed from host.

        Returns:
            A dictionary of environment variables to set in the container.
        """
        env_vars = {}

        # Process the legacy service configurations and map to environment variables
        # BUT EXCLUDE API KEYS - they're now handled by cubbi_init
        for config_path, env_var in LEGACY_ENV_MAPPINGS.items():
            # Skip API key environment variables - let cubbi_init handle them
            if any(
                key_word in env_var.upper() for key_word in ["API_KEY", "SECRET_KEY"]
            ):
                continue

            value = self.get(config_path)
            if value:
                # Handle environment variable references
                if (
                    isinstance(value, str)
                    and value.startswith("${")
                    and value.endswith("}")
                ):
                    env_var_name = value[2:-1]
                    value = os.environ.get(env_var_name, "")

                env_vars[env_var] = str(value)

        # NOTE: Provider API keys are no longer passed as environment variables
        # They are now handled by cubbi_init plugins based on selected model
        # This prevents unused API keys from being exposed in containers

        return env_vars

    def get_provider_environment_variables(self, provider_name: str) -> Dict[str, str]:
        """Get environment variables for a specific provider.

        Args:
            provider_name: Name of the provider to get environment variables for

        Returns:
            Dictionary of environment variables for the provider
        """
        env_vars = {}
        provider_config = self.get_provider(provider_name)

        if not provider_config:
            return env_vars

        provider_type = provider_config.get("type", provider_name)
        api_key = provider_config.get("api_key", "")
        base_url = provider_config.get("base_url")

        # Resolve environment variable references
        if api_key.startswith("${") and api_key.endswith("}"):
            env_var_name = api_key[2:-1]
            resolved_api_key = os.environ.get(env_var_name, "")
        else:
            resolved_api_key = api_key

        if not resolved_api_key:
            return env_vars

        # Add environment variables based on provider type
        if provider_type == "anthropic":
            env_vars["ANTHROPIC_API_KEY"] = resolved_api_key
        elif provider_type == "openai":
            env_vars["OPENAI_API_KEY"] = resolved_api_key
            if base_url:
                env_vars["OPENAI_URL"] = base_url
        elif provider_type == "google":
            env_vars["GOOGLE_API_KEY"] = resolved_api_key
        elif provider_type == "openrouter":
            env_vars["OPENROUTER_API_KEY"] = resolved_api_key

        return env_vars

    def get_all_providers_environment_variables(self) -> Dict[str, str]:
        """Get environment variables for all configured providers.

        Returns:
            Dictionary of all provider environment variables
        """
        env_vars = {}
        providers = self.get("providers", {})

        for provider_name in providers.keys():
            provider_env = self.get_provider_environment_variables(provider_name)
            env_vars.update(provider_env)

        return env_vars

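Concretely, per provider type the returned mapping looks like this (values resolved from the environment or the stored key):

cfg.get_provider_environment_variables("anthropic")
# -> {"ANTHROPIC_API_KEY": "<resolved key>"}
cfg.get_provider_environment_variables("openai")
# -> {"OPENAI_API_KEY": "<resolved key>", "OPENAI_URL": "<base_url>"}  # URL only when base_url is set
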
    def list_config(self) -> List[Tuple[str, Any]]:
        """List all configuration values as flattened key-value pairs.

        Returns:
            A list of (key, value) tuples with flattened key paths.
        """
        result = []

        def _flatten_dict(d, prefix=""):
            for key, value in d.items():
                full_key = f"{prefix}.{key}" if prefix else key
                if isinstance(value, dict):
                    _flatten_dict(value, full_key)
                else:
                    # Mask sensitive values
                    if any(
                        substr in full_key.lower()
                        for substr in ["key", "token", "secret", "password"]
                    ):
                        displayed_value = "*****" if value else value
                    else:
                        displayed_value = value
                    result.append((full_key, displayed_value))

        _flatten_dict(self.config)
        return sorted(result)

    def _auto_discover_providers(self, config: Dict[str, Any]) -> None:
        """Auto-discover providers from environment variables."""
        if "providers" not in config:
            config["providers"] = {}

        for provider_name, provider_info in STANDARD_PROVIDERS.items():
            # Skip if provider already configured
            if provider_name in config["providers"]:
                continue

            # Check if environment variable exists
            api_key = os.environ.get(provider_info["env_key"])
            if api_key:
                config["providers"][provider_name] = {
                    "type": provider_info["type"],
                    "api_key": f"${{{provider_info['env_key']}}}",  # Reference to env var
                }

    def get_provider(self, provider_name: str) -> Optional[Dict[str, Any]]:
        """Get a provider configuration by name."""
        return self.get(f"providers.{provider_name}")

    def list_providers(self) -> Dict[str, Dict[str, Any]]:
        """Get all configured providers."""
        return self.get("providers", {})

    def add_provider(
        self,
        name: str,
        provider_type: str,
        api_key: str,
        base_url: Optional[str] = None,
        env_key: Optional[str] = None,
    ) -> None:
        """Add a new provider configuration.

        Args:
            name: Provider name/identifier
            provider_type: Type of provider (anthropic, openai, etc.)
            api_key: API key value or environment variable reference
            base_url: Custom base URL for API calls (optional)
            env_key: If provided, use env reference instead of direct api_key
        """
        provider_config = {
            "type": provider_type,
            "api_key": f"${{{env_key}}}" if env_key else api_key,
        }

        if base_url:
            provider_config["base_url"] = base_url

        self.set(f"providers.{name}", provider_config)

    def remove_provider(self, name: str) -> bool:
        """Remove a provider configuration.

        Returns:
            True if provider was removed, False if it didn't exist
        """
        providers = self.get("providers", {})
        if name in providers:
            del providers[name]
            self.set("providers", providers)
            return True
        return False

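Auto-discovery means exporting a key is enough to bootstrap a provider entry, and the stored value is a reference rather than the raw key:

import os
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."  # illustrative value
cfg = UserConfigManager()
cfg.get_provider("anthropic")
# -> {"type": "anthropic", "api_key": "${ANTHROPIC_API_KEY}"}
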
    def resolve_model(self, model_spec: str) -> Optional[Dict[str, Any]]:
        """Resolve a model specification (provider/model) to provider config.

        Args:
            model_spec: Model specification in format "provider/model"

        Returns:
            Dictionary with resolved provider config and model name
        """
        if "/" not in model_spec:
            # Legacy format - try to use as provider name with empty model
            provider_name = model_spec
            model_name = ""
        else:
            provider_name, model_name = model_spec.split("/", 1)

        provider_config = self.get_provider(provider_name)
        if not provider_config:
            return None

        # Resolve environment variable references in API key
        api_key = provider_config.get("api_key", "")
        if api_key.startswith("${") and api_key.endswith("}"):
            env_var_name = api_key[2:-1]
            resolved_api_key = os.environ.get(env_var_name, "")
        else:
            resolved_api_key = api_key

        return {
            "provider_name": provider_name,
            "provider_type": provider_config.get("type", provider_name),
            "model_name": model_name,
            "api_key": resolved_api_key,
            "base_url": provider_config.get("base_url"),
        }

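Note that resolve_model splits on the first "/" only, so model IDs that themselves contain slashes survive intact:

resolved = cfg.resolve_model("openrouter/anthropic/claude-3.5-sonnet")
# provider_name="openrouter", model_name="anthropic/claude-3.5-sonnet",
# api_key resolved from ${OPENROUTER_API_KEY} when configured as a reference
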
    # Resource management methods
    def list_mcps(self) -> List[str]:
        """Get all configured default MCP servers."""
        return self.get("defaults.mcps", [])

    def add_mcp(self, name: str) -> None:
        """Add a new default MCP server."""
        mcps = self.list_mcps()
        if name not in mcps:
            mcps.append(name)
            self.set("defaults.mcps", mcps)

    def remove_mcp(self, name: str) -> bool:
        """Remove a default MCP server.

        Returns:
            True if MCP was removed, False if it didn't exist
        """
        mcps = self.list_mcps()
        if name in mcps:
            mcps.remove(name)
            self.set("defaults.mcps", mcps)
            return True
        return False

    def list_mcp_configurations(self) -> List[Dict[str, Any]]:
        """Get all configured MCP server configurations."""
        return self.get("mcps", [])

    def get_mcp_configuration(self, name: str) -> Optional[Dict[str, Any]]:
        """Get an MCP configuration by name."""
        mcps = self.list_mcp_configurations()
        for mcp in mcps:
            if mcp.get("name") == name:
                return mcp
        return None

    def add_mcp_configuration(self, mcp_config: Dict[str, Any]) -> None:
        """Add a new MCP server configuration."""
        mcps = self.list_mcp_configurations()

        # Remove existing MCP with the same name if it exists
        mcps = [mcp for mcp in mcps if mcp.get("name") != mcp_config.get("name")]

        # Add the new MCP
        mcps.append(mcp_config)

        # Save the configuration
        self.set("mcps", mcps)

    def remove_mcp_configuration(self, name: str) -> bool:
        """Remove an MCP server configuration.

        Returns:
            True if MCP was removed, False if it didn't exist
        """
        mcps = self.list_mcp_configurations()
        original_length = len(mcps)

        # Filter out the MCP with the specified name
        mcps = [mcp for mcp in mcps if mcp.get("name") != name]

        if len(mcps) < original_length:
            self.set("mcps", mcps)

            # Also remove from defaults if it's there
            self.remove_mcp(name)
            return True
        return False

    def list_networks(self) -> List[str]:
        """Get all configured default networks."""
        return self.get("defaults.networks", [])

    def add_network(self, name: str) -> None:
        """Add a new default network."""
        networks = self.list_networks()
        if name not in networks:
            networks.append(name)
            self.set("defaults.networks", networks)

    def remove_network(self, name: str) -> bool:
        """Remove a default network.

        Returns:
            True if network was removed, False if it didn't exist
        """
        networks = self.list_networks()
        if name in networks:
            networks.remove(name)
            self.set("defaults.networks", networks)
            return True
        return False

    def list_volumes(self) -> List[str]:
        """Get all configured default volumes."""
        return self.get("defaults.volumes", [])

    def add_volume(self, volume: str) -> None:
        """Add a new default volume mapping."""
        volumes = self.list_volumes()
        if volume not in volumes:
            volumes.append(volume)
            self.set("defaults.volumes", volumes)

    def remove_volume(self, volume: str) -> bool:
        """Remove a default volume mapping.

        Returns:
            True if volume was removed, False if it didn't exist
        """
        volumes = self.list_volumes()
        if volume in volumes:
            volumes.remove(volume)
            self.set("defaults.volumes", volumes)
            return True
        return False

    def list_ports(self) -> List[int]:
        """Get all configured default ports."""
        return self.get("defaults.ports", [])

    def add_port(self, port: int) -> None:
        """Add a new default port."""
        ports = self.list_ports()
        if port not in ports:
            ports.append(port)
            self.set("defaults.ports", ports)

    def remove_port(self, port: int) -> bool:
        """Remove a default port.

        Returns:
            True if port was removed, False if it didn't exist
        """
        ports = self.list_ports()
        if port in ports:
            ports.remove(port)
            self.set("defaults.ports", ports)
            return True
        return False

    # Model management methods
    def list_provider_models(self, provider_name: str) -> List[Dict[str, str]]:
        """Get all models for a specific provider.

        Args:
            provider_name: Name of the provider

        Returns:
            List of model dictionaries with an 'id' key
        """
        provider_config = self.get_provider(provider_name)
        if not provider_config:
            return []

        models = provider_config.get("models", [])
        normalized_models = []
        for model in models:
            if isinstance(model, str):
                normalized_models.append({"id": model})
            elif isinstance(model, dict):
                model_id = model.get("id", "")
                if model_id:
                    normalized_models.append({"id": model_id})

        return normalized_models

    def set_provider_models(
        self, provider_name: str, models: List[Dict[str, str]]
    ) -> None:
        """Set the models for a specific provider.

        Args:
            provider_name: Name of the provider
            models: List of model dictionaries with 'id' and optional 'name' keys
        """
        provider_config = self.get_provider(provider_name)
        if not provider_config:
            return

        # Normalize models - keep only the id field
        normalized_models = []
        for model in models:
            if isinstance(model, dict) and "id" in model:
                normalized_model = {
                    "id": model["id"],
                }
                normalized_models.append(normalized_model)

        provider_config["models"] = normalized_models
        self.set(f"providers.{provider_name}", provider_config)

    def add_provider_model(
        self, provider_name: str, model_id: str, model_name: Optional[str] = None
    ) -> None:
        """Add a model to a provider.

        Args:
            provider_name: Name of the provider
            model_id: ID of the model
            model_name: Optional display name for the model (defaults to model_id)
        """
        models = self.list_provider_models(provider_name)

        for existing_model in models:
            if existing_model["id"] == model_id:
                return

        new_model = {"id": model_id}
        models.append(new_model)
        self.set_provider_models(provider_name, models)

    def remove_provider_model(self, provider_name: str, model_id: str) -> bool:
        """Remove a model from a provider.

        Args:
            provider_name: Name of the provider
            model_id: ID of the model to remove

        Returns:
            True if model was removed, False if it didn't exist
        """
        models = self.list_provider_models(provider_name)
        original_length = len(models)

        # Filter out the model with the specified ID
        models = [model for model in models if model["id"] != model_id]

        if len(models) < original_length:
            self.set_provider_models(provider_name, models)
            return True
        return False

    def is_provider_openai_compatible(self, provider_name: str) -> bool:
        """Check whether a provider is an OpenAI-compatible custom endpoint."""
        provider_config = self.get_provider(provider_name)
        if not provider_config:
            return False

        provider_type = provider_config.get("type", "")
        return provider_type == "openai" and provider_config.get("base_url") is not None

    def supports_model_fetching(self, provider_name: str) -> bool:
        """Check if a provider supports model fetching via API."""
        from .config import PROVIDER_DEFAULT_URLS

        provider = self.get_provider(provider_name)
        if not provider:
            return False

        provider_type = provider.get("type")
        base_url = provider.get("base_url")

        # Provider supports model fetching if:
        # 1. It has a custom base_url (OpenAI-compatible), OR
        # 2. It's a standard provider type that we support
        return base_url is not None or provider_type in PROVIDER_DEFAULT_URLS

    def list_openai_compatible_providers(self) -> List[str]:
        """List all providers that are OpenAI-compatible custom endpoints."""
        providers = self.list_providers()
        compatible_providers = []

        for provider_name in providers.keys():
            if self.is_provider_openai_compatible(provider_name):
                compatible_providers.append(provider_name)

        return compatible_providers

    def list_model_fetchable_providers(self) -> List[str]:
        """List all providers that support model fetching."""
        providers = self.list_providers()
        fetchable_providers = []

        for provider_name in providers.keys():
            if self.supports_model_fetching(provider_name):
                fetchable_providers.append(provider_name)
|
||||
|
||||
return fetchable_providers
|
||||
682
docs/specs/1_SPECIFICATIONS.md
Normal file
@@ -0,0 +1,682 @@
# Cubbi - Container Tool

## Overview

Cubbi is a command-line tool for managing ephemeral containers that run AI tools and development environments. It works with both local Docker and a dedicated remote web service that manages containers in a Docker-in-Docker (DinD) environment.

## Technology Stack

### Cubbi Service
- **Web Framework**: FastAPI for high-performance, async API endpoints
- **Package Management**: uv (Astral) for dependency management
- **Database**: SQLite for development, PostgreSQL for production
- **Container Management**: Docker SDK for Python
- **Authentication**: OAuth 2.0 integration with Authentik

### Cubbi CLI
- **Language**: Python
- **Package Management**: uv for dependency management
- **Distribution**: Standalone binary via PyInstaller or similar
- **Configuration**: YAML for configuration files

## System Architecture

### Components

1. **CLI Tool (`cubbi`)**: The command-line interface users interact with
2. **Cubbi Service**: A web service that handles remote container execution
3. **Container Images**: Predefined container templates for various AI tools

### Architecture Diagram

```
┌─────────────┐           ┌─────────────────────────┐
│             │           │                         │
│  Cubbi CLI  │◄─────────►│   Local Docker Daemon   │
│  (cubbi)    │           │                         │
│             │           └─────────────────────────┘
└──────┬──────┘
       │
       │ REST API
       │
┌──────▼──────┐           ┌─────────────────────────┐
│             │           │                         │
│   Cubbi     │◄─────────►│    Docker-in-Docker     │
│   Service   │           │                         │
│             │           └─────────────────────────┘
└─────────────┘
       │
       ├──────────────┬──────────────┐
       │              │              │
┌──────▼──────┐ ┌─────▼─────┐ ┌──────▼──────┐
│             │ │           │ │             │
│   Fluentd   │ │ Langfuse  │ │   Other     │
│   Logging   │ │  Logging  │ │  Services   │
│             │ │           │ │             │
└─────────────┘ └───────────┘ └─────────────┘
```

## Core Concepts

- **Session**: An active container instance with a specific image
- **Image**: A predefined container template with specific AI tools installed
- **Remote**: A configured Cubbi service instance

## User Configuration

Cubbi supports user-specific configuration via a YAML file located at `~/.config/cubbi/config.yaml`. This provides a way to set default values, store service credentials, and customize behavior without modifying code.

### Configuration File Structure

```yaml
# ~/.config/cubbi/config.yaml
defaults:
  image: "goose"        # Default image to use
  connect: true         # Automatically connect after creating session
  mount_local: true     # Mount local directory by default
  networks: []          # Default networks to connect to (besides cubbi-network)

services:
  # Service credentials with simplified naming
  # These are mapped to environment variables in containers
  langfuse:
    url: ""             # Will be set by the user
    public_key: "pk-lf-..."
    secret_key: "sk-lf-..."

  openai:
    api_key: "sk-..."

  anthropic:
    api_key: "sk-ant-..."

  openrouter:
    api_key: "sk-or-..."

docker:
  network: "cubbi-network"        # Default Docker network to use
  socket: "/var/run/docker.sock"  # Docker socket path

remote:
  default: "production"           # Default remote to use
  endpoints:
    production:
      url: "https://cubbi.monadical.com"
      auth_method: "oauth"
    staging:
      url: "https://cubbi-staging.monadical.com"
      auth_method: "oauth"

ui:
  colors: true          # Enable/disable colors in terminal output
  verbose: false        # Enable/disable verbose output
  table_format: "grid"  # Table format for session listings
```

### Environment Variable Mapping

The simplified configuration names are mapped to environment variables:

| Config Path | Environment Variable |
|-------------|---------------------|
| `services.langfuse.url` | `LANGFUSE_URL` |
| `services.langfuse.public_key` | `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` |
| `services.langfuse.secret_key` | `LANGFUSE_INIT_PROJECT_SECRET_KEY` |
| `services.openai.api_key` | `OPENAI_API_KEY` |
| `services.anthropic.api_key` | `ANTHROPIC_API_KEY` |
| `services.openrouter.api_key` | `OPENROUTER_API_KEY` |

### Environment Variable Precedence

1. Command-line arguments (`-e KEY=VALUE`) take highest precedence
2. User config file takes second precedence
3. System defaults take lowest precedence
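The following is a minimal sketch of how this precedence could be applied when assembling a container's environment. It is illustrative only: `resolve_environment` and `SYSTEM_DEFAULTS` are hypothetical names, not part of the Cubbi codebase.

```python
# Sketch of the precedence rules above (illustrative; names are hypothetical).
from typing import Dict

SYSTEM_DEFAULTS: Dict[str, str] = {"LANGFUSE_URL": "https://cloud.langfuse.com"}


def resolve_environment(
    config_env: Dict[str, str],  # mapped from ~/.config/cubbi/config.yaml
    cli_env: Dict[str, str],  # parsed from repeated -e KEY=VALUE flags
) -> Dict[str, str]:
    env = dict(SYSTEM_DEFAULTS)  # lowest precedence
    env.update(config_env)  # user config overrides system defaults
    env.update(cli_env)  # CLI flags override everything
    return env


# The CLI value wins over the config file value:
print(resolve_environment({"OPENAI_API_KEY": "sk-config"}, {"OPENAI_API_KEY": "sk-cli"}))
```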
### Security Considerations

- Configuration file permissions are set to 600 (user read/write only)
- Sensitive values can be referenced from environment variables: `${ENV_VAR}`
- API keys and secrets are never logged or displayed in verbose output

### CLI Configuration Commands

```bash
# View entire configuration
cubbi config list

# Get specific configuration value
cubbi config get defaults.image

# Set configuration value (using simplified naming)
cubbi config set langfuse.url "https://cloud.langfuse.com"
cubbi config set openai.api_key "sk-..."

# Network configuration
cubbi config network list                    # List default networks
cubbi config network add example-network     # Add a network to defaults
cubbi config network remove example-network  # Remove a network from defaults

# Reset configuration to defaults
cubbi config reset
```

## CLI Tool Commands

### Basic Commands

```bash
# Create a new session locally (shorthand)
cubbi

# List active sessions on local system
cubbi session list

# Create a new session locally
cubbi session create [OPTIONS]

# Create a session with a specific image
cubbi session create --image goose

# Create a session with a specific project repository
cubbi session create --image goose --project github.com/hello/private

# Create a session with external networks
cubbi session create --network teamnet --network othernetwork

# Create a session with a project (shorthand)
cubbi git@github.com:hello/private

# Close a specific session
cubbi session close <id>

# Connect to an existing session
cubbi session connect <id>
```

### Remote Management

```bash
# Add a remote Cubbi service
cubbi remote add <name> <url>

# List configured remote services
cubbi remote list

# Remove a remote service
cubbi remote remove <name>

# Authenticate with a remote service
cubbi -r <remote_name> auth

# Create a session on a remote service
cubbi -r <remote_name> [session create]

# List sessions on a remote service
cubbi -r <remote_name> session list
```

### Environment Variables

```bash
# Set environment variables for a session
cubbi session create -e VAR1=value1 -e VAR2=value2

# Set environment variables for a remote session
cubbi -r <remote_name> session create -e VAR1=value1
```

### Logging

```bash
# Stream logs from a session
cubbi session logs <id>

# Stream logs with follow option
cubbi session logs <id> -f
```

## Cubbi Service Specification

### Overview

The Cubbi Service is a web service that manages ephemeral containers in a Docker-in-Docker environment. It provides a REST API for container lifecycle management, authentication, and real-time log streaming.

### API Endpoints

#### Authentication

```
POST /auth/login     - Initiate Authentik authentication flow
POST /auth/callback  - Handle Authentik OAuth callback
POST /auth/refresh   - Refresh an existing token
POST /auth/logout    - Invalidate current token
```

### Authentik Integration

The Cubbi Service integrates with Authentik at https://authentik.monadical.io using OAuth 2.0:

1. **Application Registration**:
   - Cubbi Service is registered as an OAuth application in Authentik
   - Configured with redirect URI to `/auth/callback`
   - Assigned appropriate scopes for user identification

2. **Authentication Flow**:
   - User initiates authentication via CLI
   - Cubbi CLI opens a browser to the Authentik authorization URL
   - User logs in through Authentik's interface
   - Authentik redirects to the callback URL with an authorization code
   - Cubbi Service exchanges the code for access and refresh tokens
   - CLI receives and securely stores the tokens

3. **Token Management**:
   - Access tokens are used for API authorization
   - Refresh tokens are used to obtain new access tokens
   - Tokens are encrypted at rest in the CLI configuration
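As a rough illustration of the authentication flow, the code-for-token exchange follows the standard OAuth 2.0 authorization-code grant. The token endpoint path and client parameters below are assumptions for the sketch, not values taken from the Cubbi or Authentik configuration.

```python
# Sketch: standard OAuth 2.0 code-for-token exchange (endpoint URL is assumed).
import requests

TOKEN_URL = "https://authentik.monadical.io/application/o/token/"  # assumed path


def exchange_code_for_tokens(code: str, client_id: str, redirect_uri: str) -> dict:
    response = requests.post(
        TOKEN_URL,
        data={
            "grant_type": "authorization_code",
            "code": code,
            "client_id": client_id,
            "redirect_uri": redirect_uri,
        },
        timeout=30,
    )
    response.raise_for_status()
    # Typical payload: access_token, refresh_token, expires_in, token_type
    return response.json()
```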
#### Sessions

```
GET    /sessions               - List all sessions
POST   /sessions               - Create a new session
GET    /sessions/{id}          - Get session details
DELETE /sessions/{id}          - Terminate a session
POST   /sessions/{id}/connect  - Establish connection to session
GET    /sessions/{id}/logs     - Stream session logs
```
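For example, a client could create a session with a plain HTTP call. The request and response bodies below are hypothetical shapes; this document does not define the exact schema.

```python
# Hypothetical session-creation call (request/response schema is assumed).
import requests

API = "https://cubbi.monadical.com"
headers = {"Authorization": "Bearer <access_token>"}

resp = requests.post(
    f"{API}/sessions",
    json={"image": "goose", "environment": {"VAR1": "value1"}},
    headers=headers,
    timeout=30,
)
resp.raise_for_status()
session = resp.json()
print(session["id"])  # assuming the response carries the session id
```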
#### Images

```
GET /images         - List available images
GET /images/{name}  - Get image details
```

#### Projects

```
GET    /projects       - List all projects
POST   /projects       - Add a new project
GET    /projects/{id}  - Get project details
PUT    /projects/{id}  - Update project details
DELETE /projects/{id}  - Remove a project
```

### Service Configuration

```yaml
# cubbi-service.yaml
server:
  port: 3000
  host: 0.0.0.0

docker:
  socket: /var/run/docker.sock
  network: cubbi-network

auth:
  provider: authentik
  url: https://authentik.monadical.io
  clientId: cubbi-service

logging:
  providers:
    - type: fluentd
      url: http://fluentd.example.com:24224
    - type: langfuse
      url: https://cloud.langfuse.com
      public_key: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY}
      secret_key: ${LANGFUSE_INIT_PROJECT_SECRET_KEY}

images:
  - name: goose
    image: monadical/cubbi-goose:latest
  - name: aider
    image: monadical/cubbi-aider:latest
  - name: claude-code
    image: monadical/cubbi-claude-code:latest

projects:
  storage:
    type: encrypted
    key: ${PROJECT_ENCRYPTION_KEY}
  default_ssh_scan:
    - github.com
    - gitlab.com
    - bitbucket.org
```

### Docker-in-Docker Implementation

The Cubbi Service runs in a container with access to the host's Docker socket, allowing it to create and manage sibling containers (a minimal sketch follows the list below). This approach provides:

1. Isolation between containers
2. Simple lifecycle management
3. Resource constraints for security
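A sketch of this sibling-container pattern with the Docker SDK for Python (the service's container library) is shown below; the image name and resource limits are placeholder values, not the service's actual code.

```python
# Sketch: creating a resource-constrained sibling container via the host socket.
import docker

client = docker.from_env()  # uses /var/run/docker.sock mounted into the service

container = client.containers.run(
    "monadical/cubbi-goose:latest",  # placeholder image
    detach=True,
    network="cubbi-network",
    mem_limit="2g",  # example memory constraint
    nano_cpus=1_000_000_000,  # example CPU constraint (1 CPU)
    environment={"CUBBI_PROJECT_URL": "git@github.com:hello/private"},
)
print(container.id)
```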
### Connection Handling

For remote connections to containers, the service provides two methods:

1. **WebSocket Terminal**: Browser-based terminal access
2. **SSH Server**: Each container runs an SSH server for CLI access

### Logging Implementation

The Cubbi Service implements log collection and forwarding:

1. Container logs are captured using Docker's logging drivers
2. Logs are forwarded to configured providers (Fluentd, Langfuse)
3. Real-time log streaming is available via WebSockets

## Project Management

### Persistent Project Configuration

Cubbi provides persistent storage for project-specific configurations that need to survive container restarts. This is implemented through a dedicated volume mount and symlink system:

1. **Configuration Storage**:
   - Each project has a dedicated configuration directory on the host at `~/.cubbi/projects/<project-hash>/config`
   - For projects specified by URL, the hash is derived from the repository URL
   - For local projects, the hash is derived from the absolute path of the local directory
   - This directory is mounted into the container at `/cubbi-config`

2. **Image Configuration**:
   - Each image can specify configuration files/directories that should persist across sessions
   - These are defined in the image's `cubbi_image.yaml` file in the `persistent_configs` section
   - Example for the Goose image:
     ```yaml
     persistent_configs:
       - source: "/app/.goose"          # Path in container
         target: "/cubbi-config/goose"  # Path in persistent storage
         type: "directory"              # directory or file
         description: "Goose memory and configuration"
     ```

3. **Automatic Symlinking**:
   - During container initialization, the system:
     - Creates all target directories in the persistent storage
     - Creates symlinks from the source paths to the target paths
   - This makes the persistence transparent to the application

4. **Environment Variables**:
   - The container has access to the configuration location via environment variables:
     ```
     CUBBI_CONFIG_DIR=/cubbi-config
     CUBBI_IMAGE_CONFIG_DIR=/cubbi-config/<image-name>
     ```

This ensures that important configurations like Goose's memory store, authentication tokens, and other state information persist between container sessions while maintaining isolation between different projects (see the sketch below).
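A compact sketch of steps 1 and 3 follows. The document does not specify the hash function, so SHA-256 is used here purely for illustration, and migration of pre-existing source content is omitted.

```python
# Sketch: project-hash derivation and config symlinking (hash choice is an assumption).
import hashlib
from pathlib import Path


def project_hash(identifier: str) -> str:
    # identifier is a repository URL or the absolute path of a local directory
    return hashlib.sha256(identifier.encode()).hexdigest()[:16]


def link_persistent_config(source: str, target: str) -> None:
    source_path, target_path = Path(source), Path(target)
    target_path.mkdir(parents=True, exist_ok=True)  # create in persistent storage
    if not source_path.is_symlink():
        source_path.parent.mkdir(parents=True, exist_ok=True)
        source_path.symlink_to(target_path)  # transparent to the application


print(project_hash("git@github.com:hello/private.git"))
link_persistent_config("/app/.goose", "/cubbi-config/goose")
```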
### Adding Projects

Users can add projects with associated credentials:

```bash
# Add a project with SSH key
cubbi project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519

# Add a project with token authentication
cubbi project add github.com/hello/private --token ghp_123456789

# List all projects
cubbi project list

# Remove a project
cubbi project remove github.com/hello/private
```

### Project Configuration

Projects are stored in the Cubbi service and referenced by their repository URL. The configuration includes:

```yaml
# Project configuration
id: github.com/hello/private
url: git@github.com:hello/private.git
type: git
auth:
  type: ssh
  key: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...encrypted key data...
    -----END OPENSSH PRIVATE KEY-----
  public_key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI...
```

## Image Implementation

### Image Structure

Each image is a Docker container with a standardized structure:

```
/
├── entrypoint.sh      # Container initialization
├── cubbi-init.sh      # Standardized initialization script
├── cubbi_image.yaml   # Image metadata and configuration
├── tool/              # AI tool installation
└── ssh/               # SSH server configuration
```

### Standardized Initialization Script

All images include a standardized `cubbi-init.sh` script that handles common initialization tasks:

```bash
#!/bin/bash

# Project initialization
if [ -n "$CUBBI_PROJECT_URL" ]; then
    echo "Initializing project: $CUBBI_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$CUBBI_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$CUBBI_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$CUBBI_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$CUBBI_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone "$CUBBI_PROJECT_URL" /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.cubbi/init.sh" ]; then
        bash /app/.cubbi/init.sh
    fi
fi

# Image-specific initialization continues...
```

### Image Configuration (cubbi_image.yaml)

```yaml
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com

init:
  pre_command: /cubbi-init.sh
  command: /entrypoint.sh

environment:
  - name: MCP_HOST
    description: MCP server host
    required: true
    default: http://localhost:8000

  - name: GOOSE_ID
    description: Goose instance ID
    required: false

  # Project environment variables
  - name: CUBBI_PROJECT_URL
    description: Project repository URL
    required: false

  - name: CUBBI_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: CUBBI_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: CUBBI_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000  # Main application
  - 22    # SSH server

volumes:
  - mountPath: /app
    description: Application directory

persistent_configs:
  - source: "/app/.goose"
    target: "/cubbi-config/goose"
    type: "directory"
    description: "Goose memory and configuration"
```

### Example Built-in Images

1. **goose**: Goose with MCP servers
2. **aider**: Aider coding assistant
3. **claude-code**: Claude Code environment
4. **custom**: Custom Dockerfile support

## Network Management

### Docker Network Integration

Cubbi provides flexible network management for containers:

1. **Default Cubbi Network**:
   - Each container is automatically connected to the Cubbi network (`cubbi-network` by default)
   - This ensures containers can communicate with each other

2. **External Network Connection**:
   - Containers can be connected to one or more external Docker networks
   - This allows integration with existing infrastructure (e.g., databases, web servers)
   - Networks can be specified at session creation time: `cubbi session create --network mynetwork`

3. **Default Networks Configuration**:
   - Users can configure default networks in their configuration
   - These networks will be used for all new sessions unless overridden
   - Managed with `cubbi config network` commands

4. **Network Command Examples**:
   ```bash
   # Use with session creation
   cubbi session create --network teamnet

   # Use with multiple networks
   cubbi session create --network teamnet --network dbnet

   # Configure default networks
   cubbi config network add teamnet
   ```

## Security Considerations

1. **Container Isolation**: Each session runs in an isolated container
2. **Authentication**: Integration with Authentik for secure authentication
3. **Resource Limits**: Configurable CPU, memory, and storage limits
4. **Network Isolation**: Internal Docker network for container-to-container communication with optional external network connections
5. **Encrypted Connections**: TLS for API connections and SSH for terminal access

## Deployment

### Cubbi Service Deployment

```yaml
# docker-compose.yml for Cubbi Service
version: '3.8'

services:
  cubbi-service:
    image: monadical/cubbi-service:latest
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./config:/app/config
    ports:
      - "3000:3000"
    environment:
      - AUTH_URL=https://authentik.monadical.io
      - LANGFUSE_API_KEY=your_api_key
    networks:
      - cubbi-network

networks:
  cubbi-network:
    driver: bridge
```

## Project Repository Integration Workflow

### Adding a Project Repository

1. User adds a project repository with authentication:
   ```bash
   cubbi project add github.com/hello/private --ssh-key ~/.ssh/id_ed25519
   ```

2. Cubbi CLI reads the SSH key, encrypts it, and sends it to the Cubbi Service

3. Cubbi Service stores the project configuration securely

### Using a Project in a Session

1. User creates a session with a project:
   ```bash
   cubbi -r monadical git@github.com:hello/private
   ```

2. Cubbi Service:
   - Identifies the project from the URL
   - Retrieves project authentication details
   - Sets up environment variables:
     ```
     CUBBI_PROJECT_URL=git@github.com:hello/private
     CUBBI_PROJECT_TYPE=git
     CUBBI_GIT_SSH_KEY=<contents of the SSH key>
     ```
   - Creates a container with these environment variables

3. Container initialization:
   - The standardized `cubbi-init.sh` script detects the project environment variables
   - Sets up SSH key or token authentication
   - Clones the repository to `/app`
   - Runs any project-specific initialization scripts

4. User can immediately begin working with the repository

## Implementation Roadmap

1. **Phase 1**: Local CLI tool with Docker integration
2. **Phase 2**: Cubbi Service REST API with basic container management
3. **Phase 3**: Authentication and secure connections
4. **Phase 4**: Project management functionality
5. **Phase 5**: Image implementation (Goose, Aider, Claude Code)
6. **Phase 6**: Logging integration with Fluentd and Langfuse
7. **Phase 7**: CLI remote connectivity improvements
8. **Phase 8**: Additional images and extensibility features
160
docs/specs/2_MCP_SERVER.md
Normal file
@@ -0,0 +1,160 @@
# MCP Server Specification

## Overview

This document specifies the implementation of Model Context Protocol (MCP) server support in the Cubbi system. The MCP server feature allows users to connect, build, and manage external MCP servers that can be attached to Cubbi sessions.

An MCP server is a service that can be accessed by an image (such as Goose or Claude Code) to extend the LLM's capabilities through tool calls. It can be either:
- A local stdio-based MCP server running in a container (accessed via an SSE proxy)
- A remote HTTP SSE server accessed directly via its URL

## Key Features

1. Support for two types of MCP servers:
   - **Proxy-based MCP servers** (default): A container running an MCP stdio server with a proxy that converts to SSE
   - **Remote MCP servers**: External HTTP SSE servers accessed via URL

2. Persistent MCP containers that can be:
   - Started/stopped independently of sessions
   - Connected to multiple sessions
   - Automatically started when referenced in a session creation

3. Management of MCP server configurations and containers

## MCP Configuration Model

The MCP configuration is stored in the user configuration file and includes:

```yaml
mcps:
  # Proxy-based MCP server (default type)
  - name: github
    type: proxy
    base_image: mcp/github
    command: "github-mcp"  # Optional command to run in the base image
    proxy_image: ghcr.io/sparfenyuk/mcp-proxy:latest  # Optional, defaults to standard proxy image
    proxy_options:
      sse_port: 8080
      sse_host: "0.0.0.0"
      allow_origin: "*"
    env:
      GITHUB_TOKEN: "your-token-here"

  # Remote MCP server
  - name: remote-mcp
    type: remote
    url: "http://mcp-server.example.com/sse"
    headers:
      Authorization: "Bearer your-token-here"
```

## CLI Commands

### MCP Management

```
cubbi mcp list            # List all configured MCP servers and their status
cubbi mcp status <name>   # Show detailed status of a specific MCP server
cubbi mcp start <name>    # Start an MCP server container
cubbi mcp stop <name>     # Stop and remove an MCP server container
cubbi mcp restart <name>  # Restart an MCP server container
cubbi mcp start --all     # Start all MCP server containers
cubbi mcp stop --all      # Stop and remove all MCP server containers
cubbi mcp inspector       # Run the MCP Inspector UI with network connectivity to all MCP servers
cubbi mcp inspector --client-port <cp> --server-port <sp>  # Run with custom client port (default: 5173) and server port (default: 3000)
cubbi mcp inspector --detach  # Run the inspector in detached mode
cubbi mcp inspector --stop    # Stop the running inspector
cubbi mcp logs <name>     # Show logs for an MCP server container
```

### MCP Configuration

```
# Add a proxy-based MCP server (default)
cubbi mcp add <name> <base_image> [--command CMD] [--proxy-image IMG] [--sse-port PORT] [--sse-host HOST] [--allow-origin ORIGIN] [--env KEY=VALUE...]

# Add a remote MCP server
cubbi mcp add-remote <name> <url> [--header KEY=VALUE...]

# Remove an MCP configuration
cubbi mcp remove <name>
```

### Session Integration

```
cubbi session create [--mcp <name>]  # Create a session with an MCP server attached
```

## Implementation Details

### MCP Container Management

1. MCP containers have their own dedicated Docker network (`cubbi-mcp-network`)
2. Session containers are attached to both their session network and the MCP network when using an MCP
3. MCP containers are persistent across sessions unless explicitly stopped
4. MCP containers are named with a prefix to identify them (`cubbi_mcp_<name>`)
5. Each MCP container has a network alias matching its name without the prefix (e.g., `cubbi_mcp_github` has the alias `github`)
6. Network aliases enable DNS-based service discovery between containers (see the sketch below)
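A minimal sketch of items 4–6 with the Docker SDK for Python, assuming the naming conventions above; this is illustrative rather than the tool's actual implementation.

```python
# Sketch: starting a named MCP container with a DNS alias on the MCP network.
import docker

client = docker.from_env()
name = "github"

# Create the dedicated MCP network if it does not exist yet
try:
    network = client.networks.get("cubbi-mcp-network")
except docker.errors.NotFound:
    network = client.networks.create("cubbi-mcp-network", driver="bridge")

container = client.containers.create("mcp/github", name=f"cubbi_mcp_{name}")
network.connect(container, aliases=[name])  # resolvable by other containers as "github"
container.start()
```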
### MCP Inspector

The MCP Inspector is a web-based UI tool that allows you to:

1. Visualize and interact with multiple MCP servers
2. Debug MCP server messages and interactions
3. Test MCP server capabilities directly

The MCP Inspector implementation includes:

1. A container based on the `mcp/inspector` image
2. Automatic joining of all MCP server networks for seamless DNS resolution
3. A modified Express server that binds to all interfaces (0.0.0.0)
4. Port mapping for both the frontend (default: 5173) and backend API (default: 3000)
5. Network connectivity to all MCP servers using their simple names as DNS hostnames

### Proxy-based MCP Servers (Default)

For proxy-based MCP servers, the system will:
1. Create a custom Dockerfile that:
   - Uses the specified proxy image as the base
   - Installs Docker-in-Docker capabilities
   - Sets up the base MCP server image
   - Configures the entrypoint to run the MCP proxy with the right parameters
2. Build the custom image
3. Run the container with:
   - The Docker socket mounted to enable Docker-in-Docker
   - Environment variables from the configuration
   - The SSE server port exposed

The proxy container will:
1. Pull the base image
2. Run the base image with the specified command
3. Connect the stdio of the base image to the MCP proxy
4. Expose an SSE server that clients can connect to (see the sketch below)
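Under the configuration model above, running a built proxy image might look like the following sketch; the image tag is a hypothetical output of the custom build step, and the other values come from the example configuration.

```python
# Sketch: running a built proxy-based MCP container (tag and values are assumptions).
import docker

client = docker.from_env()

container = client.containers.run(
    "cubbi-mcp-github:latest",  # hypothetical tag produced by the custom build
    name="cubbi_mcp_github",
    detach=True,
    environment={"GITHUB_TOKEN": "your-token-here"},
    ports={"8080/tcp": 8080},  # expose the SSE port from proxy_options
    volumes={"/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "rw"}},
)
print(container.id)
```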
### Remote MCP Servers

For remote MCP servers:
1. Store the URL and headers
2. Provide these to the session container when connecting

## Session Integration

When a session is created with an MCP server:
1. If the MCP server is not running, start it automatically
2. Connect the session container to the MCP server's network
3. Set the appropriate environment variables in the session to enable MCP connectivity

## Security Considerations

1. MCP server credentials and tokens should be handled securely through environment variables
2. Network isolation should be maintained between different MCP servers
3. Consider options for access control between sessions and MCP servers

## Future Enhancements

1. Support for MCP server version management
2. Health checking and automatic restart capabilities
3. Support for MCP server clusters or load balancing
4. Integration with monitoring systems
327
docs/specs/3_IMAGE.md
Normal file
@@ -0,0 +1,327 @@
# Cubbi Image Specifications

## Overview

This document defines the specifications and requirements for building Cubbi-compatible container images. These images serve as isolated development environments for AI tools within the Cubbi platform.

## Architecture

Cubbi images use a Python-based initialization system with a plugin architecture that separates core Cubbi functionality from tool-specific configuration.

### Core Components

1. **Image Metadata File** (`cubbi_image.yaml`) - *Tool-specific*
2. **Container Definition** (`Dockerfile`) - *Tool-specific*
3. **Python Initialization Script** (`cubbi_init.py`) - *Shared across all images*
4. **Tool-specific Plugins** (e.g., `goose_plugin.py`) - *Tool-specific*
5. **Status Tracking Scripts** (`init-status.sh`) - *Shared across all images*

## Image Metadata Specification

### Required Fields

```yaml
name: string         # Unique identifier for the image
description: string  # Human-readable description
version: string      # Semantic version (e.g., "1.0.0")
maintainer: string   # Contact information
image: string        # Docker image name and tag
```

### Environment Variables

```yaml
environment:
  - name: string         # Variable name
    description: string  # Human-readable description
    required: boolean    # Whether variable is mandatory
    sensitive: boolean   # Whether variable contains secrets
    default: string      # Default value (optional)
```

#### Standard Environment Variables

All images MUST support these standard environment variables:

- `CUBBI_USER_ID`: UID for the container user (default: 1000)
- `CUBBI_GROUP_ID`: GID for the container user (default: 1000)
- `CUBBI_RUN_COMMAND`: Command to execute after initialization
- `CUBBI_NO_SHELL`: Exit after command execution ("true"/"false")
- `CUBBI_CONFIG_DIR`: Directory for persistent configurations (default: "/cubbi-config")
- `CUBBI_MODEL`: Model to use for the tool
- `CUBBI_PROVIDER`: Provider to use for the tool

#### MCP Integration Variables

For MCP (Model Context Protocol) integration:

- `MCP_COUNT`: Number of available MCP servers
- `MCP_{idx}_NAME`: Name of MCP server at index
- `MCP_{idx}_TYPE`: Type of MCP server
- `MCP_{idx}_HOST`: Hostname of MCP server
- `MCP_{idx}_URL`: Full URL for remote MCP servers
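For illustration, an image could collect these variables as in the sketch below; the helper name is hypothetical, but the variable layout matches the list above.

```python
# Sketch: parsing the indexed MCP_* environment variables (helper name is hypothetical).
import os
from typing import Any, Dict, List


def discover_mcp_servers() -> List[Dict[str, Any]]:
    servers = []
    count = int(os.environ.get("MCP_COUNT", "0"))
    for idx in range(count):
        servers.append(
            {
                "name": os.environ.get(f"MCP_{idx}_NAME"),
                "type": os.environ.get(f"MCP_{idx}_TYPE"),
                "host": os.environ.get(f"MCP_{idx}_HOST"),
                "url": os.environ.get(f"MCP_{idx}_URL"),  # remote servers only
            }
        )
    return servers
```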
### Network Configuration

```yaml
ports:
  - number  # Port to expose (e.g., 8000)
```

### Storage Configuration

```yaml
volumes:
  - mountPath: string    # Path inside container
    description: string  # Purpose of the volume

persistent_configs:
  - source: string       # Path inside container
    target: string       # Path in persistent storage
    type: string         # "file" or "directory"
    description: string  # Purpose of the configuration
```

## Container Requirements

### Base System Dependencies

All images MUST include:

- `python3` - For the initialization system
- `gosu` - For secure user switching
- `bash` - For script execution

### Python Dependencies

The Cubbi initialization system requires:

- `ruamel.yaml` - For YAML configuration parsing

### User Management

Images MUST:

1. Run as root initially for setup
2. Create a non-root user (`cubbi`) with configurable UID/GID
3. Switch to the non-root user for tool execution
4. Handle user ID mapping for volume permissions
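A rough sketch of this flow, assuming the standard `CUBBI_USER_ID`/`CUBBI_GROUP_ID` variables and the `gosu` binary from the base dependencies; the actual `cubbi_init.py` logic may differ.

```python
# Sketch: create the cubbi user with the requested UID/GID, then drop privileges.
import os
import subprocess

uid = os.environ.get("CUBBI_USER_ID", "1000")
gid = os.environ.get("CUBBI_GROUP_ID", "1000")

subprocess.run(["groupadd", "-g", gid, "cubbi"], check=False)  # may already exist
subprocess.run(
    ["useradd", "-u", uid, "-g", gid, "-m", "-d", "/home/cubbi", "cubbi"],
    check=False,
)
subprocess.run(["chown", "-R", f"{uid}:{gid}", "/app", "/cubbi-config"], check=True)

# Hand off to the unprivileged user for tool execution
os.execvp("gosu", ["gosu", "cubbi", "bash"])
```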
### Directory Structure

Standard directories:

- `/app` - Primary working directory (owned by the cubbi user)
- `/home/cubbi` - User home directory
- `/cubbi-config` - Persistent configuration storage
- `/cubbi/init.log` - Initialization log file
- `/cubbi/init.status` - Initialization status tracking
- `/cubbi/cubbi_image.yaml` - Image configuration

## Initialization System

### Shared Scripts

The following scripts are **shared across all Cubbi images** and should be copied from the main Cubbi repository:

#### Main Script (`cubbi_init.py`) - *Shared*

The standalone initialization script that:

1. Sets up the user and group with proper IDs
2. Creates standard directories with correct permissions
3. Sets up persistent configuration symlinks
4. Runs tool-specific initialization
5. Executes user commands or starts an interactive shell

The script supports:
- `--help` for usage information
- Argument passing to the final command
- Environment variable configuration
- Plugin-based tool initialization

#### Status Tracking Script (`init-status.sh`) - *Shared*

A bash script that:
- Monitors initialization progress
- Displays logs during setup
- Ensures files exist before operations
- Switches to the user shell when complete

### Tool-Specific Components

#### Tool Plugins (`{tool}_plugin.py`) - *Tool-specific*

Each tool MUST provide a plugin (`{tool}_plugin.py`) implementing:

```python
from typing import Any, Dict

from cubbi_init import ToolPlugin


class MyToolPlugin(ToolPlugin):
    @property
    def tool_name(self) -> str:
        return "mytool"

    def initialize(self) -> bool:
        """Main tool initialization logic"""
        # Tool-specific setup
        return True

    def integrate_mcp_servers(self, mcp_config: Dict[str, Any]) -> bool:
        """Integrate with available MCP servers"""
        # MCP integration logic
        return True
```

#### Image Configuration (`cubbi_image.yaml`) - *Tool-specific*

Each tool provides its own metadata file defining:
- Tool-specific environment variables
- Port configurations
- Volume mounts
- Persistent configuration mappings

## Plugin Architecture

### Plugin Discovery

Plugins are automatically discovered by:

1. Looking for `{image_name}_plugin.py` in the same directory as `cubbi_init.py`
2. Loading classes that inherit from `ToolPlugin`
3. Executing initialization and MCP integration
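A minimal discovery sketch using `importlib`, following the convention above; `cubbi_init.py` itself may implement this differently.

```python
# Sketch: loading {image_name}_plugin.py and instantiating its ToolPlugin subclass.
import importlib.util
import inspect
from pathlib import Path

from cubbi_init import ToolPlugin  # base class shipped with the shared script


def load_plugin(image_name: str, search_dir: Path):
    plugin_path = search_dir / f"{image_name}_plugin.py"
    if not plugin_path.exists():
        return None

    spec = importlib.util.spec_from_file_location(f"{image_name}_plugin", plugin_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    # Pick the first class defined in the module that subclasses ToolPlugin
    for _, obj in inspect.getmembers(module, inspect.isclass):
        if issubclass(obj, ToolPlugin) and obj is not ToolPlugin:
            return obj()
    return None
```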
### Plugin Requirements

Tool plugins MUST:
- Inherit from the `ToolPlugin` base class
- Implement the `tool_name` property
- Implement the `initialize()` method
- Optionally implement the `integrate_mcp_servers()` method
- Use ruamel.yaml for configuration file operations

## Security Requirements

### User Isolation

- Container MUST NOT run processes as root after initialization
- All user processes MUST run as the `cubbi` user
- Proper file ownership and permissions MUST be maintained

### Secrets Management

- Sensitive environment variables MUST be marked as `sensitive: true`
- SSH keys and tokens MUST have restricted permissions (600)
- No secrets SHOULD be logged or exposed in configuration files

### Network Security

- Only necessary ports SHOULD be exposed
- Network services SHOULD be properly configured and secured

## Integration Requirements

### MCP Server Integration

Images MUST support dynamic MCP server discovery and configuration through:

1. Environment variable parsing for server count and details
2. Automatic tool configuration updates
3. Standard MCP communication protocols

### Persistent Configuration

Images MUST support:

1. Configuration persistence through volume mounts
2. Symlink creation for tool configuration directories
3. Proper ownership and permission handling

## Docker Integration

### Dockerfile Requirements

```dockerfile
# Copy shared scripts from the main Cubbi repository
COPY cubbi_init.py /cubbi_init.py
COPY init-status.sh /init-status.sh

# Copy tool-specific files
COPY {tool}_plugin.py /{tool}_plugin.py
COPY cubbi_image.yaml /cubbi/cubbi_image.yaml

# Install Python dependencies
RUN pip install ruamel.yaml

# Make scripts executable
RUN chmod +x /cubbi_init.py /init-status.sh

# Set entrypoint
ENTRYPOINT ["/cubbi_init.py"]
CMD ["tail", "-f", "/dev/null"]
```

### Init Container Support

For complex initialization, use:

```dockerfile
# Use init-status.sh as entrypoint for monitoring
ENTRYPOINT ["/init-status.sh"]
```

## Best Practices

### Performance

- Use multi-stage builds to minimize image size
- Clean up package caches and temporary files
- Use specific base image versions for reproducibility

### Maintainability

- Follow consistent naming conventions
- Include comprehensive documentation
- Use semantic versioning for image releases
- Provide clear error messages and logging

### Compatibility

- Support common development workflows
- Maintain backward compatibility when possible
- Test with various project types and configurations

## Validation Checklist

Before releasing a Cubbi image, verify:

- [ ] All required metadata fields are present in `cubbi_image.yaml`
- [ ] Standard environment variables are supported
- [ ] `cubbi_init.py` script is properly installed and executable
- [ ] Tool plugin is discovered and loads correctly
- [ ] User management works correctly
- [ ] Persistent configurations are properly handled
- [ ] MCP integration functions (if applicable)
- [ ] Tool-specific functionality works as expected
- [ ] Security requirements are met
- [ ] Python dependencies are satisfied
- [ ] Status tracking works correctly
- [ ] Documentation is complete and accurate

## Examples

### Complete Goose Example

See the `/cubbi/images/goose/` directory for a complete implementation including:
- `Dockerfile` - Container definition
- `cubbi_image.yaml` - Image metadata
- `goose_plugin.py` - Tool-specific initialization
- `README.md` - Tool-specific documentation

### Migration Notes

The current Python-based system uses:
- `cubbi_init.py` - Standalone initialization script with plugin support
- `{tool}_plugin.py` - Tool-specific configuration and MCP integration
- `init-status.sh` - Status monitoring and log display
- `cubbi_image.yaml` - Image metadata and configuration
@@ -1,47 +0,0 @@
FROM python:3.12-slim

LABEL maintainer="team@monadical.com"
LABEL description="Goose with MCP servers"

# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    openssh-server \
    bash \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Set up SSH server
RUN mkdir /var/run/sshd
RUN echo 'root:root' | chpasswd
RUN sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config

# Create app directory
WORKDIR /app

# Install python dependencies
# This is done before copying scripts for better cache management
RUN pip install --no-cache-dir goose-ai langfuse

# Copy initialization scripts
COPY mc-init.sh /mc-init.sh
COPY entrypoint.sh /entrypoint.sh
COPY mc-driver.yaml /mc-driver.yaml
COPY init-status.sh /init-status.sh

# Make scripts executable
RUN chmod +x /mc-init.sh /entrypoint.sh /init-status.sh

# Set up initialization status check on login
RUN echo '[ -x /init-status.sh ] && /init-status.sh' >> /etc/bash.bashrc

# Set up environment
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

# Expose ports
EXPOSE 8000 22

# Set entrypoint
ENTRYPOINT ["/entrypoint.sh"]
@@ -1,47 +0,0 @@
# Goose Driver for MC

This driver provides a containerized environment for running [Goose](https://goose.ai) with MCP servers.

## Features

- Pre-configured environment for Goose AI
- MCP server integration
- SSH access
- Git repository integration
- Langfuse logging support

## Environment Variables

| Variable | Description | Required |
|----------|-------------|----------|
| `MCP_HOST` | MCP server host | Yes |
| `GOOSE_API_KEY` | Goose API key | Yes |
| `GOOSE_ID` | Goose instance ID | No |
| `LANGFUSE_PUBLIC_KEY` | Langfuse public key | No |
| `LANGFUSE_SECRET_KEY` | Langfuse secret key | No |
| `LANGFUSE_HOST` | Langfuse API host | No |
| `MC_PROJECT_URL` | Project repository URL | No |
| `MC_GIT_SSH_KEY` | SSH key for Git authentication | No |
| `MC_GIT_TOKEN` | Token for Git authentication | No |

## Build

To build this driver:

```bash
cd drivers/goose
docker build -t monadical/mc-goose:latest .
```

## Usage

```bash
# Create a new session with this driver
mc session create --driver goose

# Create with specific MCP server
mc session create --driver goose -e MCP_HOST=http://mcp.example.com:8000

# Create with project repository
mc session create --driver goose --project github.com/username/repo
```
@@ -1,17 +0,0 @@
#!/bin/bash
# Entrypoint script for Goose driver

# Run the standard initialization script
/mc-init.sh

# Start SSH server in the background
/usr/sbin/sshd

# Print welcome message
echo "==============================================="
echo "Goose driver container started"
echo "SSH server running on port 22"
echo "==============================================="

# Keep container running
exec tail -f /dev/null
@@ -1,65 +0,0 @@
#!/bin/bash
# Script to check and display initialization status

# Function to display initialization logs
show_init_logs() {
    if [ -f "/init.log" ]; then
        echo "Displaying initialization logs:"
        echo "----------------------------------------"
        cat /init.log
        echo "----------------------------------------"
    else
        echo "No initialization logs found."
    fi
}

# Function to follow logs until initialization completes
follow_init_logs() {
    if [ ! -f "/init.log" ]; then
        echo "No initialization logs found."
        return
    fi

    echo "Initialization is still in progress. Showing logs:"
    echo "----------------------------------------"
    tail -f /init.log &
    tail_pid=$!

    # Check every second if initialization has completed
    while true; do
        if [ -f "/init.status" ] && grep -q "INIT_COMPLETE=true" "/init.status"; then
            kill $tail_pid 2>/dev/null
            echo "----------------------------------------"
            echo "Initialization completed."
            break
        fi
        sleep 1
    done
}

# Check if we're in an interactive shell
if [ -t 0 ]; then
    INTERACTIVE=true
else
    INTERACTIVE=false
fi

# Check initialization status
if [ -f "/init.status" ]; then
    if grep -q "INIT_COMPLETE=true" "/init.status"; then
        echo "MC initialization has completed."
        # No longer prompt to show logs when initialization is complete
    else
        echo "MC initialization is still in progress."
        follow_init_logs
    fi
else
    echo "Cannot determine initialization status."
    # Ask if user wants to see logs if they exist (only in interactive mode)
    if [ -f "/init.log" ] && [ "$INTERACTIVE" = true ]; then
        read -p "Do you want to see initialization logs? (y/n): " show_logs
        if [[ "$show_logs" =~ ^[Yy] ]]; then
            show_init_logs
        fi
    fi
fi
@@ -1,66 +0,0 @@
name: goose
description: Goose with MCP servers
version: 1.0.0
maintainer: team@monadical.com

init:
  pre_command: /mc-init.sh
  command: /entrypoint.sh

environment:
  - name: MCP_HOST
    description: MCP server host
    required: true
    default: http://localhost:8000

  - name: GOOSE_API_KEY
    description: Goose API key
    required: true
    sensitive: true

  - name: GOOSE_ID
    description: Goose instance ID
    required: false

  - name: LANGFUSE_PUBLIC_KEY
    description: Langfuse public key
    required: false
    sensitive: true

  - name: LANGFUSE_SECRET_KEY
    description: Langfuse secret key
    required: false
    sensitive: true

  - name: LANGFUSE_HOST
    description: Langfuse API host
    required: false
    default: https://api.langfuse.com

  # Project environment variables
  - name: MC_PROJECT_URL
    description: Project repository URL
    required: false

  - name: MC_PROJECT_TYPE
    description: Project repository type (git, svn, etc.)
    required: false
    default: git

  - name: MC_GIT_SSH_KEY
    description: SSH key for Git authentication
    required: false
    sensitive: true

  - name: MC_GIT_TOKEN
    description: Token for Git authentication
    required: false
    sensitive: true

ports:
  - 8000  # Main application
  - 22    # SSH server

volumes:
  - mountPath: /app
    description: Application directory
@@ -1,65 +0,0 @@
#!/bin/bash
# Standardized initialization script for MC drivers

# Redirect all output to both stdout and the log file
exec > >(tee -a /init.log) 2>&1

# Mark initialization as started
echo "=== MC Initialization started at $(date) ==="
echo "INIT_COMPLETE=false" > /init.status

# Project initialization
if [ -n "$MC_PROJECT_URL" ]; then
    echo "Initializing project: $MC_PROJECT_URL"

    # Set up SSH key if provided
    if [ -n "$MC_GIT_SSH_KEY" ]; then
        mkdir -p ~/.ssh
        echo "$MC_GIT_SSH_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
        ssh-keyscan github.com >> ~/.ssh/known_hosts 2>/dev/null
        ssh-keyscan gitlab.com >> ~/.ssh/known_hosts 2>/dev/null
        ssh-keyscan bitbucket.org >> ~/.ssh/known_hosts 2>/dev/null
    fi

    # Set up token if provided
    if [ -n "$MC_GIT_TOKEN" ]; then
        git config --global credential.helper store
        echo "https://$MC_GIT_TOKEN:x-oauth-basic@github.com" > ~/.git-credentials
    fi

    # Clone repository
    git clone $MC_PROJECT_URL /app
    cd /app

    # Run project-specific initialization if present
    if [ -f "/app/.mc/init.sh" ]; then
        bash /app/.mc/init.sh
    fi
fi

# Set up Goose API key if provided
if [ -n "$GOOSE_API_KEY" ]; then
    echo "Setting up Goose API key"
    export GOOSE_API_KEY="$GOOSE_API_KEY"
fi

# Set up MCP connection if provided
if [ -n "$MCP_HOST" ]; then
    echo "Setting up MCP connection to $MCP_HOST"
    export MCP_HOST="$MCP_HOST"
fi

# Set up Langfuse logging if credentials are provided
if [ -n "$LANGFUSE_SECRET_KEY" ] && [ -n "$LANGFUSE_PUBLIC_KEY" ]; then
    echo "Setting up Langfuse logging"
    export LANGFUSE_SECRET_KEY="$LANGFUSE_SECRET_KEY"
    export LANGFUSE_PUBLIC_KEY="$LANGFUSE_PUBLIC_KEY"
    export LANGFUSE_HOST="${LANGFUSE_HOST:-https://api.langfuse.com}"
fi

echo "MC driver initialization complete"

# Mark initialization as complete
echo "=== MC Initialization completed at $(date) ==="
echo "INIT_COMPLETE=true" > /init.status

@@ -1,403 +0,0 @@
import os
from typing import List, Optional
import typer
from rich.console import Console
from rich.table import Table

from .config import ConfigManager
from .container import ContainerManager
from .models import SessionStatus

app = typer.Typer(help="Monadical Container Tool")
session_app = typer.Typer(help="Manage MC sessions")
driver_app = typer.Typer(help="Manage MC drivers", no_args_is_help=True)
app.add_typer(session_app, name="session", no_args_is_help=True)
app.add_typer(driver_app, name="driver", no_args_is_help=True)

console = Console()
config_manager = ConfigManager()
container_manager = ContainerManager(config_manager)


@app.callback(invoke_without_command=True)
def main(ctx: typer.Context) -> None:
    """Monadical Container Tool"""
    # If no command is specified, create a session
    if ctx.invoked_subcommand is None:
        create_session(
            driver=None,
            project=None,
            env=[],
            name=None,
            no_connect=False,
            no_mount=False,
        )


@app.command()
def version() -> None:
    """Show MC version information"""
    from importlib.metadata import version as get_version

    try:
        version_str = get_version("mcontainer")
        console.print(f"MC - Monadical Container Tool v{version_str}")
    except Exception:
        console.print("MC - Monadical Container Tool (development version)")


@session_app.command("list")
def list_sessions() -> None:
    """List active MC sessions"""
    sessions = container_manager.list_sessions()

    if not sessions:
        console.print("No active sessions found")
        return

    table = Table(show_header=True, header_style="bold")
    table.add_column("ID")
    table.add_column("Name")
    table.add_column("Driver")
    table.add_column("Status")
    table.add_column("Ports")
    table.add_column("Project")

    for session in sessions:
        ports_str = ", ".join(
            [
                f"{container_port}:{host_port}"
                for container_port, host_port in session.ports.items()
            ]
        )

        status_color = {
            SessionStatus.RUNNING: "green",
            SessionStatus.STOPPED: "red",
            SessionStatus.CREATING: "yellow",
            SessionStatus.FAILED: "red",
        }.get(session.status, "white")

        table.add_row(
            session.id,
            session.name,
            session.driver,
            f"[{status_color}]{session.status}[/{status_color}]",
            ports_str,
            session.project or "",
        )

    console.print(table)


@session_app.command("create")
def create_session(
    driver: Optional[str] = typer.Option(None, "--driver", "-d", help="Driver to use"),
    project: Optional[str] = typer.Option(
        None, "--project", "-p", help="Project repository URL"
    ),
    env: List[str] = typer.Option(
        [], "--env", "-e", help="Environment variables (KEY=VALUE)"
    ),
    name: Optional[str] = typer.Option(None, "--name", "-n", help="Session name"),
    no_connect: bool = typer.Option(
        False, "--no-connect", help="Don't automatically connect to the session"
    ),
    no_mount: bool = typer.Option(
        False,
        "--no-mount",
        help="Don't mount local directory to /app (ignored if --project is used)",
    ),
) -> None:
    """Create a new MC session"""
    # Use default driver if not specified
    if not driver:
        driver = config_manager.config.defaults.get("driver", "goose")

    # Parse environment variables
    environment = {}
    for var in env:
        if "=" in var:
            key, value = var.split("=", 1)
            environment[key] = value
        else:
            console.print(
                f"[yellow]Warning: Ignoring invalid environment variable format: {var}[/yellow]"
            )

    with console.status(f"Creating session with driver '{driver}'..."):
        session = container_manager.create_session(
            driver_name=driver,
            project=project,
            environment=environment,
            session_name=name,
            mount_local=not no_mount,
        )

    if session:
        console.print("[green]Session created successfully![/green]")
        console.print(f"Session ID: {session.id}")
        console.print(f"Driver: {session.driver}")

        if session.ports:
            console.print("Ports:")
            for container_port, host_port in session.ports.items():
                console.print(f"  {container_port} -> {host_port}")

        # Auto-connect unless --no-connect flag is provided
        if not no_connect:
            console.print(f"\nConnecting to session {session.id}...")
            container_manager.connect_session(session.id)
        else:
            console.print(
                f"\nConnect to the session with:\n  mc session connect {session.id}"
            )
    else:
        console.print("[red]Failed to create session[/red]")


@session_app.command("close")
def close_session(
    session_id: Optional[str] = typer.Argument(None, help="Session ID to close"),
    all_sessions: bool = typer.Option(False, "--all", help="Close all active sessions"),
) -> None:
    """Close a MC session or all sessions"""
    if all_sessions:
        # Get sessions first to display them
        sessions = container_manager.list_sessions()
        if not sessions:
            console.print("No active sessions to close")
            return

        console.print(f"Closing {len(sessions)} sessions...")

        # Simple progress function that prints a line when a session is closed
        def update_progress(session_id, status, message):
            if status == "completed":
                console.print(
                    f"[green]Session {session_id} closed successfully[/green]"
                )
            elif status == "failed":
                console.print(
                    f"[red]Failed to close session {session_id}: {message}[/red]"
                )

        # Start closing sessions with progress updates
        count, success = container_manager.close_all_sessions(update_progress)

        # Final result
        if success:
            console.print(f"[green]{count} sessions closed successfully[/green]")
        else:
            console.print("[red]Failed to close all sessions[/red]")
    elif session_id:
        with console.status(f"Closing session {session_id}..."):
            success = container_manager.close_session(session_id)

        if success:
            console.print(f"[green]Session {session_id} closed successfully[/green]")
        else:
            console.print(f"[red]Failed to close session {session_id}[/red]")
    else:
        console.print("[red]Error: Please provide a session ID or use --all flag[/red]")


@session_app.command("connect")
def connect_session(
    session_id: str = typer.Argument(..., help="Session ID to connect to"),
) -> None:
    """Connect to a MC session"""
    console.print(f"Connecting to session {session_id}...")
    success = container_manager.connect_session(session_id)

    if not success:
        console.print(f"[red]Failed to connect to session {session_id}[/red]")


@session_app.command("logs")
def session_logs(
    session_id: str = typer.Argument(..., help="Session ID to get logs from"),
    follow: bool = typer.Option(False, "--follow", "-f", help="Follow log output"),
    init: bool = typer.Option(
        False, "--init", "-i", help="Show initialization logs instead of container logs"
    ),
) -> None:
    """Stream logs from a MC session"""
    if init:
        # Show initialization logs
        if follow:
            console.print(
                f"Streaming initialization logs from session {session_id}... (Ctrl+C to exit)"
            )
            container_manager.get_init_logs(session_id, follow=True)
        else:
            logs = container_manager.get_init_logs(session_id)
            if logs:
                console.print(logs)
    else:
        # Show regular container logs
        if follow:
            console.print(
                f"Streaming logs from session {session_id}... (Ctrl+C to exit)"
            )
            container_manager.get_session_logs(session_id, follow=True)
        else:
            logs = container_manager.get_session_logs(session_id)
            if logs:
                console.print(logs)


@app.command()
def stop() -> None:
    """Stop the current MC session (from inside the container)"""
    # Check if running inside a container
    if not os.path.exists("/.dockerenv"):
        console.print(
            "[red]This command can only be run from inside a MC container[/red]"
        )
        return

    # Stop the container from inside
    console.print("Stopping the current session...")
    os.system("kill 1")  # Send SIGTERM to PID 1 (container's init process)


# Main CLI entry point that handles project repository URLs
@app.command(name="")
def quick_create(
    project: Optional[str] = typer.Argument(..., help="Project repository URL"),
    driver: Optional[str] = typer.Option(None, "--driver", "-d", help="Driver to use"),
    env: List[str] = typer.Option(
        [], "--env", "-e", help="Environment variables (KEY=VALUE)"
    ),
    name: Optional[str] = typer.Option(None, "--name", "-n", help="Session name"),
    no_connect: bool = typer.Option(
        False, "--no-connect", help="Don't automatically connect to the session"
    ),
    no_mount: bool = typer.Option(
        False,
        "--no-mount",
        help="Don't mount local directory to /app (ignored if a project is specified)",
    ),
) -> None:
    """Create a new MC session with a project repository"""
    create_session(
        driver=driver,
        project=project,
        env=env,
        name=name,
        no_connect=no_connect,
        no_mount=no_mount,
    )


@driver_app.command("list")
def list_drivers() -> None:
    """List available MC drivers"""
    drivers = config_manager.list_drivers()

    if not drivers:
        console.print("No drivers found")
        return

    table = Table(show_header=True, header_style="bold")
    table.add_column("Name")
    table.add_column("Description")
    table.add_column("Version")
    table.add_column("Maintainer")
    table.add_column("Image")

    for name, driver in drivers.items():
        table.add_row(
            driver.name,
            driver.description,
            driver.version,
            driver.maintainer,
            driver.image,
        )

    console.print(table)


@driver_app.command("build")
def build_driver(
    driver_name: str = typer.Argument(..., help="Driver name to build"),
    tag: str = typer.Option("latest", "--tag", "-t", help="Image tag"),
    push: bool = typer.Option(
        False, "--push", "-p", help="Push image to registry after building"
    ),
) -> None:
    """Build a driver Docker image"""
    # Get driver path
    driver_path = config_manager.get_driver_path(driver_name)
    if not driver_path:
        console.print(f"[red]Driver '{driver_name}' not found[/red]")
        return

    # Check if Dockerfile exists
    dockerfile_path = driver_path / "Dockerfile"
    if not dockerfile_path.exists():
        console.print(f"[red]Dockerfile not found in {driver_path}[/red]")
        return

    # Build image name
    image_name = f"monadical/mc-{driver_name}:{tag}"

    # Build the image
    with console.status(f"Building image {image_name}..."):
        result = os.system(f"cd {driver_path} && docker build -t {image_name} .")

    if result != 0:
        console.print("[red]Failed to build driver image[/red]")
        return

    console.print(f"[green]Successfully built image: {image_name}[/green]")

    # Push if requested
    if push:
        with console.status(f"Pushing image {image_name}..."):
            result = os.system(f"docker push {image_name}")

        if result != 0:
            console.print("[red]Failed to push driver image[/red]")
            return

        console.print(f"[green]Successfully pushed image: {image_name}[/green]")


@driver_app.command("info")
def driver_info(
    driver_name: str = typer.Argument(..., help="Driver name to get info for"),
) -> None:
    """Show detailed information about a driver"""
    driver = config_manager.get_driver(driver_name)
    if not driver:
        console.print(f"[red]Driver '{driver_name}' not found[/red]")
        return

    console.print(f"[bold]Driver: {driver.name}[/bold]")
    console.print(f"Description: {driver.description}")
    console.print(f"Version: {driver.version}")
    console.print(f"Maintainer: {driver.maintainer}")
    console.print(f"Image: {driver.image}")

    if driver.ports:
        console.print("\n[bold]Ports:[/bold]")
        for port in driver.ports:
            console.print(f"  {port}")

    # Get driver path
    driver_path = config_manager.get_driver_path(driver_name)
    if driver_path:
        console.print(f"\n[bold]Path:[/bold] {driver_path}")

        # Check for README
        readme_path = driver_path / "README.md"
        if readme_path.exists():
            console.print("\n[bold]README:[/bold]")
            with open(readme_path, "r") as f:
                console.print(f.read())


if __name__ == "__main__":
    app()

@@ -1,204 +0,0 @@
import yaml
from pathlib import Path
from typing import Dict, Optional

from .models import Config, Driver

DEFAULT_CONFIG_DIR = Path.home() / ".config" / "mc"
DEFAULT_CONFIG_FILE = DEFAULT_CONFIG_DIR / "config.yaml"
DEFAULT_DRIVERS_DIR = Path.home() / ".config" / "mc" / "drivers"
PROJECT_ROOT = Path(__file__).parent.parent
BUILTIN_DRIVERS_DIR = PROJECT_ROOT / "drivers"

# Default built-in driver configurations
DEFAULT_DRIVERS = {
    "goose": Driver(
        name="goose",
        description="Goose with MCP servers",
        version="1.0.0",
        maintainer="team@monadical.com",
        image="monadical/mc-goose:latest",
        ports=[8000, 22],
    ),
    "aider": Driver(
        name="aider",
        description="Aider coding assistant",
        version="1.0.0",
        maintainer="team@monadical.com",
        image="monadical/mc-aider:latest",
        ports=[22],
    ),
    "claude-code": Driver(
        name="claude-code",
        description="Claude Code environment",
        version="1.0.0",
        maintainer="team@monadical.com",
        image="monadical/mc-claude-code:latest",
        ports=[22],
    ),
}


class ConfigManager:
    def __init__(self, config_path: Optional[Path] = None):
        self.config_path = config_path or DEFAULT_CONFIG_FILE
        self.config_dir = self.config_path.parent
        self.drivers_dir = DEFAULT_DRIVERS_DIR
        self.config = self._load_or_create_config()

    def _load_or_create_config(self) -> Config:
        """Load existing config or create a new one with defaults"""
        if self.config_path.exists():
            try:
                with open(self.config_path, "r") as f:
                    config_data = yaml.safe_load(f) or {}

                # Create a new config from scratch, then update with data from file
                config = Config(
                    docker=config_data.get("docker", {}),
                    defaults=config_data.get("defaults", {}),
                )

                # Add drivers
                if "drivers" in config_data:
                    for driver_name, driver_data in config_data["drivers"].items():
                        config.drivers[driver_name] = Driver.model_validate(driver_data)

                # Add sessions (stored as simple dictionaries)
                if "sessions" in config_data:
                    config.sessions = config_data["sessions"]

                return config
            except Exception as e:
                print(f"Error loading config: {e}")
                return self._create_default_config()
        else:
            return self._create_default_config()

    def _create_default_config(self) -> Config:
        """Create a default configuration"""
        self.config_dir.mkdir(parents=True, exist_ok=True)
        self.drivers_dir.mkdir(parents=True, exist_ok=True)

        # Load built-in drivers from directories
        builtin_drivers = self.load_builtin_drivers()

        # Merge with default drivers, with directory drivers taking precedence
        drivers = {**DEFAULT_DRIVERS, **builtin_drivers}

        config = Config(
            docker={
                "socket": "/var/run/docker.sock",
                "network": "mc-network",
            },
            drivers=drivers,
            defaults={
                "driver": "goose",
            },
        )

        self.save_config(config)
        return config
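
    # Editor's illustration (not part of the original file): with the defaults
    # above, the config.yaml written by save_config() comes out roughly as
    # (key order may differ, since yaml.dump sorts keys by default):
    #
    #   defaults:
    #     driver: goose
    #   docker:
    #     network: mc-network
    #     socket: /var/run/docker.sock
    #   drivers:
    #     goose:
    #       name: goose
    #       image: monadical/mc-goose:latest
    #       ports: [8000, 22]
    #       ...
    #   sessions: {}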

    def save_config(self, config: Optional[Config] = None) -> None:
        """Save the current config to disk"""
        if config:
            self.config = config

        self.config_dir.mkdir(parents=True, exist_ok=True)

        # Use model_dump with mode="json" for proper serialization of enums
        config_dict = self.config.model_dump(mode="json")

        # Write to file
        with open(self.config_path, "w") as f:
            yaml.dump(config_dict, f)

    def get_driver(self, name: str) -> Optional[Driver]:
        """Get a driver by name"""
        return self.config.drivers.get(name)

    def list_drivers(self) -> Dict[str, Driver]:
        """List all available drivers"""
        return self.config.drivers

    def add_session(self, session_id: str, session_data: dict) -> None:
        """Add a session to the config"""
        # Store session data as a dictionary in the config
        self.config.sessions[session_id] = session_data
        self.save_config()

    def remove_session(self, session_id: str) -> None:
        """Remove a session from the config"""
        if session_id in self.config.sessions:
            del self.config.sessions[session_id]
            self.save_config()

    def list_sessions(self) -> Dict:
        """List all sessions in the config"""
        return self.config.sessions

    def load_driver_from_dir(self, driver_dir: Path) -> Optional[Driver]:
        """Load a driver configuration from a directory"""
        yaml_path = (
            driver_dir / "mai-driver.yaml"
        )  # Keep this name for backward compatibility

        if not yaml_path.exists():
            return None

        try:
            with open(yaml_path, "r") as f:
                driver_data = yaml.safe_load(f)

            # Extract required fields
            if not all(
                k in driver_data
                for k in ["name", "description", "version", "maintainer"]
            ):
                print(f"Driver config {yaml_path} missing required fields")
                return None

            # Create driver object
            driver = Driver(
                name=driver_data["name"],
                description=driver_data["description"],
                version=driver_data["version"],
                maintainer=driver_data["maintainer"],
                image=f"monadical/mc-{driver_data['name']}:latest",
                ports=driver_data.get("ports", []),
            )

            return driver
        except Exception as e:
            print(f"Error loading driver from {yaml_path}: {e}")
            return None

    def load_builtin_drivers(self) -> Dict[str, Driver]:
        """Load all built-in drivers from the drivers directory"""
        drivers = {}

        if not BUILTIN_DRIVERS_DIR.exists():
            return drivers

        for driver_dir in BUILTIN_DRIVERS_DIR.iterdir():
            if driver_dir.is_dir():
                driver = self.load_driver_from_dir(driver_dir)
                if driver:
                    drivers[driver.name] = driver

        return drivers

    def get_driver_path(self, driver_name: str) -> Optional[Path]:
        """Get the directory path for a driver"""
        # Check built-in drivers first
        builtin_path = BUILTIN_DRIVERS_DIR / driver_name
        if builtin_path.exists() and builtin_path.is_dir():
            return builtin_path

        # Then check user drivers
        user_path = self.drivers_dir / driver_name
        if user_path.exists() and user_path.is_dir():
            return user_path

        return None

@@ -1,409 +0,0 @@
import os
import sys
import uuid
import docker
import concurrent.futures
from typing import Dict, List, Optional, Tuple
from docker.errors import DockerException, ImageNotFound

from .models import Session, SessionStatus
from .config import ConfigManager


class ContainerManager:
    def __init__(self, config_manager: Optional[ConfigManager] = None):
        self.config_manager = config_manager or ConfigManager()
        try:
            self.client = docker.from_env()
            # Test connection
            self.client.ping()
        except DockerException as e:
            print(f"Error connecting to Docker: {e}")
            sys.exit(1)

    def _ensure_network(self) -> None:
        """Ensure the MC network exists"""
        network_name = self.config_manager.config.docker.get("network", "mc-network")
        networks = self.client.networks.list(names=[network_name])
        if not networks:
            self.client.networks.create(network_name, driver="bridge")

    def _generate_session_id(self) -> str:
        """Generate a unique session ID"""
        return str(uuid.uuid4())[:8]

    def list_sessions(self) -> List[Session]:
        """List all active MC sessions"""
        sessions = []
        try:
            containers = self.client.containers.list(
                all=True, filters={"label": "mc.session"}
            )

            for container in containers:
                container_id = container.id
                labels = container.labels

                session_id = labels.get("mc.session.id")
                if not session_id:
                    continue

                status = SessionStatus.RUNNING
                if container.status == "exited":
                    status = SessionStatus.STOPPED
                elif container.status == "created":
                    status = SessionStatus.CREATING

                session = Session(
                    id=session_id,
                    name=labels.get("mc.session.name", f"mc-{session_id}"),
                    driver=labels.get("mc.driver", "unknown"),
                    status=status,
                    container_id=container_id,
                    created_at=container.attrs["Created"],
                    project=labels.get("mc.project"),
                )

                # Get port mappings
                if container.attrs.get("NetworkSettings", {}).get("Ports"):
                    ports = {}
                    for container_port, host_ports in container.attrs[
                        "NetworkSettings"
                    ]["Ports"].items():
                        if host_ports:
                            # Strip /tcp or /udp suffix and convert to int
                            container_port_num = int(container_port.split("/")[0])
                            host_port = int(host_ports[0]["HostPort"])
                            ports[container_port_num] = host_port
                    session.ports = ports

                sessions.append(session)

        except DockerException as e:
            print(f"Error listing sessions: {e}")

        return sessions

    def create_session(
        self,
        driver_name: str,
        project: Optional[str] = None,
        environment: Optional[Dict[str, str]] = None,
        session_name: Optional[str] = None,
        mount_local: bool = True,
    ) -> Optional[Session]:
        """Create a new MC session

        Args:
            driver_name: The name of the driver to use
            project: Optional project repository URL
            environment: Optional environment variables
            session_name: Optional session name
            mount_local: Whether to mount the current directory to /app
        """
        try:
            # Validate driver exists
            driver = self.config_manager.get_driver(driver_name)
            if not driver:
                print(f"Driver '{driver_name}' not found")
                return None

            # Generate session ID and name
            session_id = self._generate_session_id()
            if not session_name:
                session_name = f"mc-{session_id}"

            # Ensure network exists
            self._ensure_network()

            # Prepare environment variables
            env_vars = environment or {}

            # Add project URL to environment if provided
            if project:
                env_vars["MC_PROJECT_URL"] = project

            # Pull image if needed
            try:
                self.client.images.get(driver.image)
            except ImageNotFound:
                print(f"Pulling image {driver.image}...")
                self.client.images.pull(driver.image)

            # Set up volume mounts
            volumes = {}
            # If project URL is provided, don't mount local directory (will clone into /app)
            # If no project URL and mount_local is True, mount local directory to /app
            if not project and mount_local:
                # Mount current directory to /app in the container
                import os

                current_dir = os.getcwd()
                volumes[current_dir] = {"bind": "/app", "mode": "rw"}
                print(f"Mounting local directory {current_dir} to /app")
            elif project:
                print(
                    f"Project URL provided - container will clone {project} into /app during initialization"
                )

            # Create container
            container = self.client.containers.create(
                image=driver.image,
                name=session_name,
                hostname=session_name,
                detach=True,
                tty=True,
                stdin_open=True,
                environment=env_vars,
                volumes=volumes,
                labels={
                    "mc.session": "true",
                    "mc.session.id": session_id,
                    "mc.session.name": session_name,
                    "mc.driver": driver_name,
                    "mc.project": project or "",
                },
                network=self.config_manager.config.docker.get("network", "mc-network"),
                ports={f"{port}/tcp": None for port in driver.ports},
            )

            # Start container
            container.start()

            # Get updated port information
            container.reload()
            ports = {}
            if container.attrs.get("NetworkSettings", {}).get("Ports"):
                for container_port, host_ports in container.attrs["NetworkSettings"][
                    "Ports"
                ].items():
                    if host_ports:
                        container_port_num = int(container_port.split("/")[0])
                        host_port = int(host_ports[0]["HostPort"])
                        ports[container_port_num] = host_port

            # Create session object
            session = Session(
                id=session_id,
                name=session_name,
                driver=driver_name,
                status=SessionStatus.RUNNING,
                container_id=container.id,
                environment=env_vars,
                project=project,
                created_at=container.attrs["Created"],
                ports=ports,
            )

            # Save session to config as JSON-compatible dict
            self.config_manager.add_session(session_id, session.model_dump(mode="json"))

            return session

        except DockerException as e:
            print(f"Error creating session: {e}")
            return None

    def close_session(self, session_id: str) -> bool:
        """Close a MC session"""
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id:
                    return self._close_single_session(session)

            print(f"Session '{session_id}' not found")
            return False

        except DockerException as e:
            print(f"Error closing session: {e}")
            return False

    def connect_session(self, session_id: str) -> bool:
        """Connect to a running MC session"""
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id and session.container_id:
                    if session.status != SessionStatus.RUNNING:
                        print(f"Session '{session_id}' is not running")
                        return False

                    # Execute interactive shell in container
                    # The init-status.sh script will automatically show logs if needed
                    print(f"Connecting to session {session_id}...")
                    os.system(f"docker exec -it {session.container_id} /bin/bash")
                    return True

            print(f"Session '{session_id}' not found")
            return False

        except DockerException as e:
            print(f"Error connecting to session: {e}")
            return False

    def _close_single_session(self, session: Session) -> bool:
        """Close a single session (helper for parallel processing)

        Args:
            session: The session to close

        Returns:
            bool: Whether the session was successfully closed
        """
        if not session.container_id:
            return False

        try:
            container = self.client.containers.get(session.container_id)
            container.stop()
            container.remove()
            self.config_manager.remove_session(session.id)
            return True
        except DockerException as e:
            print(f"Error closing session {session.id}: {e}")
            return False

    def close_all_sessions(self, progress_callback=None) -> Tuple[int, bool]:
        """Close all MC sessions with parallel processing and progress reporting

        Args:
            progress_callback: Optional callback function to report progress
                The callback should accept (session_id, status, message)

        Returns:
            tuple: (number of sessions closed, success)
        """
        try:
            sessions = self.list_sessions()
            if not sessions:
                return 0, True

            # No need for session status as we receive it via callback

            # Define a wrapper to track progress
            def close_with_progress(session):
                if not session.container_id:
                    return False

                try:
                    container = self.client.containers.get(session.container_id)
                    # Stop and remove container
                    container.stop()
                    container.remove()
                    # Remove from config
                    self.config_manager.remove_session(session.id)

                    # Notify about completion
                    if progress_callback:
                        progress_callback(
                            session.id,
                            "completed",
                            f"{session.name} closed successfully",
                        )

                    return True
                except DockerException as e:
                    error_msg = f"Error: {str(e)}"
                    if progress_callback:
                        progress_callback(session.id, "failed", error_msg)
                    print(f"Error closing session {session.id}: {e}")
                    return False

            # Use ThreadPoolExecutor to close sessions in parallel
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=min(10, len(sessions))
            ) as executor:
                # Submit all session closing tasks
                future_to_session = {
                    executor.submit(close_with_progress, session): session
                    for session in sessions
                }

                # Collect results
                closed_count = 0
                for future in concurrent.futures.as_completed(future_to_session):
                    session = future_to_session[future]
                    try:
                        success = future.result()
                        if success:
                            closed_count += 1
                    except Exception as e:
                        print(f"Error closing session {session.id}: {e}")

            return closed_count, closed_count > 0

        except DockerException as e:
            print(f"Error closing all sessions: {e}")
            return 0, False

    def get_session_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
        """Get logs from a MC session"""
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id and session.container_id:
                    container = self.client.containers.get(session.container_id)
                    if follow:
                        for line in container.logs(stream=True, follow=True):
                            print(line.decode().strip())
                        return None
                    else:
                        return container.logs().decode()

            print(f"Session '{session_id}' not found")
            return None

        except DockerException as e:
            print(f"Error getting session logs: {e}")
            return None

    def get_init_logs(self, session_id: str, follow: bool = False) -> Optional[str]:
        """Get initialization logs from a MC session

        Args:
            session_id: The session ID
            follow: Whether to follow the logs

        Returns:
            The logs as a string, or None if there was an error
        """
        try:
            sessions = self.list_sessions()
            for session in sessions:
                if session.id == session_id and session.container_id:
                    container = self.client.containers.get(session.container_id)

                    # Check if initialization is complete
                    init_complete = False
                    try:
                        exit_code, output = container.exec_run(
                            "grep -q 'INIT_COMPLETE=true' /init.status"
                        )
                        init_complete = exit_code == 0
                    except DockerException:
                        pass

                    if follow and not init_complete:
                        print(
                            f"Following initialization logs for session {session_id}..."
                        )
                        print("Press Ctrl+C to stop following")
                        container.exec_run(
                            "tail -f /init.log", stream=True, demux=True, tty=True
                        )
                        return None
                    else:
                        exit_code, output = container.exec_run("cat /init.log")
                        if exit_code == 0:
                            return output.decode()
                        else:
                            print("No initialization logs found")
                            return None

            print(f"Session '{session_id}' not found")
            return None

        except DockerException as e:
            print(f"Error getting initialization logs: {e}")
            return None

@@ -1,28 +0,0 @@
"""
Base driver implementation for MAI
"""

from typing import Dict, Optional

from ..models import Driver


class DriverManager:
    """Manager for MAI drivers"""

    @staticmethod
    def get_default_drivers() -> Dict[str, Driver]:
        """Get the default built-in drivers"""
        from ..config import DEFAULT_DRIVERS

        return DEFAULT_DRIVERS

    @staticmethod
    def get_driver_metadata(driver_name: str) -> Optional[Dict]:
        """Get metadata for a specific driver"""
        from ..config import DEFAULT_DRIVERS

        if driver_name in DEFAULT_DRIVERS:
            return DEFAULT_DRIVERS[driver_name].model_dump()

        return None

@@ -1,50 +0,0 @@
from enum import Enum
from typing import Dict, List, Optional
from pydantic import BaseModel, Field


class SessionStatus(str, Enum):
    CREATING = "creating"
    RUNNING = "running"
    STOPPED = "stopped"
    FAILED = "failed"


class DriverEnvironmentVariable(BaseModel):
    name: str
    description: str
    required: bool = False
    default: Optional[str] = None
    sensitive: bool = False


class Driver(BaseModel):
    name: str
    description: str
    version: str
    maintainer: str
    image: str
    environment: List[DriverEnvironmentVariable] = []
    ports: List[int] = []
    volumes: List[Dict[str, str]] = []


class Session(BaseModel):
    id: str
    name: str
    driver: str
    status: SessionStatus
    container_id: Optional[str] = None
    environment: Dict[str, str] = Field(default_factory=dict)
    project: Optional[str] = None
    created_at: str
    ports: Dict[int, int] = Field(default_factory=dict)


class Config(BaseModel):
    docker: Dict[str, str] = Field(default_factory=dict)
    drivers: Dict[str, Driver] = Field(default_factory=dict)
    sessions: Dict[str, dict] = Field(
        default_factory=dict
    )  # Store as dict to avoid serialization issues
    defaults: Dict[str, str] = Field(default_factory=dict)

@@ -1,14 +0,0 @@
"""
MC Service - Container Management Web Service
(This is a placeholder for Phase 2)
"""


def main() -> None:
    """Run the MC service"""
    print("MC Service - Container Management Web Service")
    print("This feature will be implemented in Phase 2")


if __name__ == "__main__":
    main()

pyproject.toml (103 lines changed)
@@ -1,15 +1,31 @@
[project]
name = "mcontainer"
version = "0.1.0"
description = "Monadical Container Tool"
name = "cubbi"
version = "0.5.0"
description = "Cubbi Container Tool"
readme = "README.md"
requires-python = ">=3.12"
license = "MIT"
authors = [
    {name = "Monadical SAS", email = "contact@monadical.com"}
]
dependencies = [
    "typer>=0.9.0",
    "docker>=7.0.0",
    "pyyaml>=6.0.1",
    "rich>=13.6.0",
    "pydantic>=2.5.0",
    "questionary>=2.0.0",
    "requests>=2.32.3",
]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3.12",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Topic :: Software Development",
    "Topic :: Scientific/Engineering :: Artificial Intelligence"
]

[build-system]

@@ -24,15 +40,94 @@ dev = [
]

[project.scripts]
mc = "mcontainer.cli:app"
cubbi = "cubbi.cli:app"
cubbix = "cubbi.cli:session_create_entry_point"

[tool.ruff]
line-length = 88
target-version = "py312"

[tool.pytest.ini_options]
# Exclude integration tests by default
addopts = "-v --tb=short -m 'not integration'"
markers = [
    "integration: marks tests as integration tests (deselected by default)",
]

[tool.mypy]
python_version = "3.12"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true

[dependency-groups]
dev = [
    "pytest>=8.3.5",
]

[tool.semantic_release]
assets = []
build_command_env = []
commit_message = "{version}\n\nAutomatically generated by python-semantic-release"
logging_use_named_masks = false
major_on_zero = true
allow_zero_version = true
no_git_verify = false
tag_format = "v{version}"
version_toml = [
    "pyproject.toml:project.version:nf"
]

[tool.semantic_release.branches.main]
match = "(main|master)"
prerelease_token = "rc"
prerelease = false

[tool.semantic_release.changelog]
exclude_commit_patterns = []
mode = "init"
insertion_flag = "<!-- version list -->"
template_dir = "templates"

[tool.semantic_release.changelog.default_templates]
changelog_file = "CHANGELOG.md"
output_format = "md"
mask_initial_release = false

[tool.semantic_release.changelog.environment]
block_start_string = "{%"
block_end_string = "%}"
variable_start_string = "{{"
variable_end_string = "}}"
comment_start_string = "{#"
comment_end_string = "#}"
trim_blocks = false
lstrip_blocks = false
newline_sequence = "\n"
keep_trailing_newline = false
extensions = []
autoescape = false

[tool.semantic_release.commit_author]
env = "GIT_COMMIT_AUTHOR"
default = "semantic-release <semantic-release>"

[tool.semantic_release.commit_parser_options]
minor_tags = ["feat"]
patch_tags = ["fix", "perf"]
other_allowed_tags = ["build", "chore", "ci", "docs", "style", "refactor", "test"]
allowed_tags = ["feat", "fix", "perf", "build", "chore", "ci", "docs", "style", "refactor", "test"]
default_bump_level = 0
parse_squash_commits = false
ignore_merge_commits = false

[tool.semantic_release.remote]
name = "origin"
type = "github"
ignore_token_for_push = false
insecure = false

[tool.semantic_release.publish]
dist_glob_patterns = ["dist/*"]
upload_to_vcs_release = true

tests/README_integration.md (new file, 83 lines)
@@ -0,0 +1,83 @@
# Integration Tests

This directory contains integration tests for cubbi images with different model combinations.

## Test Matrix

The integration tests cover (a parametrization sketch follows the list):
- **5 Images**: goose, aider, claudecode, opencode, crush
- **4 Models**: anthropic/claude-sonnet-4-20250514, openai/gpt-4o, openrouter/openai/gpt-4o, litellm/gpt-oss:120b
- **Total**: 20 image/model combinations + additional tests
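
A parametrization along these lines would generate the matrix; this is an editor's sketch (names and the exact invocation are illustrative, not the actual test code):

```python
import subprocess

import pytest

IMAGES = ["goose", "aider", "claudecode", "opencode", "crush"]
MODELS = [
    "anthropic/claude-sonnet-4-20250514",
    "openai/gpt-4o",
    "openrouter/openai/gpt-4o",
    "litellm/gpt-oss:120b",
]


@pytest.mark.integration
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("image", IMAGES)
def test_image_model_combination(image: str, model: str) -> None:
    # Stacked parametrize decorators cross the lists: 5 x 4 = 20 cases.
    result = subprocess.run(
        ["uv", "run", "-m", "cubbi.cli", "session", "create",
         "-i", image, "-m", model, "--run", "echo hello"],
        capture_output=True,
        text=True,
    )
    assert result.returncode == 0
```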

## Running Tests

### Default (Skip Integration)
```bash
# Regular tests only (integration tests excluded by default)
uv run -m pytest

# Specific test file (excluding integration)
uv run -m pytest tests/test_cli.py
```

### Integration Tests Only
```bash
# Run all integration tests (20 combinations + helpers)
uv run -m pytest -m integration

# Run specific image with all models
uv run -m pytest -m integration -k "goose"

# Run specific model with all images
uv run -m pytest -m integration -k "anthropic"

# Run single combination
uv run -m pytest -m integration -k "goose and anthropic"

# Verbose output with timing
uv run -m pytest -m integration -v -s
```

### Combined Tests
```bash
# Run both regular and integration tests
uv run -m pytest -m "not slow"  # or remove the default marker exclusion
```

## Test Structure

### `test_image_model_combination`
- Parametrized test with all image/model combinations
- Tests single prompt/response functionality
- Uses appropriate command syntax for each tool
- Verifies successful completion and basic output

### `test_image_help_command`
- Tests help command for each image
- Ensures basic functionality works

### `test_all_images_available`
- Verifies all required images are built and available

## Command Templates

Each image uses its specific command syntax (collected into a table in the sketch below):
- **goose**: `goose run -t 'prompt' --no-session --quiet`
- **aider**: `aider --message 'prompt' --yes-always --no-fancy-input --no-check-update --no-auto-commits`
- **claudecode**: `claude -p 'prompt'`
- **opencode**: `opencode run -m MODEL 'prompt'`
- **crush**: `crush run 'prompt'`
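
A table-driven form of the same templates — an editor's sketch, assuming a `{prompt}`/`{model}` placeholder convention (the real tests may differ):

```python
# Hypothetical helper: maps each image to the syntax listed above.
COMMAND_TEMPLATES = {
    "goose": "goose run -t '{prompt}' --no-session --quiet",
    "aider": (
        "aider --message '{prompt}' --yes-always --no-fancy-input "
        "--no-check-update --no-auto-commits"
    ),
    "claudecode": "claude -p '{prompt}'",
    "opencode": "opencode run -m {model} '{prompt}'",
    "crush": "crush run '{prompt}'",
}


def render_command(image: str, prompt: str, model: str = "") -> str:
    """Fill an image's command template with a prompt (and model, if used)."""
    return COMMAND_TEMPLATES[image].format(prompt=prompt, model=model)
```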

## Expected Results

All tests should pass when:
1. Images are built (`uv run -m cubbi.cli image build [IMAGE]`)
2. API keys are configured (`uv run -m cubbi.cli configure`)
3. Models are accessible and working

## Debugging Failed Tests

If tests fail, check:
1. Image availability: `uv run -m cubbi.cli image list`
2. Configuration: `uv run -m cubbi.cli config list`
3. Manual test: `uv run -m cubbi.cli session create -i IMAGE -m MODEL --run "COMMAND"`

tests/conftest.py (new file, 169 lines)
@@ -0,0 +1,169 @@
"""
Common test fixtures for Cubbi Container tests.
"""

import tempfile
import uuid
from pathlib import Path
from unittest.mock import patch

import docker
import pytest

from cubbi.config import ConfigManager
from cubbi.container import ContainerManager
from cubbi.models import Session, SessionStatus
from cubbi.session import SessionManager
from cubbi.user_config import UserConfigManager


# Check if Docker is available
def is_docker_available():
    """Check if Docker is available and running."""
    try:
        client = docker.from_env()
        client.ping()
        return True
    except Exception:
        return False


# Register custom mark for Docker-dependent tests
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "requires_docker: mark test that requires Docker to be running"
    )


# Decorator to mark tests that require Docker
requires_docker = pytest.mark.skipif(
    not is_docker_available(),
    reason="Docker is not available or not running",
)
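
# Usage illustration (editor's note, not part of the original file): a test
# that needs a live Docker daemon opts in with the decorator above, e.g.
#
#     @requires_docker
#     def test_real_container_roundtrip():
#         ...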

@pytest.fixture
def temp_config_dir():
    """Create a temporary directory for configuration files."""
    with tempfile.TemporaryDirectory() as temp_dir:
        yield Path(temp_dir)


@pytest.fixture
def mock_container_manager(isolate_cubbi_config):
    """Mock the ContainerManager class with proper behaviors for testing."""
    mock_session = Session(
        id="test-session-id",
        name="test-session",
        image="goose",
        status=SessionStatus.RUNNING,
        ports={8080: 32768},
    )

    container_manager = isolate_cubbi_config["container_manager"]

    # Patch the container manager methods for mocking
    with (
        patch.object(container_manager, "list_sessions", return_value=[]),
        patch.object(container_manager, "create_session", return_value=mock_session),
        patch.object(container_manager, "close_session", return_value=True),
        patch.object(container_manager, "close_all_sessions", return_value=(3, True)),
    ):
        yield container_manager


@pytest.fixture
def cli_runner():
    """Provide a CLI runner for testing commands."""
    from typer.testing import CliRunner

    return CliRunner()


@pytest.fixture
def test_file_content(temp_config_dir):
    """Create a test file with content in a temporary directory."""
    test_content = "This is a test file for volume mounting"
    test_file = temp_config_dir / "test_volume_file.txt"
    with open(test_file, "w") as f:
        f.write(test_content)
    return test_file, test_content


@pytest.fixture
def docker_test_network():
    """Create a Docker network for testing and clean it up after."""
    if not is_docker_available():
        pytest.skip("Docker is not available")
        return None

    test_network_name = f"cubbi-test-network-{uuid.uuid4().hex[:8]}"
    client = docker.from_env()
    network = client.networks.create(test_network_name, driver="bridge")

    yield test_network_name

    # Clean up
    try:
        network.remove()
    except Exception:
        # Network might be in use by other containers
        pass


@pytest.fixture(autouse=True, scope="function")
def isolate_cubbi_config(temp_config_dir):
    """
    Automatically isolate all Cubbi configuration for every test.

    This fixture ensures that tests never touch the user's real configuration
    by patching both ConfigManager and UserConfigManager in cli.py to use
    temporary directories.
    """
    # Create isolated config instances with temporary paths
    config_path = temp_config_dir / "config.yaml"
    user_config_path = temp_config_dir / "user_config.yaml"

    # Create the ConfigManager with a custom config path
    isolated_config_manager = ConfigManager(config_path)

    # Create the UserConfigManager with a custom config path
    isolated_user_config = UserConfigManager(str(user_config_path))

    # Create isolated session manager
    sessions_path = temp_config_dir / "sessions.yaml"
    isolated_session_manager = SessionManager(sessions_path)

    # Create isolated container manager
    isolated_container_manager = ContainerManager(
        isolated_config_manager, isolated_session_manager, isolated_user_config
    )

    # Patch all the global instances in cli.py and the UserConfigManager class
    with (
        patch("cubbi.cli.config_manager", isolated_config_manager),
        patch("cubbi.cli.user_config", isolated_user_config),
        patch("cubbi.cli.session_manager", isolated_session_manager),
        patch("cubbi.cli.container_manager", isolated_container_manager),
        patch("cubbi.cli.UserConfigManager", return_value=isolated_user_config),
    ):
        # Create isolated MCP manager with isolated user config
        from cubbi.mcp import MCPManager

        isolated_mcp_manager = MCPManager(config_manager=isolated_user_config)

        # Patch the global mcp_manager instance
        with patch("cubbi.cli.mcp_manager", isolated_mcp_manager):
            yield {
                "config_manager": isolated_config_manager,
                "user_config": isolated_user_config,
                "session_manager": isolated_session_manager,
                "container_manager": isolated_container_manager,
                "mcp_manager": isolated_mcp_manager,
            }


@pytest.fixture
def patched_config_manager(isolate_cubbi_config):
    """Compatibility fixture - returns the isolated user config."""
    return isolate_cubbi_config["user_config"]

@@ -1,5 +1,6 @@
from typer.testing import CliRunner
from mcontainer.cli import app

from cubbi.cli import app

runner = CliRunner()

@@ -8,15 +9,15 @@ def test_version() -> None:
    """Test version command"""
    result = runner.invoke(app, ["version"])
    assert result.exit_code == 0
    assert "MC - Monadical Container Tool" in result.stdout
    assert "Cubbi - Cubbi Container Tool" in result.stdout


def test_session_list() -> None:
    """Test session list command"""
    result = runner.invoke(app, ["session", "list"])
    assert result.exit_code == 0
    # Could be either "No active sessions found" or a table of sessions
    assert "sessions" in result.stdout.lower() or "no active" in result.stdout.lower()
    # Could be either "No active sessions found" or a table with headers
    assert "no active" in result.stdout.lower() or "id" in result.stdout.lower()


def test_help() -> None:

@@ -24,4 +25,4 @@ def test_help() -> None:
    result = runner.invoke(app, ["--help"])
    assert result.exit_code == 0
    assert "Usage" in result.stdout
    assert "Monadical Container Tool" in result.stdout
    assert "Cubbi Container Tool" in result.stdout
291
tests/test_config_commands.py
Normal file
291
tests/test_config_commands.py
Normal file
@@ -0,0 +1,291 @@
|
||||
"""
|
||||
Tests for the configuration management commands.
|
||||
"""
|
||||
|
||||
from cubbi.cli import app
|
||||
|
||||
|
||||
def test_config_list(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config list' command."""
|
||||
result = cli_runner.invoke(app, ["config", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration" in result.stdout
|
||||
assert "Value" in result.stdout
|
||||
|
||||
# Check for default configurations
|
||||
assert "defaults.image" in result.stdout
|
||||
assert "defaults.connect" in result.stdout
|
||||
assert "defaults.mount_local" in result.stdout
|
||||
|
||||
|
||||
def test_config_get(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config get' command."""
|
||||
# Test getting an existing value
|
||||
result = cli_runner.invoke(app, ["config", "get", "defaults.image"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "defaults.image" in result.stdout
|
||||
assert "goose" in result.stdout
|
||||
|
||||
# Test getting a non-existent value
|
||||
result = cli_runner.invoke(app, ["config", "get", "nonexistent.key"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "not found" in result.stdout
|
||||
|
||||
|
||||
def test_config_set(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config set' command."""
|
||||
# Test setting a string value
|
||||
result = cli_runner.invoke(app, ["config", "set", "defaults.image", "claude"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("defaults.image") == "claude"
|
||||
|
||||
# Test setting a boolean value
|
||||
result = cli_runner.invoke(app, ["config", "set", "defaults.connect", "false"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("defaults.connect") is False
|
||||
|
||||
# Test setting a new value
|
||||
result = cli_runner.invoke(app, ["config", "set", "new.setting", "value"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Configuration updated" in result.stdout
|
||||
assert patched_config_manager.get("new.setting") == "value"
|
||||
|
||||
|
||||
def test_volume_list_empty(cli_runner, patched_config_manager):
|
||||
"""Test the 'cubbi config volume list' command with no volumes."""
|
||||
result = cli_runner.invoke(app, ["config", "volume", "list"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "No default volumes configured" in result.stdout
|
||||
|
||||
|
||||
def test_volume_add_and_list(cli_runner, patched_config_manager, temp_config_dir):
|
||||
"""Test adding a volume and then listing it."""
|
||||
# Create a test directory
|
||||
test_dir = temp_config_dir / "test_dir"
|
||||
test_dir.mkdir()
|
||||
|
||||
# Add a volume
|
||||
result = cli_runner.invoke(
|
||||
app, ["config", "volume", "add", f"{test_dir}:/container/path"]
|
||||
)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Added volume" in result.stdout
|
||||
|
||||
# Verify volume was added to the configuration
|
||||
volumes = patched_config_manager.get("defaults.volumes", [])
|
||||
assert f"{test_dir}:/container/path" in volumes
|
||||
|
||||
# List volumes - just check the command runs without error
|
||||
result = cli_runner.invoke(app, ["config", "volume", "list"])
|
||||
assert result.exit_code == 0
|
||||
assert "/container/path" in result.stdout
|
||||
|
||||
|
||||
def test_volume_remove(cli_runner, patched_config_manager, temp_config_dir):
|
||||
"""Test removing a volume."""
|
||||
# Create a test directory
|
||||
test_dir = temp_config_dir / "test_dir"
|
||||
test_dir.mkdir()
|
||||
|
||||
    # Add a volume
    patched_config_manager.set("defaults.volumes", [f"{test_dir}:/container/path"])

    # Remove the volume
    result = cli_runner.invoke(app, ["config", "volume", "remove", f"{test_dir}"])

    assert result.exit_code == 0
    assert "Removed volume" in result.stdout

    # Verify it's gone
    volumes = patched_config_manager.get("defaults.volumes")
    assert len(volumes) == 0


def test_volume_add_nonexistent_path(cli_runner, patched_config_manager, monkeypatch):
    """Test adding a volume with a nonexistent path."""
    nonexistent_path = "/path/that/does/not/exist"

    # Mock typer.confirm to return True
    monkeypatch.setattr("typer.confirm", lambda message: True)

    # Add a volume with nonexistent path
    result = cli_runner.invoke(
        app, ["config", "volume", "add", f"{nonexistent_path}:/container/path"]
    )

    assert result.exit_code == 0
    assert "Warning: Local path" in result.stdout
    assert "Added volume" in result.stdout

    # Verify it was added
    volumes = patched_config_manager.get("defaults.volumes")
    assert f"{nonexistent_path}:/container/path" in volumes


def test_network_list_empty(cli_runner, patched_config_manager):
    """Test the 'cubbi config network list' command with no networks."""
    result = cli_runner.invoke(app, ["config", "network", "list"])

    assert result.exit_code == 0
    assert "No default networks configured" in result.stdout


def test_network_add_and_list(cli_runner, patched_config_manager):
    """Test adding a network and then listing it."""
    # Add a network
    result = cli_runner.invoke(app, ["config", "network", "add", "test-network"])

    assert result.exit_code == 0
    assert "Added network" in result.stdout

    # List networks
    result = cli_runner.invoke(app, ["config", "network", "list"])

    assert result.exit_code == 0
    assert "test-network" in result.stdout


def test_network_remove(cli_runner, patched_config_manager):
    """Test removing a network."""
    # Add a network
    patched_config_manager.set("defaults.networks", ["test-network"])

    # Remove the network
    result = cli_runner.invoke(app, ["config", "network", "remove", "test-network"])

    assert result.exit_code == 0
    assert "Removed network" in result.stdout

    # Verify it's gone
    networks = patched_config_manager.get("defaults.networks")
    assert len(networks) == 0


def test_config_reset(cli_runner, patched_config_manager, monkeypatch):
    """Test resetting the configuration."""
    # Set a custom value first
    patched_config_manager.set("defaults.image", "custom-image")

    # Mock typer.confirm to return True
    monkeypatch.setattr("typer.confirm", lambda message: True)

    # Reset config
    result = cli_runner.invoke(app, ["config", "reset"])

    assert result.exit_code == 0
    assert "Configuration reset to defaults" in result.stdout

    # Verify it was reset
    assert patched_config_manager.get("defaults.image") == "goose"


def test_port_list_empty(cli_runner, patched_config_manager):
    """Test listing ports when none are configured."""
    result = cli_runner.invoke(app, ["config", "port", "list"])

    assert result.exit_code == 0
    assert "No default ports configured" in result.stdout


def test_port_add_single(cli_runner, patched_config_manager):
    """Test adding a single port."""
    result = cli_runner.invoke(app, ["config", "port", "add", "8000"])

    assert result.exit_code == 0
    assert "Added port 8000 to defaults" in result.stdout

    # Verify it was added
    ports = patched_config_manager.get("defaults.ports")
    assert 8000 in ports


def test_port_add_multiple(cli_runner, patched_config_manager):
    """Test adding multiple ports with comma separation."""
    result = cli_runner.invoke(app, ["config", "port", "add", "8000,3000,5173"])

    assert result.exit_code == 0
    assert "Added ports [8000, 3000, 5173] to defaults" in result.stdout

    # Verify they were added
    ports = patched_config_manager.get("defaults.ports")
    assert 8000 in ports
    assert 3000 in ports
    assert 5173 in ports


def test_port_add_duplicate(cli_runner, patched_config_manager):
    """Test adding a port that already exists."""
    # Add a port first
    patched_config_manager.set("defaults.ports", [8000])

    # Try to add the same port again
    result = cli_runner.invoke(app, ["config", "port", "add", "8000"])

    assert result.exit_code == 0
    assert "Port 8000 is already in defaults" in result.stdout


def test_port_add_invalid_format(cli_runner, patched_config_manager):
    """Test adding an invalid port format."""
    result = cli_runner.invoke(app, ["config", "port", "add", "invalid"])

    assert result.exit_code == 0
    assert "Error: Invalid port format" in result.stdout


def test_port_add_invalid_range(cli_runner, patched_config_manager):
    """Test adding a port outside valid range."""
    result = cli_runner.invoke(app, ["config", "port", "add", "70000"])

    assert result.exit_code == 0
    assert "Error: Invalid ports [70000]" in result.stdout
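    # (65535 is the highest valid TCP port, so 70000 can never be forwarded.)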


def test_port_list_with_ports(cli_runner, patched_config_manager):
    """Test listing ports when some are configured."""
    # Add some ports
    patched_config_manager.set("defaults.ports", [8000, 3000])

    # List ports
    result = cli_runner.invoke(app, ["config", "port", "list"])

    assert result.exit_code == 0
    assert "8000" in result.stdout
    assert "3000" in result.stdout


def test_port_remove(cli_runner, patched_config_manager):
    """Test removing a port."""
    # Add a port first
    patched_config_manager.set("defaults.ports", [8000])

    # Remove the port
    result = cli_runner.invoke(app, ["config", "port", "remove", "8000"])

    assert result.exit_code == 0
    assert "Removed port 8000 from defaults" in result.stdout

    # Verify it's gone
    ports = patched_config_manager.get("defaults.ports")
    assert 8000 not in ports


def test_port_remove_not_found(cli_runner, patched_config_manager):
    """Test removing a port that doesn't exist."""
    result = cli_runner.invoke(app, ["config", "port", "remove", "8000"])

    assert result.exit_code == 0
    assert "Port 8000 is not in defaults" in result.stdout


# patched_config_manager fixture is now in conftest.py
90
tests/test_config_isolation.py
Normal file
@@ -0,0 +1,90 @@
"""
Test that configuration isolation works correctly and doesn't touch user's real config.
"""

from pathlib import Path
from cubbi.cli import app


def test_config_isolation_preserves_user_config(cli_runner, isolate_cubbi_config):
    """Test that test isolation doesn't affect user's real configuration."""

    # Get the user's real config path
    real_config_path = Path.home() / ".config" / "cubbi" / "config.yaml"

    # If the user has a real config, store its content before test
    original_content = None
    if real_config_path.exists():
        with open(real_config_path, "r") as f:
            original_content = f.read()

    # Run some config modification commands in the test
    result = cli_runner.invoke(app, ["config", "port", "add", "9999"])
    assert result.exit_code == 0

    result = cli_runner.invoke(app, ["config", "set", "defaults.image", "test-image"])
    assert result.exit_code == 0

    # Verify the user's real config is unchanged
    if original_content is not None:
        with open(real_config_path, "r") as f:
            current_content = f.read()
        assert current_content == original_content
    else:
        # If no real config existed, it should still not exist
        assert not real_config_path.exists()


def test_isolated_config_works_independently(cli_runner, isolate_cubbi_config):
    """Test that the isolated config works correctly for tests."""

    # Add a port to isolated config
    result = cli_runner.invoke(app, ["config", "port", "add", "8888"])
    assert result.exit_code == 0
    assert "Added port 8888 to defaults" in result.stdout

    # Verify it appears in the list
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert result.exit_code == 0
    assert "8888" in result.stdout

    # Remove the port
    result = cli_runner.invoke(app, ["config", "port", "remove", "8888"])
    assert result.exit_code == 0
    assert "Removed port 8888 from defaults" in result.stdout

    # Verify it's gone
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert result.exit_code == 0
    assert "No default ports configured" in result.stdout


def test_each_test_gets_fresh_config(cli_runner, isolate_cubbi_config):
    """Test that each test gets a fresh, isolated configuration."""

    # This test should start with empty ports (fresh config)
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert result.exit_code == 0
    assert "No default ports configured" in result.stdout

    # Add a port
    result = cli_runner.invoke(app, ["config", "port", "add", "7777"])
    assert result.exit_code == 0

    # Verify it's there
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert result.exit_code == 0
    assert "7777" in result.stdout


def test_another_fresh_config_test(cli_runner, isolate_cubbi_config):
    """Another test to verify each test gets a completely fresh config."""

    # This test should also start with empty ports (independent of previous test)
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert result.exit_code == 0
    assert "No default ports configured" in result.stdout

    # The port from the previous test should not be here
    result = cli_runner.invoke(app, ["config", "port", "list"])
    assert "7777" not in result.stdout
135
tests/test_integration.py
Normal file
@@ -0,0 +1,135 @@
"""Integration tests for cubbi images with different model combinations."""

import subprocess
import pytest
from typing import Dict


IMAGES = ["goose", "aider", "opencode", "crush"]

MODELS = [
    "anthropic/claude-sonnet-4-20250514",
    "openai/gpt-4o",
    "openrouter/openai/gpt-4o",
    "litellm/gpt-oss:120b",
]

# Command templates for each tool (based on research)
COMMANDS: Dict[str, str] = {
    "goose": "goose run -t '{prompt}' --no-session --quiet",
    "aider": "aider --message '{prompt}' --yes-always --no-fancy-input --no-check-update --no-auto-commits",
    "opencode": "opencode run '{prompt}'",
    "crush": "crush run -q '{prompt}'",
}
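
# For illustration, this is how a template expands (derived directly from the
# dict above):
#     COMMANDS["goose"].format(prompt="What is 2+2?")
#     == "goose run -t 'What is 2+2?' --no-session --quiet"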


def run_cubbi_command(
    image: str, model: str, command: str, timeout: int = 20
) -> subprocess.CompletedProcess:
    """Run a cubbi command with specified image, model, and command."""
    full_command = [
        "uv",
        "run",
        "-m",
        "cubbi.cli",
        "session",
        "create",
        "-i",
        image,
        "-m",
        model,
        "--no-connect",
        "--no-shell",
        "--run",
        command,
    ]

    return subprocess.run(
        full_command,
        capture_output=True,
        text=True,
        timeout=timeout,
        cwd="/home/tito/code/monadical/cubbi",
    )
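
# So run_cubbi_command("goose", "openai/gpt-4o", cmd) is equivalent to running
#     uv run -m cubbi.cli session create -i goose -m openai/gpt-4o \
#         --no-connect --no-shell --run "<cmd>"
# from the repository checkout.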


def is_successful_response(result: subprocess.CompletedProcess) -> bool:
    """Check if the cubbi command completed successfully."""
    # Check for successful completion markers
    return (
        result.returncode == 0
        and "Initial command finished (exit code: 0)" in result.stdout
        and "Command execution complete" in result.stdout
    )


@pytest.mark.integration
@pytest.mark.parametrize("image", IMAGES)
@pytest.mark.parametrize("model", MODELS)
def test_image_model_combination(image: str, model: str):
    """Test each image with each model using appropriate command syntax."""
    prompt = "What is 2+2?"

    # Get the command template for this image
    command_template = COMMANDS[image]

    # For opencode, we need to substitute the model in the command
    if image == "opencode":
        command = command_template.format(prompt=prompt, model=model)
    else:
        command = command_template.format(prompt=prompt)

    # Run the test with timeout handling
    try:
        result = run_cubbi_command(image, model, command)
    except subprocess.TimeoutExpired:
        pytest.fail(f"Test timed out after 20s for {image} with {model}")

    # Check if the command was successful
    assert is_successful_response(result), (
        f"Failed to run {image} with {model}. "
        f"Return code: {result.returncode}\n"
        f"Stdout: {result.stdout}\n"
        f"Stderr: {result.stderr}"
    )


@pytest.mark.integration
def test_all_images_available():
    """Test that all required images are available for testing."""
    # Run image list command
    result = subprocess.run(
        ["uv", "run", "-m", "cubbi.cli", "image", "list"],
        capture_output=True,
        text=True,
        timeout=30,
        cwd="/home/tito/code/monadical/cubbi",
    )

    assert result.returncode == 0, f"Failed to list images: {result.stderr}"

    for image in IMAGES:
        assert image in result.stdout, f"Image {image} not found in available images"


@pytest.mark.integration
def test_claudecode():
    """Test Claude Code without model preselection since it only supports Anthropic."""
    command = "claude -p hello"

    try:
        result = run_cubbi_command("claudecode", MODELS[0], command, timeout=20)
    except subprocess.TimeoutExpired:
        pytest.fail("Claude Code help command timed out after 20s")

    assert is_successful_response(result), (
        f"Failed to run Claude Code help command. "
        f"Return code: {result.returncode}\n"
        f"Stdout: {result.stdout}\n"
        f"Stderr: {result.stderr}"
    )


if __name__ == "__main__":
    # Allow running the test file directly for development
    pytest.main([__file__, "-v", "-m", "integration"])
372
tests/test_integration_docker.py
Normal file
@@ -0,0 +1,372 @@
"""
Integration tests for Docker interactions in Cubbi Container.
These tests require Docker to be running.
"""

import subprocess
import time
import uuid
import docker


# Import the requires_docker decorator from conftest
from conftest import requires_docker


def execute_command_in_container(container_id, command):
    """Execute a command in a Docker container and return the output."""
    result = subprocess.run(
        ["docker", "exec", container_id, "bash", "-c", command],
        capture_output=True,
        text=True,
    )
    return result.stdout.strip()


def wait_for_container_init(container_id, timeout=5.0, poll_interval=0.1):
    """Poll the container's init status file until it reports completion or the timeout expires."""
    start_time = time.time()

    while time.time() - start_time < timeout:
        try:
            # Check if /cubbi/init.status contains INIT_COMPLETE=true
            result = execute_command_in_container(
                container_id,
                "grep -q 'INIT_COMPLETE=true' /cubbi/init.status 2>/dev/null && echo 'COMPLETE' || echo 'PENDING'",
            )

            if result == "COMPLETE":
                return True

        except subprocess.CalledProcessError:
            # File might not exist yet or container not ready, continue polling
            pass

        time.sleep(poll_interval)

    # Timeout reached
    return False
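
# For reference, the polling in wait_for_container_init() assumes a simple
# init-status contract: the container entrypoint writes key=value lines to
# /cubbi/init.status and adds the line below once setup is done (a sketch of
# the expected file contents, not taken from the cubbi init scripts):
#
#     INIT_COMPLETE=true
#
# Only the INIT_COMPLETE key is grepped for, so any other status lines in the
# file are ignored.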


@requires_docker
def test_integration_session_create_with_volumes(
    isolate_cubbi_config, test_file_content
):
    """Test creating a session with a volume mount."""
    test_file, test_content = test_file_content
    session = None

    try:
        # Get the isolated container manager
        container_manager = isolate_cubbi_config["container_manager"]

        # Create a session with a volume mount
        session = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-volume-{uuid.uuid4().hex[:8]}",
            mount_local=False,  # Don't mount current directory
            volumes={str(test_file): {"bind": "/test/volume_test.txt", "mode": "ro"}},
        )

        assert session is not None
        assert session.status == "running"

        # Wait for container initialization to complete
        init_success = wait_for_container_init(session.container_id)
        assert init_success, "Container initialization timed out"

        # Verify the file exists in the container and has correct content
        container_content = execute_command_in_container(
            session.container_id, "cat /test/volume_test.txt"
        )

        assert container_content == test_content

    finally:
        # Clean up the container (use kill for faster test cleanup)
        if session and session.container_id:
            container_manager.close_session(session.id, kill=True)


@requires_docker
def test_integration_session_create_with_networks(
    isolate_cubbi_config, docker_test_network
):
    """Test creating a session connected to a custom network."""
    session = None

    try:
        # Get the isolated container manager
        container_manager = isolate_cubbi_config["container_manager"]

        # Create a session with the test network
        session = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-network-{uuid.uuid4().hex[:8]}",
            mount_local=False,  # Don't mount current directory
            networks=[docker_test_network],
        )

        assert session is not None
        assert session.status == "running"

        # Wait for container initialization to complete
        init_success = wait_for_container_init(session.container_id)
        assert init_success, "Container initialization timed out"

        # Verify the container is connected to the test network
        # Use inspect to check network connections
        import docker

        client = docker.from_env()
        container = client.containers.get(session.container_id)
        container_networks = container.attrs["NetworkSettings"]["Networks"]

        # Container should be connected to both the default cubbi-network and our test network
        assert docker_test_network in container_networks

        # Verify network interface exists in container
        network_interfaces = execute_command_in_container(
            session.container_id, "ip link show | grep -v 'lo' | wc -l"
        )

        # Should have at least 2 interfaces (eth0 for cubbi-network, eth1 for test network)
        assert int(network_interfaces) >= 2

    finally:
        # Clean up the container (use kill for faster test cleanup)
        if session and session.container_id:
            container_manager.close_session(session.id, kill=True)


@requires_docker
def test_integration_session_create_with_ports(isolate_cubbi_config):
    """Test creating a session with port forwarding."""
    session = None

    try:
        # Get the isolated container manager
        container_manager = isolate_cubbi_config["container_manager"]

        # Create a session with port forwarding
        session = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-ports-{uuid.uuid4().hex[:8]}",
            mount_local=False,  # Don't mount current directory
            ports=[8080, 9000],  # Forward these ports
        )

        assert session is not None
        assert session.status == "running"

        # Verify ports are mapped
        assert isinstance(session.ports, dict)
        assert 8080 in session.ports
        assert 9000 in session.ports

        # Verify port mappings are valid (host ports should be assigned)
        assert isinstance(session.ports[8080], int)
        assert isinstance(session.ports[9000], int)
        assert session.ports[8080] > 0
        assert session.ports[9000] > 0

        # Wait for container initialization to complete
        init_success = wait_for_container_init(session.container_id)
        assert init_success, "Container initialization timed out"

        # Verify Docker port mappings using Docker client
        import docker

        client = docker.from_env()
        container = client.containers.get(session.container_id)
        container_ports = container.attrs["NetworkSettings"]["Ports"]

        # Verify both ports are exposed
        assert "8080/tcp" in container_ports
        assert "9000/tcp" in container_ports

        # Verify host port bindings exist
        assert container_ports["8080/tcp"] is not None
        assert container_ports["9000/tcp"] is not None
        assert len(container_ports["8080/tcp"]) > 0
        assert len(container_ports["9000/tcp"]) > 0

        # Verify host ports match session.ports
        host_port_8080 = int(container_ports["8080/tcp"][0]["HostPort"])
        host_port_9000 = int(container_ports["9000/tcp"][0]["HostPort"])
        assert session.ports[8080] == host_port_8080
        assert session.ports[9000] == host_port_9000

    finally:
        # Clean up the container (use kill for faster test cleanup)
        if session and session.container_id:
            container_manager.close_session(session.id, kill=True)


@requires_docker
def test_integration_session_create_no_ports(isolate_cubbi_config):
    """Test creating a session without port forwarding."""
    session = None

    try:
        # Get the isolated container manager
        container_manager = isolate_cubbi_config["container_manager"]

        # Create a session without ports
        session = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-no-ports-{uuid.uuid4().hex[:8]}",
            mount_local=False,  # Don't mount current directory
            ports=[],  # No ports
        )

        assert session is not None
        assert session.status == "running"

        # Verify no ports are mapped
        assert isinstance(session.ports, dict)
        assert len(session.ports) == 0

        # Wait for container initialization to complete
        init_success = wait_for_container_init(session.container_id)
        assert init_success, "Container initialization timed out"

        # Verify Docker has no port mappings
        import docker

        client = docker.from_env()
        container = client.containers.get(session.container_id)
        container_ports = container.attrs["NetworkSettings"]["Ports"]

        # Should have no port mappings (empty dict or None values)
        for port_spec, bindings in container_ports.items():
            assert bindings is None or len(bindings) == 0

    finally:
        # Clean up the container (use kill for faster test cleanup)
        if session and session.container_id:
            container_manager.close_session(session.id, kill=True)


@requires_docker
def test_integration_session_create_with_single_port(isolate_cubbi_config):
    """Test creating a session with a single port forward."""
    session = None

    try:
        # Get the isolated container manager
        container_manager = isolate_cubbi_config["container_manager"]

        # Create a session with single port
        session = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-single-port-{uuid.uuid4().hex[:8]}",
            mount_local=False,  # Don't mount current directory
            ports=[3000],  # Single port
        )

        assert session is not None
        assert session.status == "running"

        # Verify single port is mapped
        assert isinstance(session.ports, dict)
        assert len(session.ports) == 1
        assert 3000 in session.ports
        assert isinstance(session.ports[3000], int)
        assert session.ports[3000] > 0

        # Wait for container initialization to complete
        init_success = wait_for_container_init(session.container_id)
        assert init_success, "Container initialization timed out"

        client = docker.from_env()
        container = client.containers.get(session.container_id)
        container_ports = container.attrs["NetworkSettings"]["Ports"]

        # Should have exactly one port mapping
        port_mappings = {
            k: v for k, v in container_ports.items() if v is not None and len(v) > 0
        }
        assert len(port_mappings) == 1
        assert "3000/tcp" in port_mappings

    finally:
        # Clean up the container (use kill for faster test cleanup)
        if session and session.container_id:
            container_manager.close_session(session.id, kill=True)


@requires_docker
def test_integration_kill_vs_stop_speed(isolate_cubbi_config):
    """Test that kill is faster than stop for container termination."""
    # Get the isolated container manager
    container_manager = isolate_cubbi_config["container_manager"]

    # Create two identical sessions for comparison
    session_stop = None
    session_kill = None

    try:
        # Create first session (will be stopped gracefully)
        session_stop = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-stop-{uuid.uuid4().hex[:8]}",
            mount_local=False,
            ports=[],
        )

        # Create second session (will be killed)
        session_kill = container_manager.create_session(
            image_name="goose",
            session_name=f"cubbi-test-kill-{uuid.uuid4().hex[:8]}",
            mount_local=False,
            ports=[],
        )

        assert session_stop is not None
        assert session_kill is not None

        # Wait for both containers to initialize
        init_success_stop = wait_for_container_init(session_stop.container_id)
        init_success_kill = wait_for_container_init(session_kill.container_id)
        assert init_success_stop, "Stop test container initialization timed out"
        assert init_success_kill, "Kill test container initialization timed out"

        # Remember the session IDs before the variables are cleared below
        stop_session_id = session_stop.id
        kill_session_id = session_kill.id

        # Time graceful stop
        start_time = time.time()
        container_manager.close_session(session_stop.id, kill=False)
        stop_time = time.time() - start_time
        session_stop = None  # Mark as cleaned up

        # Time kill
        start_time = time.time()
        container_manager.close_session(session_kill.id, kill=True)
        kill_time = time.time() - start_time
        session_kill = None  # Mark as cleaned up

        # Kill should be faster than stop (usually by several seconds)
        # We use a generous threshold since system performance can vary
        assert (
            kill_time < stop_time
        ), f"Kill ({kill_time:.2f}s) should be faster than stop ({stop_time:.2f}s)"
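
        # (Background: "docker stop" sends SIGTERM and waits out a grace
        # period -- 10 seconds by default -- before escalating to SIGKILL,
        # while "docker kill" sends SIGKILL immediately, which is why the
        # kill path should win.)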

        # Verify both methods successfully closed the containers
        # (containers should no longer be in the session list)
        remaining_sessions = container_manager.list_sessions()
        session_ids = [s.id for s in remaining_sessions]
        assert stop_session_id not in session_ids
        assert kill_session_id not in session_ids

    finally:
        # Clean up any remaining containers
        if session_stop and session_stop.container_id:
            try:
                container_manager.close_session(session_stop.id, kill=True)
            except Exception:
                pass
        if session_kill and session_kill.container_id:
            try:
                container_manager.close_session(session_kill.id, kill=True)
            except Exception:
                pass
516
tests/test_mcp_commands.py
Normal file
@@ -0,0 +1,516 @@
"""
Tests for the MCP server management commands.
"""

import pytest
from unittest.mock import patch
from cubbi.cli import app


def test_mcp_list_empty(cli_runner, patched_config_manager):
    """Test the 'cubbi mcp list' command with no MCPs configured."""
    # Make sure mcps is empty
    patched_config_manager.set("mcps", [])

    with patch("cubbi.cli.mcp_manager.list_mcps") as mock_list_mcps:
        mock_list_mcps.return_value = []

        result = cli_runner.invoke(app, ["mcp", "list"])

        assert result.exit_code == 0
        assert "No MCP servers configured" in result.stdout


def test_mcp_add_remote(cli_runner, isolate_cubbi_config):
    """Test adding a remote MCP server and listing it."""
    # Add a remote MCP server
    result = cli_runner.invoke(
        app,
        [
            "mcp",
            "add-remote",
            "test-remote-mcp",
            "http://mcp-server.example.com/sse",
            "--header",
            "Authorization=Bearer test-token",
        ],
    )

    assert result.exit_code == 0
    assert "Added remote MCP server" in result.stdout

    # List MCP servers
    result = cli_runner.invoke(app, ["mcp", "list"])

    assert result.exit_code == 0
    assert "test-remote-mcp" in result.stdout
    assert "remote" in result.stdout
    # Check partial URL since it may be truncated in the table display
    assert "http://mcp-se" in result.stdout  # Truncated in table view


def test_mcp_add(cli_runner, isolate_cubbi_config):
    """Test adding a proxy-based MCP server and listing it."""
    # Add a Docker MCP server
    result = cli_runner.invoke(
        app,
        [
            "mcp",
            "add",
            "test-docker-mcp",
            "mcp/github:latest",
            "--command",
            "github-mcp",
            "--env",
            "GITHUB_TOKEN=test-token",
        ],
    )

    assert result.exit_code == 0
    assert "Added MCP server" in result.stdout

    # List MCP servers
    result = cli_runner.invoke(app, ["mcp", "list"])

    assert result.exit_code == 0
    assert "test-docker-mcp" in result.stdout
    assert "proxy" in result.stdout  # It's a proxy-based MCP
    assert "mcp/github:la" in result.stdout  # Truncated in table view


def test_mcp_remove(cli_runner, patched_config_manager):
    """Test removing an MCP server."""
    # Add a remote MCP server
    patched_config_manager.set(
        "mcps",
        [
            {
                "name": "test-mcp",
                "type": "remote",
                "url": "http://test-server.com/sse",
                "headers": {"Authorization": "Bearer test-token"},
            }
        ],
    )

    # Mock the container_manager.list_sessions to return sessions without MCPs
    with patch("cubbi.cli.container_manager.list_sessions") as mock_list_sessions:
        mock_list_sessions.return_value = []

        # Mock the remove_mcp method
        with patch("cubbi.cli.mcp_manager.remove_mcp") as mock_remove_mcp:
            # Make remove_mcp return True (successful removal)
            mock_remove_mcp.return_value = True

            # Remove the MCP server
            result = cli_runner.invoke(app, ["mcp", "remove", "test-mcp"])

            # Just check it ran successfully with exit code 0
            assert result.exit_code == 0
            assert "Removed MCP server 'test-mcp'" in result.stdout


def test_mcp_remove_with_active_sessions(cli_runner, patched_config_manager):
    """Test removing an MCP server that is used by active sessions."""
    from cubbi.models import Session, SessionStatus

    # Add a remote MCP server
    patched_config_manager.set(
        "mcps",
        [
            {
                "name": "test-mcp",
                "type": "remote",
                "url": "http://test-server.com/sse",
                "headers": {"Authorization": "Bearer test-token"},
            }
        ],
    )

    # Create mock sessions that use the MCP
    mock_sessions = [
        Session(
            id="session-1",
            name="test-session-1",
            image="goose",
            status=SessionStatus.RUNNING,
            container_id="container-1",
            mcps=["test-mcp", "other-mcp"],
        ),
        Session(
            id="session-2",
            name="test-session-2",
            image="goose",
            status=SessionStatus.RUNNING,
            container_id="container-2",
            mcps=["other-mcp"],  # This one doesn't use test-mcp
        ),
        Session(
            id="session-3",
            name="test-session-3",
            image="goose",
            status=SessionStatus.RUNNING,
            container_id="container-3",
            mcps=["test-mcp"],  # This one uses test-mcp
        ),
    ]

    # Mock the container_manager.list_sessions to return our sessions
    with patch("cubbi.cli.container_manager.list_sessions") as mock_list_sessions:
        mock_list_sessions.return_value = mock_sessions

        # Mock the remove_mcp method
        with patch("cubbi.cli.mcp_manager.remove_mcp") as mock_remove_mcp:
            # Make remove_mcp return True (successful removal)
            mock_remove_mcp.return_value = True

            # Remove the MCP server
            result = cli_runner.invoke(app, ["mcp", "remove", "test-mcp"])

            # Check it ran successfully with exit code 0
            assert result.exit_code == 0
            assert "Removed MCP server 'test-mcp'" in result.stdout
            # Check warning about affected sessions
            assert (
                "Warning: Found 2 active sessions using MCP 'test-mcp'" in result.stdout
            )
            assert "session-1" in result.stdout
            assert "session-3" in result.stdout
            # session-2 should not be mentioned since it doesn't use test-mcp
            assert "session-2" not in result.stdout


def test_mcp_remove_nonexistent(cli_runner, patched_config_manager):
    """Test removing a non-existent MCP server."""
    # No MCPs configured
    patched_config_manager.set("mcps", [])

    # Mock the container_manager.list_sessions to return empty list
    with patch("cubbi.cli.container_manager.list_sessions") as mock_list_sessions:
        mock_list_sessions.return_value = []

        # Mock the remove_mcp method to return False (MCP not found)
        with patch("cubbi.cli.mcp_manager.remove_mcp") as mock_remove_mcp:
            mock_remove_mcp.return_value = False

            # Try to remove a non-existent MCP server
            result = cli_runner.invoke(app, ["mcp", "remove", "nonexistent-mcp"])

            # Check it ran successfully but reported not found
            assert result.exit_code == 0
            assert "MCP server 'nonexistent-mcp' not found" in result.stdout


def test_session_mcps_attribute():
    """Test that Session model has mcps attribute and can be populated correctly."""
    from cubbi.models import Session, SessionStatus

    # Test that Session can be created with mcps attribute
    session = Session(
        id="test-session",
        name="test-session",
        image="goose",
        status=SessionStatus.RUNNING,
        container_id="test-container",
        mcps=["mcp1", "mcp2"],
    )

    assert session.mcps == ["mcp1", "mcp2"]

    # Test that Session can be created with empty mcps list
    session_empty = Session(
        id="test-session-2",
        name="test-session-2",
        image="goose",
        status=SessionStatus.RUNNING,
        container_id="test-container-2",
    )

    assert session_empty.mcps == []  # Should default to empty list


def test_session_mcps_from_container_labels():
    """Test that Session mcps are correctly populated from container labels."""
    from unittest.mock import Mock
    from cubbi.container import ContainerManager

    # Mock a container with MCP labels
    mock_container = Mock()
    mock_container.id = "test-container-id"
    mock_container.status = "running"
    mock_container.labels = {
        "cubbi.session": "true",
        "cubbi.session.id": "test-session",
        "cubbi.session.name": "test-session-name",
        "cubbi.image": "goose",
        "cubbi.mcps": "mcp1,mcp2,mcp3",  # Test with multiple MCPs
    }
    mock_container.attrs = {"NetworkSettings": {"Ports": {}}}

    # Mock Docker client
    mock_client = Mock()
    mock_client.containers.list.return_value = [mock_container]

    # Create container manager with mocked client
    with patch("cubbi.container.docker.from_env") as mock_docker:
        mock_docker.return_value = mock_client
        mock_client.ping.return_value = True

        container_manager = ContainerManager()
        sessions = container_manager.list_sessions()

        assert len(sessions) == 1
        session = sessions[0]
        assert session.id == "test-session"
        assert session.mcps == ["mcp1", "mcp2", "mcp3"]
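
        # Note: the expected list matches splitting the comma-separated label
        # value, so the manager presumably does something equivalent to this
        # sketch (an assumption, not the actual cubbi source):
        #     mcps = labels["cubbi.mcps"].split(",") if labels.get("cubbi.mcps") else []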


def test_session_mcps_from_empty_container_labels():
    """Test that Session mcps are correctly handled when container has no MCP labels."""
    from unittest.mock import Mock
    from cubbi.container import ContainerManager

    # Mock a container without MCP labels
    mock_container = Mock()
    mock_container.id = "test-container-id"
    mock_container.status = "running"
    mock_container.labels = {
        "cubbi.session": "true",
        "cubbi.session.id": "test-session",
        "cubbi.session.name": "test-session-name",
        "cubbi.image": "goose",
        # No cubbi.mcps label
    }
    mock_container.attrs = {"NetworkSettings": {"Ports": {}}}

    # Mock Docker client
    mock_client = Mock()
    mock_client.containers.list.return_value = [mock_container]

    # Create container manager with mocked client
    with patch("cubbi.container.docker.from_env") as mock_docker:
        mock_docker.return_value = mock_client
        mock_client.ping.return_value = True

        container_manager = ContainerManager()
        sessions = container_manager.list_sessions()

        assert len(sessions) == 1
        session = sessions[0]
        assert session.id == "test-session"
        assert session.mcps == []  # Should be empty list when no MCPs


@pytest.mark.requires_docker
def test_mcp_status(cli_runner, patched_config_manager, mock_container_manager):
    """Test the MCP status command."""
    # Add a Docker MCP
    patched_config_manager.set(
        "mcps",
        [
            {
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
                "env": {"TEST_ENV": "test-value"},
            }
        ],
    )

    # First mock get_mcp to return our MCP config
    with patch("cubbi.cli.mcp_manager.get_mcp") as mock_get_mcp:
        mock_get_mcp.return_value = {
            "name": "test-docker-mcp",
            "type": "docker",
            "image": "mcp/test:latest",
            "command": "test-command",
            "env": {"TEST_ENV": "test-value"},
        }

        # Then mock the get_mcp_status method
        with patch("cubbi.cli.mcp_manager.get_mcp_status") as mock_get_status:
            mock_get_status.return_value = {
                "status": "running",
                "container_id": "test-container-id",
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "ports": {"8080/tcp": 8080},
                "created": "2023-01-01T00:00:00Z",
            }

            # Check MCP status
            result = cli_runner.invoke(app, ["mcp", "status", "test-docker-mcp"])

            assert result.exit_code == 0
            assert "test-docker-mcp" in result.stdout
            assert "running" in result.stdout
            assert "mcp/test:latest" in result.stdout


@pytest.mark.requires_docker
def test_mcp_start(cli_runner, isolate_cubbi_config):
    """Test starting an MCP server."""
    mcp_manager = isolate_cubbi_config["mcp_manager"]

    # Add a Docker MCP
    isolate_cubbi_config["user_config"].set(
        "mcps",
        [
            {
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
            }
        ],
    )

    # Mock the start_mcp method to avoid actual Docker operations
    with patch.object(
        mcp_manager,
        "start_mcp",
        return_value={
            "container_id": "test-container-id",
            "status": "running",
        },
    ):
        # Start the MCP
        result = cli_runner.invoke(app, ["mcp", "start", "test-docker-mcp"])

        assert result.exit_code == 0
        assert "Started MCP server" in result.stdout
        assert "test-docker-mcp" in result.stdout


@pytest.mark.requires_docker
def test_mcp_stop(cli_runner, isolate_cubbi_config):
    """Test stopping an MCP server."""
    mcp_manager = isolate_cubbi_config["mcp_manager"]

    # Add a Docker MCP
    isolate_cubbi_config["user_config"].set(
        "mcps",
        [
            {
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
            }
        ],
    )

    # Mock the stop_mcp method to avoid actual Docker operations
    with patch.object(mcp_manager, "stop_mcp", return_value=True):
        # Stop the MCP
        result = cli_runner.invoke(app, ["mcp", "stop", "test-docker-mcp"])

        assert result.exit_code == 0
        assert "Stopped and removed MCP server" in result.stdout
        assert "test-docker-mcp" in result.stdout


@pytest.mark.requires_docker
def test_mcp_restart(cli_runner, isolate_cubbi_config):
    """Test restarting an MCP server."""
    mcp_manager = isolate_cubbi_config["mcp_manager"]

    # Add a Docker MCP
    isolate_cubbi_config["user_config"].set(
        "mcps",
        [
            {
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
            }
        ],
    )

    # Mock the restart_mcp method to avoid actual Docker operations
    with patch.object(
        mcp_manager,
        "restart_mcp",
        return_value={
            "container_id": "test-container-id",
            "status": "running",
        },
    ):
        # Restart the MCP
        result = cli_runner.invoke(app, ["mcp", "restart", "test-docker-mcp"])

        assert result.exit_code == 0
        assert "Restarted MCP server" in result.stdout
        assert "test-docker-mcp" in result.stdout


@pytest.mark.requires_docker
def test_mcp_logs(cli_runner, patched_config_manager, mock_container_manager):
    """Test viewing MCP server logs."""
    # Add a Docker MCP
    patched_config_manager.set(
        "mcps",
        [
            {
                "name": "test-docker-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
            }
        ],
    )

    # Mock the logs operation
    with patch("cubbi.cli.mcp_manager.get_mcp_logs") as mock_get_logs:
        mock_get_logs.return_value = "Test log output"

        # View MCP logs
        result = cli_runner.invoke(app, ["mcp", "logs", "test-docker-mcp"])

        assert result.exit_code == 0
        assert "Test log output" in result.stdout


def test_session_with_mcp(cli_runner, patched_config_manager, mock_container_manager):
    """Test creating a session with an MCP server attached."""
    # Add an MCP server
    patched_config_manager.set(
        "mcps",
        [
            {
                "name": "test-mcp",
                "type": "docker",
                "image": "mcp/test:latest",
                "command": "test-command",
            }
        ],
    )

    # Mock the session creation with MCP
    from cubbi.models import Session, SessionStatus

    # timestamp no longer needed since we don't use created_at in Session
    mock_container_manager.create_session.return_value = Session(
        id="test-session-id",
        name="test-session",
        image="goose",
        status=SessionStatus.RUNNING,
        container_id="test-container-id",
        ports={},
    )

    # Create a session with MCP
    result = cli_runner.invoke(app, ["session", "create", "--mcp", "test-mcp"])

    assert result.exit_code == 0
    assert "Session created successfully" in result.stdout
    assert "test-session" in result.stdout
    # Check that the create_session was called with the mcp parameter
    assert mock_container_manager.create_session.called
    # The keyword arguments are in the second element of call_args
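    # (call_args is an (args, kwargs) pair, so call_args[1] -- equivalently
    # call_args.kwargs -- is the keyword-argument dict.)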
    kwargs = mock_container_manager.create_session.call_args[1]
    assert "mcp" in kwargs
    assert "test-mcp" in kwargs["mcp"]
82
tests/test_mcp_port_binding.py
Normal file
@@ -0,0 +1,82 @@
"""
Integration test for MCP port binding.
"""

import time
import uuid

from conftest import requires_docker
from cubbi.mcp import MCPManager


@requires_docker
def test_mcp_port_binding():
    """Test that MCP containers don't bind to host ports."""
    mcp_manager = MCPManager()

    # Add a proxy MCP
    mcp_name = f"test-mcp-{uuid.uuid4().hex[:8]}"
    mcp_name2 = None

    try:
        # Let's check if host port binding was removed
        mcps_before = len(mcp_manager.list_mcp_containers())

        # Use alpine image for a simple test
        mcp_manager.add_docker_mcp(
            name=mcp_name,
            image="alpine:latest",
            command="sleep 60",  # Keep container running for the test
            env={"TEST": "test"},
        )

        # Start the MCP
        result = mcp_manager.start_mcp(mcp_name)
        print(f"Start result: {result}")

        # Give container time to start
        time.sleep(2)

        # Start another MCP to verify we can run multiple instances
        mcp_name2 = f"test-mcp2-{uuid.uuid4().hex[:8]}"
        mcp_manager.add_docker_mcp(
            name=mcp_name2,
            image="alpine:latest",
            command="sleep 60",  # Keep container running for the test
            env={"TEST": "test2"},
        )

        # Start the second MCP
        result2 = mcp_manager.start_mcp(mcp_name2)
        print(f"Start result 2: {result2}")

        # Give container time to start
        time.sleep(2)

        # Check how many containers we have now
        mcps_after = len(mcp_manager.list_mcp_containers())

        # We should have two more containers than before
        assert mcps_after >= mcps_before + 2, "Not all MCP containers were created"

        # Get container details and verify no host port bindings
        all_mcps = mcp_manager.list_mcp_containers()
        print(f"All MCPs: {all_mcps}")

        # Test successful - we were able to start multiple MCPs without port conflicts
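        # A stricter check would inspect each container's port bindings via
        # the Docker SDK, e.g. (a sketch that assumes list_mcp_containers()
        # exposes a container id per entry, which is not verified here):
        #     client = docker.from_env()
        #     for info in all_mcps:
        #         ports = client.containers.get(info["id"]).attrs["NetworkSettings"]["Ports"]
        #         assert all(not bindings for bindings in ports.values())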

    finally:
        # Clean up
        try:
            if mcp_name:
                mcp_manager.stop_mcp(mcp_name)
                mcp_manager.remove_mcp(mcp_name)
        except Exception as e:
            print(f"Error cleaning up {mcp_name}: {e}")

        try:
            if mcp_name2:
                mcp_manager.stop_mcp(mcp_name2)
                mcp_manager.remove_mcp(mcp_name2)
        except Exception as e:
            print(f"Error cleaning up {mcp_name2}: {e}")
311
tests/test_session_commands.py
Normal file
@@ -0,0 +1,311 @@
"""
Tests for the session management commands.
"""

from unittest.mock import patch

from cubbi.cli import app


def test_session_list_empty(cli_runner, mock_container_manager):
    """Test 'cubbi session list' with no active sessions."""
    mock_container_manager.list_sessions.return_value = []

    result = cli_runner.invoke(app, ["session", "list"])

    assert result.exit_code == 0
    assert "No active sessions found" in result.stdout


def test_session_list_with_sessions(cli_runner, mock_container_manager):
    """Test 'cubbi session list' with active sessions."""
    # Create a mock session and set list_sessions to return it
    from cubbi.models import Session, SessionStatus

    mock_session = Session(
        id="test-session-id",
        name="test-session",
        image="goose",
        status=SessionStatus.RUNNING,
        ports={"8080": "8080"},
    )
    mock_container_manager.list_sessions.return_value = [mock_session]

    result = cli_runner.invoke(app, ["session", "list"])

    assert result.exit_code == 0
    # The output display can vary depending on terminal width, so just check
    # that the command executed successfully


def test_session_create_basic(cli_runner, mock_container_manager):
    """Test 'cubbi session create' with basic options."""
    # We need to patch user_config.get with a side_effect to handle different keys
    with patch("cubbi.cli.user_config") as mock_user_config:
        # Handle different key requests appropriately
        def mock_get_side_effect(key, default=None):
            if key == "defaults.image":
                return "goose"
            elif key == "defaults.volumes":
                return []  # Return empty list for volumes
            elif key == "defaults.connect":
                return True
            elif key == "defaults.mount_local":
                return True
            elif key == "defaults.networks":
                return []
            return default
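
        # (Assigning a callable to side_effect makes each mock_user_config.get()
        # call delegate to that callable and return its result.)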
mock_user_config.get.side_effect = mock_get_side_effect
|
||||
mock_user_config.get_environment_variables.return_value = {}
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "create"])
|
||||
|
||||
if result.exit_code != 0:
|
||||
print(f"Error: {result.exception}")
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
|
||||
# Verify container_manager was called with the expected image
|
||||
mock_container_manager.create_session.assert_called_once()
|
||||
assert (
|
||||
mock_container_manager.create_session.call_args[1]["image_name"] == "goose"
|
||||
)
|
||||
|
||||
|
||||
def test_session_close(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session close' command."""
|
||||
mock_container_manager.close_session.return_value = True
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "close", "test-session-id"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "closed successfully" in result.stdout
|
||||
mock_container_manager.close_session.assert_called_once_with(
|
||||
"test-session-id", kill=False
|
||||
)
|
||||
|
||||
|
||||
def test_session_close_all(cli_runner, mock_container_manager):
|
||||
"""Test 'cubbi session close --all' command."""
|
||||
# Set up mock sessions
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# timestamp no longer needed since we don't use created_at in Session
|
||||
mock_sessions = [
|
||||
Session(
|
||||
id=f"session-{i}",
|
||||
name=f"Session {i}",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={},
|
||||
)
|
||||
for i in range(3)
|
||||
]
|
||||
|
||||
mock_container_manager.list_sessions.return_value = mock_sessions
|
||||
mock_container_manager.close_all_sessions.return_value = (3, True)
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "close", "--all"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "3 sessions closed successfully" in result.stdout
|
||||
mock_container_manager.close_all_sessions.assert_called_once()
|
||||
|
||||
|
||||
def test_session_create_with_ports(
|
||||
cli_runner, mock_container_manager, patched_config_manager
|
||||
):
|
||||
"""Test session creation with port forwarding."""
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# Mock the create_session to return a session with ports
|
||||
mock_session = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={8000: 32768, 3000: 32769},
|
||||
)
|
||||
mock_container_manager.create_session.return_value = mock_session
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "create", "--port", "8000,3000"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
assert "Forwarding ports: 8000, 3000" in result.stdout
|
||||
|
||||
# Verify create_session was called with correct ports
|
||||
mock_container_manager.create_session.assert_called_once()
|
||||
call_args = mock_container_manager.create_session.call_args
|
||||
assert call_args.kwargs["ports"] == [8000, 3000]
|
||||
|
||||
|
||||
def test_session_create_with_default_ports(
|
||||
cli_runner, mock_container_manager, patched_config_manager
|
||||
):
|
||||
"""Test session creation using default ports."""
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# Set up default ports
|
||||
patched_config_manager.set("defaults.ports", [8080, 9000])
|
||||
|
||||
# Mock the create_session to return a session with ports
|
||||
mock_session = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={8080: 32768, 9000: 32769},
|
||||
)
|
||||
mock_container_manager.create_session.return_value = mock_session
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "create"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
assert "Forwarding ports: 8080, 9000" in result.stdout
|
||||
|
||||
# Verify create_session was called with default ports
|
||||
mock_container_manager.create_session.assert_called_once()
|
||||
call_args = mock_container_manager.create_session.call_args
|
||||
assert call_args.kwargs["ports"] == [8080, 9000]
|
||||
|
||||
|
||||
def test_session_create_combine_default_and_custom_ports(
|
||||
cli_runner, mock_container_manager, patched_config_manager
|
||||
):
|
||||
"""Test session creation combining default and custom ports."""
|
||||
from cubbi.models import Session, SessionStatus
|
||||
|
||||
# Set up default ports
|
||||
patched_config_manager.set("defaults.ports", [8080])
|
||||
|
||||
# Mock the create_session to return a session with combined ports
|
||||
mock_session = Session(
|
||||
id="test-session-id",
|
||||
name="test-session",
|
||||
image="goose",
|
||||
status=SessionStatus.RUNNING,
|
||||
ports={8080: 32768, 3000: 32769},
|
||||
)
|
||||
mock_container_manager.create_session.return_value = mock_session
|
||||
|
||||
result = cli_runner.invoke(app, ["session", "create", "--port", "3000"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "Session created successfully" in result.stdout
|
||||
# Ports should be combined and deduplicated
|
||||
assert "Forwarding ports:" in result.stdout
|
||||
|
||||
# Verify create_session was called with combined ports
|
||||
mock_container_manager.create_session.assert_called_once()
|
||||
call_args = mock_container_manager.create_session.call_args
|
||||
# Should contain both default (8080) and custom (3000) ports
|
||||
    assert set(call_args.kwargs["ports"]) == {8080, 3000}


def test_session_create_invalid_port_format(
    cli_runner, mock_container_manager, patched_config_manager
):
    """Test session creation with an invalid port format."""
    result = cli_runner.invoke(app, ["session", "create", "--port", "invalid"])

    assert result.exit_code == 0
    assert "Warning: Ignoring invalid port format" in result.stdout

    # Session creation should continue with an empty ports list (the invalid port is ignored)
    mock_container_manager.create_session.assert_called_once()
    call_args = mock_container_manager.create_session.call_args
    assert call_args.kwargs["ports"] == []  # Invalid port should be ignored


def test_session_create_invalid_port_range(
    cli_runner, mock_container_manager, patched_config_manager
):
    """Test session creation with a port outside the valid range."""
    result = cli_runner.invoke(app, ["session", "create", "--port", "70000"])

    assert result.exit_code == 0
    assert "Error: Invalid ports [70000]" in result.stdout

    # Session creation should not happen due to the early return
    mock_container_manager.create_session.assert_not_called()


def test_session_list_shows_ports(cli_runner, mock_container_manager):
    """Test that session list shows port mappings."""
    from cubbi.models import Session, SessionStatus

    mock_session = Session(
        id="test-session-id",
        name="test-session",
        image="goose",
        status=SessionStatus.RUNNING,
        ports={8000: 32768, 3000: 32769},
    )
    mock_container_manager.list_sessions.return_value = [mock_session]

    result = cli_runner.invoke(app, ["session", "list"])

    assert result.exit_code == 0
    assert "8000:32768" in result.stdout
    assert "3000:32769" in result.stdout


def test_session_close_with_kill_flag(
    cli_runner, mock_container_manager, patched_config_manager
):
    """Test session close with the --kill flag."""
    result = cli_runner.invoke(app, ["session", "close", "test-session-id", "--kill"])

    assert result.exit_code == 0

    # Verify close_session was called with kill=True
    mock_container_manager.close_session.assert_called_once_with(
        "test-session-id", kill=True
    )


def test_session_close_all_with_kill_flag(
    cli_runner, mock_container_manager, patched_config_manager
):
    """Test session close --all with the --kill flag."""
    from cubbi.models import Session, SessionStatus

    # Mock some sessions to close
    mock_sessions = [
        Session(
            id="session-1",
            name="Session 1",
            image="goose",
            status=SessionStatus.RUNNING,
            ports={},
        ),
        Session(
            id="session-2",
            name="Session 2",
            image="goose",
            status=SessionStatus.RUNNING,
            ports={},
        ),
    ]
    mock_container_manager.list_sessions.return_value = mock_sessions
    mock_container_manager.close_all_sessions.return_value = (2, True)

    result = cli_runner.invoke(app, ["session", "close", "--all", "--kill"])

    assert result.exit_code == 0
    assert "2 sessions closed successfully" in result.stdout

    # Verify close_all_sessions was called with kill=True
    mock_container_manager.close_all_sessions.assert_called_once()
    call_args = mock_container_manager.close_all_sessions.call_args
    assert call_args.kwargs["kill"] is True


# More complex tests that need a real Docker daemon live in
# test_integration_docker.py; they run automatically when Docker is available.
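For reference, here is a minimal sketch of the port-parsing behaviour these tests pin down. This is an illustration only, not cubbi's actual source: the helper name parse_ports, its signature, and its location are assumptions; only the messages and the valid/invalid split are taken from the assertions above.

# Hypothetical sketch (not cubbi source): malformed specs are warned
# about and skipped, while numerically valid but out-of-range ports
# are collected so the caller can report them and return early.
def parse_ports(specs: list[str]) -> tuple[list[int], list[int]]:
    valid: list[int] = []
    out_of_range: list[int] = []
    for spec in specs:
        try:
            port = int(spec)
        except ValueError:
            print(f"Warning: Ignoring invalid port format: {spec}")
            continue
        if 1 <= port <= 65535:
            valid.append(port)
        else:
            out_of_range.append(port)
    return valid, out_of_range

# e.g. parse_ports(["invalid"]) -> ([], []) with a warning printed, and
# parse_ports(["70000"]) -> ([], [70000]) so the CLI can print
# "Error: Invalid ports [70000]" and skip session creation entirely.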
437
uv.lock
generated
@@ -1,57 +1,58 @@
version = 1
revision = 3
requires-python = ">=3.12"

[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
]

[[package]]
name = "certifi"
version = "2025.1.31"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 }
sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577, upload-time = "2025-01-31T02:16:47.166Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 },
{ url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393, upload-time = "2025-01-31T02:16:45.015Z" },
]

[[package]]
name = "charset-normalizer"
version = "3.4.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 }
sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188, upload-time = "2024-12-24T18:12:35.43Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 },
{ url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 },
{ url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 },
{ url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 },
{ url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 },
{ url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 },
{ url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 },
{ url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 },
{ url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 },
{ url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 },
{ url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 },
{ url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 },
{ url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 },
{ url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 },
{ url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 },
{ url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 },
{ url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 },
{ url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 },
{ url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 },
{ url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 },
{ url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 },
{ url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 },
{ url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 },
{ url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 },
{ url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 },
{ url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 },
{ url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 },
{ url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105, upload-time = "2024-12-24T18:10:38.83Z" },
{ url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404, upload-time = "2024-12-24T18:10:44.272Z" },
{ url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423, upload-time = "2024-12-24T18:10:45.492Z" },
{ url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184, upload-time = "2024-12-24T18:10:47.898Z" },
{ url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268, upload-time = "2024-12-24T18:10:50.589Z" },
{ url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601, upload-time = "2024-12-24T18:10:52.541Z" },
{ url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098, upload-time = "2024-12-24T18:10:53.789Z" },
{ url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520, upload-time = "2024-12-24T18:10:55.048Z" },
{ url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852, upload-time = "2024-12-24T18:10:57.647Z" },
{ url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488, upload-time = "2024-12-24T18:10:59.43Z" },
{ url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192, upload-time = "2024-12-24T18:11:00.676Z" },
{ url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550, upload-time = "2024-12-24T18:11:01.952Z" },
{ url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785, upload-time = "2024-12-24T18:11:03.142Z" },
{ url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698, upload-time = "2024-12-24T18:11:05.834Z" },
{ url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162, upload-time = "2024-12-24T18:11:07.064Z" },
{ url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263, upload-time = "2024-12-24T18:11:08.374Z" },
{ url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966, upload-time = "2024-12-24T18:11:09.831Z" },
{ url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992, upload-time = "2024-12-24T18:11:12.03Z" },
{ url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162, upload-time = "2024-12-24T18:11:13.372Z" },
{ url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972, upload-time = "2024-12-24T18:11:14.628Z" },
{ url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095, upload-time = "2024-12-24T18:11:17.672Z" },
{ url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668, upload-time = "2024-12-24T18:11:18.989Z" },
{ url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073, upload-time = "2024-12-24T18:11:21.507Z" },
{ url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732, upload-time = "2024-12-24T18:11:22.774Z" },
{ url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391, upload-time = "2024-12-24T18:11:24.139Z" },
{ url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702, upload-time = "2024-12-24T18:11:26.535Z" },
{ url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" },
]

[[package]]
@@ -59,74 +60,32 @@ name = "click"
version = "8.1.8"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 }
sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 },
{ url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" },
]

[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]

[[package]]
name = "docker"
version = "7.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pywin32", marker = "sys_platform == 'win32'" },
{ name = "requests" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 },
]

[[package]]
name = "idna"
version = "3.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
]

[[package]]
name = "iniconfig"
version = "2.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 },
]

[[package]]
name = "markdown-it-py"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mdurl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
]

[[package]]
name = "mcontainer"
version = "0.1.0"
name = "cubbi"
version = "0.5.0"
source = { editable = "." }
dependencies = [
{ name = "docker" },
{ name = "pydantic" },
{ name = "pyyaml" },
{ name = "questionary" },
{ name = "requests" },
{ name = "rich" },
{ name = "typer" },
]
@@ -138,6 +97,11 @@ dev = [
{ name = "ruff" },
]

[package.dev-dependencies]
dev = [
{ name = "pytest" },
]

[package.metadata]
requires-dist = [
{ name = "docker", specifier = ">=7.0.0" },
@@ -145,18 +109,68 @@ requires-dist = [
{ name = "pydantic", specifier = ">=2.5.0" },
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=7.4.0" },
{ name = "pyyaml", specifier = ">=6.0.1" },
{ name = "questionary", specifier = ">=2.0.0" },
{ name = "requests", specifier = ">=2.32.3" },
{ name = "rich", specifier = ">=13.6.0" },
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1.9" },
{ name = "typer", specifier = ">=0.9.0" },
]
provides-extras = ["dev"]

[package.metadata.requires-dev]
dev = [{ name = "pytest", specifier = ">=8.3.5" }]

[[package]]
name = "docker"
version = "7.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pywin32", marker = "sys_platform == 'win32'" },
{ name = "requests" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" },
]

[[package]]
name = "idna"
version = "3.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
]

[[package]]
name = "iniconfig"
version = "2.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" },
]

[[package]]
name = "markdown-it-py"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mdurl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
]

[[package]]
name = "mdurl"
version = "0.1.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 }
sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
]

[[package]]
@@ -167,48 +181,60 @@ dependencies = [
{ name = "mypy-extensions" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717 }
sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981 },
{ url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175 },
{ url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675 },
{ url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020 },
{ url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582 },
{ url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614 },
{ url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592 },
{ url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611 },
{ url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443 },
{ url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541 },
{ url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348 },
{ url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648 },
{ url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777 },
{ url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" },
{ url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" },
{ url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" },
{ url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" },
{ url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" },
{ url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" },
{ url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" },
{ url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" },
{ url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" },
{ url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" },
{ url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" },
{ url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" },
{ url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" },
]

[[package]]
name = "mypy-extensions"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 }
sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433, upload-time = "2023-02-04T12:11:27.157Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 },
{ url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" },
]

[[package]]
name = "packaging"
version = "24.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 }
sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 },
{ url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" },
]

[[package]]
name = "pluggy"
version = "1.5.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 }
sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 },
{ url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" },
]

[[package]]
name = "prompt-toolkit"
version = "3.0.51"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "wcwidth" },
]
sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" },
]

[[package]]
@@ -220,9 +246,9 @@ dependencies = [
{ name = "pydantic-core" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 }
sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 },
{ url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" },
]

[[package]]
@@ -232,45 +258,45 @@ source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 }
sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 },
{ url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 },
{ url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 },
{ url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 },
{ url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 },
{ url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 },
{ url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 },
{ url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 },
{ url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 },
{ url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 },
{ url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 },
{ url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 },
{ url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 },
{ url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 },
{ url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 },
{ url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 },
{ url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 },
{ url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 },
{ url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 },
{ url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 },
{ url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 },
{ url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 },
{ url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 },
{ url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 },
{ url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 },
{ url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 },
{ url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 },
{ url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 },
{ url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127, upload-time = "2024-12-18T11:28:30.346Z" },
{ url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340, upload-time = "2024-12-18T11:28:32.521Z" },
{ url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900, upload-time = "2024-12-18T11:28:34.507Z" },
{ url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177, upload-time = "2024-12-18T11:28:36.488Z" },
{ url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046, upload-time = "2024-12-18T11:28:39.409Z" },
{ url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386, upload-time = "2024-12-18T11:28:41.221Z" },
{ url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060, upload-time = "2024-12-18T11:28:44.709Z" },
{ url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870, upload-time = "2024-12-18T11:28:46.839Z" },
{ url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822, upload-time = "2024-12-18T11:28:48.896Z" },
{ url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364, upload-time = "2024-12-18T11:28:50.755Z" },
{ url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303, upload-time = "2024-12-18T11:28:54.122Z" },
{ url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064, upload-time = "2024-12-18T11:28:56.074Z" },
{ url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046, upload-time = "2024-12-18T11:28:58.107Z" },
{ url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092, upload-time = "2024-12-18T11:29:01.335Z" },
{ url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" },
||||
{ url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273, upload-time = "2024-12-18T11:29:05.306Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027, upload-time = "2024-12-18T11:29:07.294Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888, upload-time = "2024-12-18T11:29:09.249Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738, upload-time = "2024-12-18T11:29:11.23Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138, upload-time = "2024-12-18T11:29:16.396Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025, upload-time = "2024-12-18T11:29:20.25Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633, upload-time = "2024-12-18T11:29:23.877Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404, upload-time = "2024-12-18T11:29:25.872Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130, upload-time = "2024-12-18T11:29:29.252Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946, upload-time = "2024-12-18T11:29:31.338Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387, upload-time = "2024-12-18T11:29:33.481Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453, upload-time = "2024-12-18T11:29:35.533Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pygments"
|
||||
version = "2.19.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -283,9 +309,9 @@ dependencies = [
|
||||
{ name = "packaging" },
|
||||
{ name = "pluggy" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634 },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -293,38 +319,50 @@ name = "pywin32"
|
||||
version = "309"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/20/2c/b0240b14ff3dba7a8a7122dc9bbf7fbd21ed0e8b57c109633675b5d1761f/pywin32-309-cp312-cp312-win32.whl", hash = "sha256:de9acacced5fa82f557298b1fed5fef7bd49beee04190f68e1e4783fbdc19926", size = 8790648 },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/11/c36884c732e2b3397deee808b5dac1abbb170ec37f94c6606fcb04d1e9d7/pywin32-309-cp312-cp312-win_amd64.whl", hash = "sha256:6ff9eebb77ffc3d59812c68db33c0a7817e1337e3537859499bd27586330fc9e", size = 9497399 },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/9f/79703972958f8ba3fd38bc9bf1165810bd75124982419b0cc433a2894d46/pywin32-309-cp312-cp312-win_arm64.whl", hash = "sha256:619f3e0a327b5418d833f44dc87859523635cf339f86071cc65a13c07be3110f", size = 8454122 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/c3/51aca6887cc5e410aa4cdc55662cf8438212440c67335c3f141b02eb8d52/pywin32-309-cp313-cp313-win32.whl", hash = "sha256:008bffd4afd6de8ca46c6486085414cc898263a21a63c7f860d54c9d02b45c8d", size = 8789700 },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/66/330f265140fa814b4ed1bf16aea701f9d005f8f4ab57a54feb17f53afe7e/pywin32-309-cp313-cp313-win_amd64.whl", hash = "sha256:bd0724f58492db4cbfbeb1fcd606495205aa119370c0ddc4f70e5771a3ab768d", size = 9496714 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/84/9a51e6949a03f25cd329ece54dbf0846d57fadd2e79046c3b8d140aaa132/pywin32-309-cp313-cp313-win_arm64.whl", hash = "sha256:8fd9669cfd41863b688a1bc9b1d4d2d76fd4ba2128be50a70b0ea66b8d37953b", size = 8453052 },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/2c/b0240b14ff3dba7a8a7122dc9bbf7fbd21ed0e8b57c109633675b5d1761f/pywin32-309-cp312-cp312-win32.whl", hash = "sha256:de9acacced5fa82f557298b1fed5fef7bd49beee04190f68e1e4783fbdc19926", size = 8790648, upload-time = "2025-03-09T18:04:03.253Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/11/c36884c732e2b3397deee808b5dac1abbb170ec37f94c6606fcb04d1e9d7/pywin32-309-cp312-cp312-win_amd64.whl", hash = "sha256:6ff9eebb77ffc3d59812c68db33c0a7817e1337e3537859499bd27586330fc9e", size = 9497399, upload-time = "2025-03-09T18:04:05.388Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/9f/79703972958f8ba3fd38bc9bf1165810bd75124982419b0cc433a2894d46/pywin32-309-cp312-cp312-win_arm64.whl", hash = "sha256:619f3e0a327b5418d833f44dc87859523635cf339f86071cc65a13c07be3110f", size = 8454122, upload-time = "2025-03-09T18:04:07.217Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/c3/51aca6887cc5e410aa4cdc55662cf8438212440c67335c3f141b02eb8d52/pywin32-309-cp313-cp313-win32.whl", hash = "sha256:008bffd4afd6de8ca46c6486085414cc898263a21a63c7f860d54c9d02b45c8d", size = 8789700, upload-time = "2025-03-09T18:04:08.937Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/66/330f265140fa814b4ed1bf16aea701f9d005f8f4ab57a54feb17f53afe7e/pywin32-309-cp313-cp313-win_amd64.whl", hash = "sha256:bd0724f58492db4cbfbeb1fcd606495205aa119370c0ddc4f70e5771a3ab768d", size = 9496714, upload-time = "2025-03-09T18:04:10.619Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/84/9a51e6949a03f25cd329ece54dbf0846d57fadd2e79046c3b8d140aaa132/pywin32-309-cp313-cp313-win_arm64.whl", hash = "sha256:8fd9669cfd41863b688a1bc9b1d4d2d76fd4ba2128be50a70b0ea66b8d37953b", size = 8453052, upload-time = "2025-03-09T18:04:12.812Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyyaml"
|
||||
version = "6.0.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 },
|
||||
{ url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 },
|
||||
{ url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "questionary"
|
||||
version = "2.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "prompt-toolkit" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a8/b8/d16eb579277f3de9e56e5ad25280fab52fc5774117fb70362e8c2e016559/questionary-2.1.0.tar.gz", hash = "sha256:6302cdd645b19667d8f6e6634774e9538bfcd1aad9be287e743d96cacaf95587", size = 26775, upload-time = "2024-12-29T11:49:17.802Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/3f/11dd4cd4f39e05128bfd20138faea57bec56f9ffba6185d276e3107ba5b2/questionary-2.1.0-py3-none-any.whl", hash = "sha256:44174d237b68bc828e4878c763a9ad6790ee61990e0ae72927694ead57bab8ec", size = 36747, upload-time = "2024-12-29T11:49:16.734Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -337,9 +375,9 @@ dependencies = [
|
||||
{ name = "idna" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -350,43 +388,43 @@ dependencies = [
|
||||
{ name = "markdown-it-py" },
|
||||
{ name = "pygments" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149, upload-time = "2024-11-01T16:43:57.873Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424, upload-time = "2024-11-01T16:43:55.817Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.9.10"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/20/8e/fafaa6f15c332e73425d9c44ada85360501045d5ab0b81400076aff27cf6/ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7", size = 3759776 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/20/8e/fafaa6f15c332e73425d9c44ada85360501045d5ab0b81400076aff27cf6/ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7", size = 3759776, upload-time = "2025-03-07T15:27:44.363Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/73/b2/af7c2cc9e438cbc19fafeec4f20bfcd72165460fe75b2b6e9a0958c8c62b/ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d", size = 10049494 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/12/03f6dfa1b95ddd47e6969f0225d60d9d7437c91938a310835feb27927ca0/ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d", size = 10853584 },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/49/1c79e0906b6ff551fb0894168763f705bf980864739572b2815ecd3c9df0/ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d", size = 10155692 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/01/85e8082e41585e0e1ceb11e41c054e9e36fed45f4b210991052d8a75089f/ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c", size = 10369760 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/90/0bc60bd4e5db051f12445046d0c85cc2c617095c0904f1aa81067dc64aea/ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e", size = 9912196 },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/ea/0b7e8c42b1ec608033c4d5a02939c82097ddcb0b3e393e4238584b7054ab/ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12", size = 11434985 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/86/3171d1eff893db4f91755175a6e1163c5887be1f1e2f4f6c0c59527c2bfd/ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16", size = 12155842 },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/9e/700ca289f172a38eb0bca752056d0a42637fa17b81649b9331786cb791d7/ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52", size = 11613804 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/92/648020b3b5db180f41a931a68b1c8575cca3e63cec86fd26807422a0dbad/ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1", size = 13823776 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/a6/cc472161cd04d30a09d5c90698696b70c169eeba2c41030344194242db45/ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c", size = 11302673 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/db/d31c361c4025b1b9102b4d032c70a69adb9ee6fde093f6c3bf29f831c85c/ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43", size = 10235358 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/86/d6374e24a14d4d93ebe120f45edd82ad7dcf3ef999ffc92b197d81cdc2a5/ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c", size = 9886177 },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/62/a61691f6eaaac1e945a1f3f59f1eea9a218513139d5b6c2b8f88b43b5b8f/ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5", size = 10864747 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/94/2c7065e1d92a8a8a46d46d9c3cf07b0aa7e0a1e0153d74baa5e6620b4102/ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8", size = 11360441 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/8f/1f545ea6f9fcd7bf4368551fb91d2064d8f0577b3079bb3f0ae5779fb773/ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029", size = 10247401 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/18/fb703603ab108e5c165f52f5b86ee2aa9be43bb781703ec87c66a5f5d604/ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1", size = 11366360 },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/85/338e603dc68e7d9994d5d84f24adbf69bae760ba5efd3e20f5ff2cec18da/ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69", size = 10436892 },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/b2/af7c2cc9e438cbc19fafeec4f20bfcd72165460fe75b2b6e9a0958c8c62b/ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d", size = 10049494, upload-time = "2025-03-07T15:26:51.268Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/12/03f6dfa1b95ddd47e6969f0225d60d9d7437c91938a310835feb27927ca0/ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d", size = 10853584, upload-time = "2025-03-07T15:26:56.104Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/49/1c79e0906b6ff551fb0894168763f705bf980864739572b2815ecd3c9df0/ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d", size = 10155692, upload-time = "2025-03-07T15:27:01.385Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5b/01/85e8082e41585e0e1ceb11e41c054e9e36fed45f4b210991052d8a75089f/ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c", size = 10369760, upload-time = "2025-03-07T15:27:04.023Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/90/0bc60bd4e5db051f12445046d0c85cc2c617095c0904f1aa81067dc64aea/ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e", size = 9912196, upload-time = "2025-03-07T15:27:06.93Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/ea/0b7e8c42b1ec608033c4d5a02939c82097ddcb0b3e393e4238584b7054ab/ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12", size = 11434985, upload-time = "2025-03-07T15:27:10.082Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/86/3171d1eff893db4f91755175a6e1163c5887be1f1e2f4f6c0c59527c2bfd/ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16", size = 12155842, upload-time = "2025-03-07T15:27:12.727Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/9e/700ca289f172a38eb0bca752056d0a42637fa17b81649b9331786cb791d7/ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52", size = 11613804, upload-time = "2025-03-07T15:27:15.944Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/92/648020b3b5db180f41a931a68b1c8575cca3e63cec86fd26807422a0dbad/ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1", size = 13823776, upload-time = "2025-03-07T15:27:18.996Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5e/a6/cc472161cd04d30a09d5c90698696b70c169eeba2c41030344194242db45/ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c", size = 11302673, upload-time = "2025-03-07T15:27:21.655Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/db/d31c361c4025b1b9102b4d032c70a69adb9ee6fde093f6c3bf29f831c85c/ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43", size = 10235358, upload-time = "2025-03-07T15:27:24.72Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/86/d6374e24a14d4d93ebe120f45edd82ad7dcf3ef999ffc92b197d81cdc2a5/ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c", size = 9886177, upload-time = "2025-03-07T15:27:27.282Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/62/a61691f6eaaac1e945a1f3f59f1eea9a218513139d5b6c2b8f88b43b5b8f/ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5", size = 10864747, upload-time = "2025-03-07T15:27:30.637Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/94/2c7065e1d92a8a8a46d46d9c3cf07b0aa7e0a1e0153d74baa5e6620b4102/ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8", size = 11360441, upload-time = "2025-03-07T15:27:33.356Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/8f/1f545ea6f9fcd7bf4368551fb91d2064d8f0577b3079bb3f0ae5779fb773/ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029", size = 10247401, upload-time = "2025-03-07T15:27:35.994Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/18/fb703603ab108e5c165f52f5b86ee2aa9be43bb781703ec87c66a5f5d604/ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1", size = 11366360, upload-time = "2025-03-07T15:27:38.66Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/85/338e603dc68e7d9994d5d84f24adbf69bae760ba5efd3e20f5ff2cec18da/ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69", size = 10436892, upload-time = "2025-03-07T15:27:41.687Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "shellingham"
|
||||
version = "1.5.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -399,25 +437,34 @@ dependencies = [
    { name = "shellingham" },
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8b/6f/3991f0f1c7fcb2df31aef28e0594d8d54b05393a0e4e34c65e475c2a5d41/typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5", size = 100711 }
sdist = { url = "https://files.pythonhosted.org/packages/8b/6f/3991f0f1c7fcb2df31aef28e0594d8d54b05393a0e4e34c65e475c2a5d41/typer-0.15.2.tar.gz", hash = "sha256:ab2fab47533a813c49fe1f16b1a370fd5819099c00b119e0633df65f22144ba5", size = 100711, upload-time = "2025-02-27T19:17:34.807Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/7f/fc/5b29fea8cee020515ca82cc68e3b8e1e34bb19a3535ad854cac9257b414c/typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc", size = 45061 },
    { url = "https://files.pythonhosted.org/packages/7f/fc/5b29fea8cee020515ca82cc68e3b8e1e34bb19a3535ad854cac9257b414c/typer-0.15.2-py3-none-any.whl", hash = "sha256:46a499c6107d645a9c13f7ee46c5d5096cae6f5fc57dd11eccbbb9ae3e44ddfc", size = 45061, upload-time = "2025-02-27T19:17:32.111Z" },
]

[[package]]
name = "typing-extensions"
|
||||
version = "4.12.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wcwidth"
|
||||
version = "0.2.13"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" },
|
||||
]