fix: crush providers configuration (#30)

2025-08-07 11:04:51 -06:00
committed by GitHub
parent bae951cf7c
commit a709071d10
3 changed files with 62 additions and 346 deletions


@@ -7,6 +7,9 @@ from typing import Any, Dict
 from cubbi_init import ToolPlugin, cubbi_config
 
+# Standard providers that Crush supports natively
+STANDARD_PROVIDERS = ["anthropic", "openai", "google", "openrouter"]
+
 class CrushPlugin(ToolPlugin):
     @property
@@ -27,102 +30,43 @@ class CrushPlugin(ToolPlugin):
         return Path("/home/cubbi/.config/crush")
 
     def _map_provider_to_crush_format(
-        self, provider_name: str, provider_config
+        self, provider_name: str, provider_config, is_default_provider: bool = False
     ) -> Dict[str, Any] | None:
         """Map cubbi provider configuration to crush provider format"""
+        if not provider_config.base_url:
+            if provider_config.type in STANDARD_PROVIDERS:
+                provider_entry = {
+                    "api_key": provider_config.api_key,
+                }
+                return provider_entry
+        # Custom provider - include base_url and name
+        provider_entry = {
+            "api_key": provider_config.api_key,
+            "base_url": provider_config.base_url,
+            "models": [],
+        }
+        # Add name and type for custom providers
+        if provider_config.type in STANDARD_PROVIDERS:
+            # Standard provider with custom URL - determine type and name
             if provider_config.type == "anthropic":
-                return {
-                    "name": "Anthropic",
-                    "type": "anthropic",
-                    "api_key": provider_config.api_key,
-                    "base_url": provider_config.base_url or "https://api.anthropic.com/v1",
-                    "models": [
-                        {
-                            "id": "claude-3-5-sonnet-20241022",
-                            "name": "Claude 3.5 Sonnet",
-                            "context_window": 200000,
-                            "default_max_tokens": 4096,
-                        },
-                        {
-                            "id": "claude-3-5-haiku-20241022",
-                            "name": "Claude 3.5 Haiku",
-                            "context_window": 200000,
-                            "default_max_tokens": 4096,
-                        },
-                    ],
-                }
+                provider_entry["type"] = "anthropic"
             elif provider_config.type == "openai":
-                base_url = provider_config.base_url or "https://api.openai.com/v1"
-                return {
-                    "name": "OpenAI"
-                    if base_url.startswith("https://api.openai.com")
-                    else f"OpenAI ({base_url})",
-                    "type": "openai",
-                    "api_key": provider_config.api_key,
-                    "base_url": base_url,
-                    "models": [
-                        {
-                            "id": "gpt-4o",
-                            "name": "GPT-4o",
-                            "context_window": 128000,
-                            "default_max_tokens": 4096,
-                        },
-                        {
-                            "id": "gpt-4o-mini",
-                            "name": "GPT-4o Mini",
-                            "context_window": 128000,
-                            "default_max_tokens": 16384,
-                        },
-                    ],
-                }
+                provider_entry["type"] = "openai"
             elif provider_config.type == "google":
-                return {
-                    "name": "Google",
-                    "type": "openai",  # Google Gemini uses OpenAI-compatible API
-                    "api_key": provider_config.api_key,
-                    "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
-                    "models": [
-                        {
-                            "id": "gemini-1.5-pro",
-                            "name": "Gemini 1.5 Pro",
-                            "context_window": 2000000,
-                            "default_max_tokens": 8192,
-                        },
-                        {
-                            "id": "gemini-1.5-flash",
-                            "name": "Gemini 1.5 Flash",
-                            "context_window": 1000000,
-                            "default_max_tokens": 8192,
-                        },
-                    ],
-                }
+                provider_entry["type"] = "gemini"
             elif provider_config.type == "openrouter":
-                return {
-                    "name": "OpenRouter",
-                    "type": "openai",
-                    "api_key": provider_config.api_key,
-                    "base_url": "https://openrouter.ai/api/v1",
-                    "models": [
-                        {
-                            "id": "anthropic/claude-3.5-sonnet",
-                            "name": "Claude 3.5 Sonnet (via OpenRouter)",
-                            "context_window": 200000,
-                            "default_max_tokens": 4096,
-                        },
-                        {
-                            "id": "openai/gpt-4o",
-                            "name": "GPT-4o (via OpenRouter)",
-                            "context_window": 128000,
-                            "default_max_tokens": 4096,
-                        },
-                    ],
-                }
+                provider_entry["type"] = "openai"
+            # Set name format as 'provider_name (type)'
+            provider_entry["name"] = f"{provider_name} ({provider_config.type})"
+        else:
+            # Non-standard provider with custom URL
+            provider_entry["type"] = "openai"
+            provider_entry["name"] = f"{provider_name} ({provider_config.type})"
-        return None
+        return provider_entry
 
     def _ensure_user_config_dir(self) -> Path:
         config_dir = self._get_user_config_path()
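Taken together, the rewritten mapping produces one of two shapes. A rough sketch of both, assuming one standard anthropic provider without a custom base_url and one custom OpenAI-compatible endpoint named litellm (keys and URL are illustrative, not from this commit):

# Illustrative shapes returned by _map_provider_to_crush_format
standard_entry = {
    "api_key": "sk-ant-...",  # standard type, no base_url: minimal entry
}

custom_entry = {
    "api_key": "sk-...",                         # hypothetical key
    "base_url": "http://litellm.local:4000/v1",  # hypothetical endpoint
    "models": [],                                 # filled in later for the default model
    "type": "openai",                             # non-standard types are exposed as OpenAI-compatible
    "name": "litellm (litellm)",                  # 'provider_name (type)' pattern
}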
@@ -168,65 +112,45 @@ class CrushPlugin(ToolPlugin):
         # Initialize Crush configuration with schema
         config_data = {"$schema": "https://charm.land/crush.json", "providers": {}}
 
+        # Determine the default provider from the default model
+        default_provider_name = None
+        if cubbi_config.defaults.model:
+            default_provider_name = cubbi_config.defaults.model.split("/", 1)[0]
+
         # Get all configured providers using the new provider system
         self.status.log(
             f"Found {len(cubbi_config.providers)} configured providers for Crush"
         )
         for provider_name, provider_config in cubbi_config.providers.items():
+            is_default_provider = provider_name == default_provider_name
             crush_provider = self._map_provider_to_crush_format(
-                provider_name, provider_config
+                provider_name, provider_config, is_default_provider
             )
             if crush_provider:
-                config_data["providers"][provider_name] = crush_provider
+                # Translate google provider name to gemini for crush configuration
+                crush_provider_name = (
+                    "gemini" if provider_config.type == "google" else provider_name
+                )
+                config_data["providers"][crush_provider_name] = crush_provider
                 self.status.log(
-                    f"Added {provider_name} provider to Crush configuration"
+                    f"Added {crush_provider_name} provider to Crush configuration{' (default)' if is_default_provider else ''}"
                 )
 
-        # Fallback to legacy environment variables if no providers found
-        if not config_data["providers"]:
-            self.status.log(
-                "No providers found via new system, falling back to legacy detection"
-            )
-            # Check for legacy environment variables
-            legacy_providers = {
-                "anthropic": "ANTHROPIC_API_KEY",
-                "openai": "OPENAI_API_KEY",
-                "google": "GOOGLE_API_KEY",
-                "openrouter": "OPENROUTER_API_KEY",
-            }
-            for provider_name, env_var in legacy_providers.items():
-                api_key = os.environ.get(env_var)
-                if api_key:
-                    # Create a simple object for legacy compatibility
-                    class LegacyProvider:
-                        def __init__(self, provider_type, api_key, base_url=None):
-                            self.type = provider_type
-                            self.api_key = api_key
-                            self.base_url = base_url
-
-                    if provider_name == "openai":
-                        openai_url = os.environ.get("OPENAI_URL")
-                        legacy_provider = LegacyProvider("openai", api_key, openai_url)
-                    else:
-                        legacy_provider = LegacyProvider(provider_name, api_key)
-
-                    crush_provider = self._map_provider_to_crush_format(
-                        provider_name, legacy_provider
-                    )
-                    if crush_provider:
-                        config_data["providers"][provider_name] = crush_provider
-                        self.status.log(
-                            f"Added {provider_name} provider from legacy environment (legacy)"
-                        )
 
         # Set default model from cubbi configuration
         if cubbi_config.defaults.model:
-            # Crush expects provider/model format for default model selection
-            config_data["default_model"] = cubbi_config.defaults.model
-            self.status.log(f"Set default model to {config_data['default_model']}")
+            provider_part, model_part = cubbi_config.defaults.model.split("/", 1)
+            config_data["models"] = {
+                "large": {"provider": provider_part, "model": model_part},
+                "small": {"provider": provider_part, "model": model_part},
+            }
+            self.status.log(f"Set default model to {cubbi_config.defaults.model}")
+            # Add the model to the crush provider only if it is a custom provider
+            provider = cubbi_config.providers.get(provider_part)
+            if provider and provider.base_url:
+                config_data["providers"][provider_part]["models"].append(
+                    {"id": model_part, "name": model_part}
+                )
 
         # Only write config if we have providers configured
         if not config_data["providers"]:
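For a concrete picture: with defaults.model set to "anthropic/claude-sonnet-4-20250514" and a standard anthropic provider, the configuration written under /home/cubbi/.config/crush would look roughly like this (a sketch; the key value is a placeholder):

# Approximate config_data for a standard anthropic provider (illustrative)
{
    "$schema": "https://charm.land/crush.json",
    "providers": {
        "anthropic": {"api_key": "sk-ant-..."},
    },
    "models": {
        "large": {"provider": "anthropic", "model": "claude-sonnet-4-20250514"},
        "small": {"provider": "anthropic", "model": "claude-sonnet-4-20250514"},
    },
}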

test.sh

@@ -1,208 +0,0 @@
#!/bin/bash

# Comprehensive test script for all cubbi images with different model combinations
# Tests single prompt/response functionality for each tool

set -e

# Configuration
TIMEOUT="180s"
TEST_PROMPT="What is 2+2?"
LOG_FILE="test_results.log"
TEMP_DIR="/tmp/cubbi_test_$$"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test matrix
declare -a IMAGES=("goose" "aider" "claudecode" "opencode" "crush")
declare -a MODELS=(
    "anthropic/claude-sonnet-4-20250514"
    "openai/gpt-4o"
    "openrouter/openai/gpt-4o"
    "litellm/gpt-oss:120b"
)

# Command templates for each tool (based on research)
declare -A COMMANDS=(
    ["goose"]="goose run -t '$TEST_PROMPT' --no-session --quiet"
    ["aider"]="aider --message '$TEST_PROMPT' --yes-always --no-fancy-input --no-check-update --no-auto-commits"
    ["claudecode"]="claude -p '$TEST_PROMPT'"
    ["opencode"]="opencode run -m %MODEL% '$TEST_PROMPT'"
    ["crush"]="crush run '$TEST_PROMPT'"
)

# Initialize results tracking
declare -A RESULTS
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0

# Setup
echo -e "${BLUE}=== Cubbi Plugin Configuration Test Suite ===${NC}"
echo "Starting comprehensive test at $(date)"
echo "Test prompt: '$TEST_PROMPT'"
echo "Timeout: $TIMEOUT"
echo ""
mkdir -p "$TEMP_DIR"
> "$LOG_FILE"

# Function to log with timestamp
log() {
    echo "$(date '+%Y-%m-%d %H:%M:%S') $1" >> "$LOG_FILE"
}

# Function to run a single test
run_test() {
    local image="$1"
    local model="$2"
    local command="$3"

    # Replace %MODEL% placeholder in command
    command="${command//%MODEL%/$model}"

    local test_name="${image}_${model//\//_}"
    local log_file="${TEMP_DIR}/${test_name}.log"

    echo -ne "Testing ${BLUE}$image${NC} with ${YELLOW}$model${NC}... "
    log "Starting test: $test_name"
    log "Command: $command"

    # Run the test with timeout
    local start_time=$(date +%s)
    if timeout "$TIMEOUT" uv run -m cubbi.cli session create \
        -i "$image" \
        -m "$model" \
        --no-connect \
        --no-shell \
        --run "$command" > "$log_file" 2>&1; then
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))

        # Check if we got a meaningful response
        if grep -q "Initial command finished (exit code: 0)" "$log_file" &&
            grep -q "Command execution complete" "$log_file"; then
            echo -e "${GREEN}PASS${NC} (${duration}s)"
            RESULTS["$test_name"]="PASS"
            ((PASSED_TESTS++))
            log "Test passed in ${duration}s"
        else
            echo -e "${RED}FAIL${NC} (no valid output)"
            RESULTS["$test_name"]="FAIL_NO_OUTPUT"
            ((FAILED_TESTS++))
            log "Test failed - no valid output"
        fi
    else
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))
        echo -e "${RED}FAIL${NC} (timeout/error after ${duration}s)"
        RESULTS["$test_name"]="FAIL_TIMEOUT"
        ((FAILED_TESTS++))
        log "Test failed - timeout or error after ${duration}s"
    fi

    ((TOTAL_TESTS++))

    # Save detailed log
    log "=== Test output for $test_name ==="
    cat "$log_file" >> "$LOG_FILE"
    log "=== End test output ==="
    log ""
}

# Function to print test matrix header
print_matrix_header() {
    echo ""
    echo -e "${BLUE}=== Test Results Matrix ===${NC}"
    printf "%-15s" "Image/Model"
    for model in "${MODELS[@]}"; do
        # Shorten model name for display
        short_model=$(echo "$model" | sed 's/.*\///')
        printf "%-20s" "$short_model"
    done
    echo ""
    printf "%-15s" "==============="
    for model in "${MODELS[@]}"; do
        printf "%-20s" "===================="
    done
    echo ""
}

# Function to print test matrix row
print_matrix_row() {
    local image="$1"
    printf "%-15s" "$image"
    for model in "${MODELS[@]}"; do
        local test_name="${image}_${model//\//_}"
        local result="${RESULTS[$test_name]}"
        case "$result" in
            "PASS")
                printf "%-20s" "$(echo -e "${GREEN}PASS${NC}")"
                ;;
            "FAIL_NO_OUTPUT")
                printf "%-20s" "$(echo -e "${RED}FAIL (no output)${NC}")"
                ;;
            "FAIL_TIMEOUT")
                printf "%-20s" "$(echo -e "${RED}FAIL (timeout)${NC}")"
                ;;
            *)
                printf "%-20s" "$(echo -e "${YELLOW}UNKNOWN${NC}")"
                ;;
        esac
    done
    echo ""
}

# Main test execution
echo -e "${YELLOW}Running ${#IMAGES[@]} images × ${#MODELS[@]} models = $((${#IMAGES[@]} * ${#MODELS[@]})) total tests${NC}"
echo ""

for image in "${IMAGES[@]}"; do
    echo -e "${BLUE}--- Testing $image ---${NC}"
    for model in "${MODELS[@]}"; do
        command="${COMMANDS[$image]}"
        run_test "$image" "$model" "$command"
    done
    echo ""
done

# Print results summary
print_matrix_header
for image in "${IMAGES[@]}"; do
    print_matrix_row "$image"
done

echo ""
echo -e "${BLUE}=== Final Summary ===${NC}"
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"

if [ $FAILED_TESTS -eq 0 ]; then
    echo -e "${GREEN}All tests passed! 🎉${NC}"
    exit_code=0
else
    echo -e "${RED}$FAILED_TESTS tests failed${NC}"
    exit_code=1
fi

echo ""
echo "Detailed logs saved to: $LOG_FILE"
echo "Test completed at $(date)"

# Cleanup
rm -rf "$TEMP_DIR"
log "Test suite completed. Total: $TOTAL_TESTS, Passed: $PASSED_TESTS, Failed: $FAILED_TESTS"

exit $exit_code


@@ -5,7 +5,7 @@ import pytest
 from typing import Dict
 
-IMAGES = ["goose", "aider", "opencode"]  # fixme: crush
+IMAGES = ["goose", "aider", "opencode", "crush"]
 
 MODELS = [
     "anthropic/claude-sonnet-4-20250514",
@@ -19,7 +19,7 @@ COMMANDS: Dict[str, str] = {
     "goose": "goose run -t '{prompt}' --no-session --quiet",
     "aider": "aider --message '{prompt}' --yes-always --no-fancy-input --no-check-update --no-auto-commits",
     "opencode": "opencode run '{prompt}'",
-    "crush": "crush run '{prompt}'",
+    "crush": "crush run -q '{prompt}'",
 }
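For context, a minimal sketch of how a matrix like this is typically driven with pytest (the test function below is hypothetical, not part of the file):

import pytest

@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("image", IMAGES)
def test_single_prompt(image: str, model: str) -> None:
    # Render the per-tool command, e.g. "crush run -q 'What is 2+2?'" for crush
    command = COMMANDS[image].format(prompt="What is 2+2?")
    assert command  # the real suite would launch a cubbi session with this command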