chore: remove completed PRD, rename setup doc, drop response_format tests

- Remove docs/01_ollama.prd.md (implementation complete)
- Rename local-dev-setup.md -> standalone-local-setup.md
- Remove TestResponseFormat class from test_llm_retry.py
This commit is contained in:
Igor Loskutov
2026-02-10 16:14:33 -05:00
parent d0af8ffdb7
commit 608a3805c5
3 changed files with 2 additions and 373 deletions

View File

@@ -286,92 +286,6 @@ class TestStructuredOutputWorkflow:
assert mock_settings.llm.acomplete.call_count == 2
class TestResponseFormat:
    """Verify that a JSON-schema response_format is forwarded to acomplete()."""

    @pytest.mark.asyncio
    async def test_acomplete_called_with_response_format(self):
        """acomplete() should receive a response_format built from the Pydantic schema."""
        workflow = StructuredOutputWorkflow(
            output_cls=TestResponse,
            max_retries=3,
            timeout=30,
        )

        with (
            patch("reflector.llm.TreeSummarize") as summarize_patch,
            patch("reflector.llm.Settings") as settings_patch,
        ):
            summarizer = MagicMock()
            summarizer.aget_response = AsyncMock(return_value="Some analysis")
            summarize_patch.return_value = summarizer
            settings_patch.llm.acomplete = AsyncMock(
                return_value=make_completion_response(
                    '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
                )
            )

            result = await workflow.run(
                prompt="Extract data",
                texts=["Some text"],
                tone_name=None,
            )

            assert "success" in result

            # Inspect the keyword arguments of the (single) acomplete call.
            kwargs = settings_patch.llm.acomplete.call_args.kwargs
            assert "response_format" in kwargs
            response_format = kwargs["response_format"]
            assert response_format["type"] == "json_schema"
            assert response_format["json_schema"]["name"] == "TestResponse"
            assert (
                response_format["json_schema"]["schema"]
                == TestResponse.model_json_schema()
            )

    @pytest.mark.asyncio
    async def test_response_format_present_on_retry(self):
        """response_format should accompany retry attempts as well as the first call."""
        workflow = StructuredOutputWorkflow(
            output_cls=TestResponse,
            max_retries=3,
            timeout=30,
        )

        with (
            patch("reflector.llm.TreeSummarize") as summarize_patch,
            patch("reflector.llm.Settings") as settings_patch,
        ):
            summarizer = MagicMock()
            summarizer.aget_response = AsyncMock(return_value="Some analysis")
            summarize_patch.return_value = summarizer

            attempts = []

            async def fake_acomplete(*args, **kwargs):
                attempts.append(kwargs)
                # First attempt returns an incomplete payload to force a retry.
                if len(attempts) == 1:
                    return make_completion_response('{"title": "Only title"}')
                return make_completion_response(
                    '{"title": "Test", "summary": "Summary", "confidence": 0.9}'
                )

            settings_patch.llm.acomplete = AsyncMock(side_effect=fake_acomplete)

            result = await workflow.run(
                prompt="Extract data",
                texts=["Some text"],
                tone_name=None,
            )

            assert "success" in result
            assert len(attempts) == 2

            # Every attempt — initial and retry — must carry response_format.
            for call in settings_patch.llm.acomplete.call_args_list:
                assert "response_format" in call.kwargs
                assert call.kwargs["response_format"]["type"] == "json_schema"
class TestNetworkErrorRetries:
"""Test that network error retries are handled by OpenAILike, not Workflow"""