feat: repo scanner
app/api/analyze/route.ts (new file, 77 lines)
@@ -0,0 +1,77 @@
import { NextRequest, NextResponse } from 'next/server'
import { readFileSync } from 'fs'
import { join } from 'path'

const promptTemplate = readFileSync(join(process.cwd(), 'app/api/analyze/prompt.txt'), 'utf-8')

// In-memory cache: persists for the lifetime of the server process.
// Note: on serverless deployments each instance keeps its own Map, so this cache is best-effort.
const cache = new Map<string, { data: any; timestamp: number }>()
const CACHE_TTL = 1000 * 60 * 60 * 24 // 24 hours
export async function POST(req: NextRequest) {
  try {
    const { owner, repo, files, stack, dependencies, sensitiveFiles, configFiles } = await req.json()

    if (!owner || !repo) {
      return NextResponse.json({ error: 'owner and repo are required' }, { status: 400 })
    }

    const baseUrl = process.env.SHARED_LLM_BASE_URL
    const apiKey = process.env.SHARED_LLM_API_KEY

    if (!baseUrl || !apiKey) {
      return NextResponse.json({ error: 'LLM service not configured' }, { status: 500 })
    }
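    // Configuration notes (env var names as used in this route):
    //   SHARED_LLM_BASE_URL — base URL of an OpenAI-compatible API, with or without a trailing /v1
    //   SHARED_LLM_API_KEY  — sent as a Bearer token in the request below
    //   SHARED_LLM_MODEL    — optional; the request below falls back to 'Kimi-K2.5-sandbox'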
    // Check cache
    const cacheKey = `${owner}/${repo}`.toLowerCase()
    const cached = cache.get(cacheKey)
    if (cached && Date.now() - cached.timestamp < CACHE_TTL) {
      return NextResponse.json(cached.data)
    }
    // Fill the template placeholders; long lists are truncated to keep the prompt bounded
    const prompt = promptTemplate
      .replace('{{owner}}', owner)
      .replace('{{repo}}', repo)
      .replace('{{files}}', files.slice(0, 80).join(', '))
      .replace('{{stack}}', stack.join(', ') || 'Unknown')
      .replace('{{dependencies}}', Object.keys(dependencies || {}).slice(0, 40).join(', ') || 'None detected')
      .replace('{{sensitiveFiles}}', sensitiveFiles.join(', ') || 'None')
      .replace('{{configFiles}}', configFiles.join(', ') || 'None')
    let endpoint = baseUrl.replace(/\/+$/, '')
    endpoint = endpoint.replace(/\/v1$/, '')
    endpoint += '/v1/chat/completions'
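    // Illustrative results of the normalization above (example hostname):
    //   https://llm.example.com      -> https://llm.example.com/v1/chat/completions
    //   https://llm.example.com/v1/  -> https://llm.example.com/v1/chat/completions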
    const llmRes = await fetch(endpoint, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
      body: JSON.stringify({
        model: process.env.SHARED_LLM_MODEL || 'Kimi-K2.5-sandbox',
        messages: [{ role: 'user', content: prompt }],
        max_tokens: 8000,
        temperature: 0.7,
      }),
    })
    if (!llmRes.ok) {
      console.error('LLM error:', await llmRes.text())
      return NextResponse.json({ error: 'Failed to generate report' }, { status: 500 })
    }
    const data = await llmRes.json()
    const msg = data.choices?.[0]?.message
    // Some reasoning models return their text in reasoning_content instead of content
    const content = msg?.content || msg?.reasoning_content || ''
    if (!content) {
      console.error('LLM returned empty content:', JSON.stringify(data).slice(0, 500))
      return NextResponse.json({ error: 'LLM returned empty response' }, { status: 500 })
    }

    // Greedy match pulls the outermost JSON object even if the model wrapped it in prose or fences
    const jsonMatch = content.match(/\{[\s\S]*\}/)
    if (!jsonMatch) {
      console.error('No JSON found in LLM response:', content.slice(0, 500))
      return NextResponse.json({ error: 'Failed to parse report' }, { status: 500 })
    }
    const report = JSON.parse(jsonMatch[0])
    cache.set(cacheKey, { data: report, timestamp: Date.now() })
    return NextResponse.json(report)
  } catch (e: any) {
    console.error('Analyze error:', e)
    return NextResponse.json({ error: e.message || 'Analysis failed' }, { status: 500 })
  }
}
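The template file itself is not part of this diff. The route assumes app/api/analyze/prompt.txt exists and contains the seven placeholders substituted above, and that it asks the model for a single JSON object (the parsing code depends on that). A minimal sketch of what such a template might look like, purely for context:

    You are analyzing the GitHub repository {{owner}}/{{repo}}.

    Detected stack: {{stack}}
    Top dependencies: {{dependencies}}
    Files (sample): {{files}}
    Sensitive files: {{sensitiveFiles}}
    Config files: {{configFiles}}

    Respond with a single JSON object (no surrounding text) describing the repository.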
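A sketch of how a client in this app might call the route. The field names match the destructuring in the handler, but the values are illustrative; the scanner that actually collects them is outside this commit:

    const res = await fetch('/api/analyze', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        owner: 'vercel',          // repository owner (illustrative)
        repo: 'next.js',          // repository name (illustrative)
        files: ['package.json', 'next.config.js', 'app/page.tsx'],
        stack: ['Next.js', 'TypeScript'],
        dependencies: { next: '14.1.0', react: '18.2.0' },
        sensitiveFiles: [],
        configFiles: ['next.config.js', 'tsconfig.json'],
      }),
    })
    const report = await res.json() // repeat calls within 24h are served from the in-memory cache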