Skip to content

Commit 00da676

Browse files
committed
Add GET endpoint and SSE streaming to /chat
Add smart routing for GET /chat to accept ?message or ?messages (JSON) and ?model/stream/temperature/max_tokens query params. Parse and validate query input, map models with getModelMapping, build prompt, and construct flags from max_tokens/temperature. Implement SSE streaming for stream=true using setSSEHeaders and runQoderRequest with chunk/onError/onEnd handlers (including client-close kill). Provide non-streaming handling that accumulates chunks and returns a full JSON response. Preserve existing POST handler behavior for backward compatibility and return clear 400/500 errors for invalid input or runtime failures.
1 parent 3e77bf4 commit 00da676

1 file changed

Lines changed: 97 additions & 4 deletions

File tree

src/routes/chat.js

Lines changed: 97 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,18 +23,111 @@ const setSSEHeaders = (res) => {
2323
res.setHeader('X-Accel-Buffering', 'no');
2424
};
2525

26-
router.post('/', (req, res) => {
27-
const { messages, model: requestedModel, stream = false, temperature, max_tokens, tools, tool_choice } = req.body;
26+
// Smart routing - support GET requests for better client compatibility.
// Accepts either ?message=<text> (single user turn) or ?messages=<JSON array>,
// plus optional ?model / ?stream / ?temperature / ?max_tokens.
router.get('/', (req, res) => {
  // Extract parameters from query string. Express has already URL-decoded
  // these values once.
  const { message, messages, model = 'auto', stream = false, temperature, max_tokens } = req.query;

  let parsedMessages;

  if (messages) {
    // ?messages= carries a JSON-encoded array of chat messages.
    // NOTE(review): decodeURIComponent decodes a SECOND time here (Express
    // already decoded once); kept for compatibility with clients that
    // double-encode. Both URIError and JSON.parse errors land in the catch.
    try {
      parsedMessages = JSON.parse(decodeURIComponent(messages));
    } catch (e) {
      return res.status(400).json({
        error: {
          message: 'Invalid messages parameter. Must be valid JSON array.',
          type: 'invalid_request_error',
        }
      });
    }
  } else if (message) {
    // Simple single message support.
    // Fix: a stray '%' in the (already-decoded) value made the bare
    // decodeURIComponent call throw an uncaught URIError; fall back to the
    // raw string instead of crashing with a 500.
    let text;
    try {
      text = decodeURIComponent(message);
    } catch (e) {
      text = message;
    }
    parsedMessages = [{ role: 'user', content: text }];
  } else {
    return res.status(400).json({
      error: {
        message: 'Missing required parameter. Use ?message=your_text or ?messages=[{"role":"user","content":"text"}]',
        type: 'invalid_request_error',
        help: 'GET Example: /v1/chat/completions?message=Hello&model=auto'
      }
    });
  }

  if (!Array.isArray(parsedMessages) || parsedMessages.length === 0) {
    return res.status(400).json({
      error: {
        message: 'messages must be a non-empty array',
        type: 'invalid_request_error',
      },
    });
  }

  const mappedModel = getModelMapping(model);
  const prompt = messagesToPrompt(parsedMessages);
  const id = newId('chatcmpl');

  // CLI flags built from the optional sampling parameters (query values are
  // strings, so String() is effectively a no-op kept for clarity).
  const flags = [];
  if (max_tokens != null) flags.push('--max-tokens', String(max_tokens));
  if (temperature != null) flags.push('--temperature', String(temperature));

  const isStream = stream === 'true';

  if (isStream) {
    setSSEHeaders(res);
    // Guard so a late onChunk/onEnd never writes after the stream was closed
    // by onError (or vice versa).
    let ended = false;

    const child = runQoderRequest({
      prompt,
      model: mappedModel,
      flags,
      timeoutMs: QODER_TIMEOUT_MS,
      onChunk: (data) => {
        // Fix: dropped dead locals (content / finishReason / lastFinishReason)
        // that were computed here but never used.
        if (ended) return;
        res.write(`data: ${JSON.stringify(buildStreamChunk(data, mappedModel, id))}\n\n`);
      },
      onError: (error) => {
        if (ended) return;
        ended = true;
        res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
        res.write('data: [DONE]\n\n');
        res.end();
      },
      onEnd: () => {
        if (ended) return;
        ended = true;
        res.write(`data: ${JSON.stringify(buildDoneChunk())}\n\n`);
        res.write('data: [DONE]\n\n');
        res.end();
      },
    });

    // Kill the subprocess if the client disconnects mid-stream.
    req.on('close', () => child.kill());
  } else {
    let fullContent = '';

    const child = runQoderRequest({
      prompt,
      model: mappedModel,
      flags,
      timeoutMs: QODER_TIMEOUT_MS,
      onChunk: (data) => {
        const content = extractTextContent(data.message);
        if (content) fullContent += content;
      },
      onError: (error) => {
        // Fix: onEnd may still fire after onError (or the reverse); never
        // attempt a second response — that crashed with ERR_HTTP_HEADERS_SENT.
        if (res.headersSent) return;
        res.status(500).json({ error: { message: error.message, type: 'server_error' } });
      },
      onEnd: () => {
        if (res.headersSent) return;
        res.json(buildFullChatResponse(id, mappedModel, fullContent));
      },
    });

    // Fix: also reap the subprocess on client disconnect for non-streaming
    // requests (previously only the streaming branch did this — process leak).
    req.on('close', () => child.kill());
  }
});
128+
129+
router.post('/', (req, res) => {
130+
38131
const model = getModelMapping(requestedModel);
39132
const prompt = messagesToPrompt(messages);
40133
const id = newId('chatcmpl');

0 commit comments

Comments
 (0)