@@ -23,18 +23,111 @@ const setSSEHeaders = (res) => {
2323 res . setHeader ( 'X-Accel-Buffering' , 'no' ) ;
2424} ;
2525
26- router . post ( '/' , ( req , res ) => {
27- const { messages, model : requestedModel , stream = false , temperature, max_tokens, tools, tool_choice } = req . body ;
// Smart routing - support GET requests for better client compatibility.
// Accepts either ?message=<text> (single user turn) or ?messages=<JSON array>,
// plus optional model / stream / temperature / max_tokens query params.
router.get('/', (req, res) => {
  // Express has already percent-decoded req.query values, so we must NOT call
  // decodeURIComponent on them again: double-decoding corrupts content with
  // literal '%' characters and throws an uncaught URIError on e.g. "100%".
  const { message, messages, model = 'auto', stream = false, temperature, max_tokens } = req.query;

  let parsedMessages;

  if (messages) {
    // messages arrives as a JSON string, e.g. ?messages=[{"role":"user","content":"hi"}]
    try {
      parsedMessages = JSON.parse(messages);
    } catch (e) {
      return res.status(400).json({
        error: {
          message: 'Invalid messages parameter. Must be valid JSON array.',
          type: 'invalid_request_error',
        }
      });
    }
  } else if (message) {
    // Simple single-message convenience: wrap the text in a one-turn chat array.
    parsedMessages = [{ role: 'user', content: message }];
  } else {
    return res.status(400).json({
      error: {
        message: 'Missing required parameter. Use ?message=your_text or ?messages=[{"role":"user","content":"text"}]',
        type: 'invalid_request_error',
        help: 'GET Example: /v1/chat/completions?message=Hello&model=auto'
      }
    });
  }

  if (!Array.isArray(parsedMessages) || parsedMessages.length === 0) {
    return res.status(400).json({
      error: {
        message: 'messages must be a non-empty array',
        type: 'invalid_request_error',
      },
    });
  }

  const mappedModel = getModelMapping(model);
  const prompt = messagesToPrompt(parsedMessages);
  const id = newId('chatcmpl');

  // Optional sampling flags forwarded to the backend process.
  const flags = [];
  if (max_tokens != null) flags.push('--max-tokens', String(max_tokens));
  if (temperature != null) flags.push('--temperature', String(temperature));

  // Query values are strings, so compare against the literal 'true'.
  const isStream = stream === 'true';

  if (isStream) {
    setSSEHeaders(res);
    // Guards against writing to (or ending) the response twice if onEnd
    // fires after onError, or vice versa.
    let finished = false;

    const child = runQoderRequest({
      prompt,
      model: mappedModel,
      flags,
      timeoutMs: QODER_TIMEOUT_MS,
      onChunk: (data) => {
        if (finished) return;
        res.write(`data: ${JSON.stringify(buildStreamChunk(data, mappedModel, id))}\n\n`);
      },
      onError: (error) => {
        if (finished) return;
        finished = true;
        res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`);
        res.write('data: [DONE]\n\n');
        res.end();
      },
      onEnd: () => {
        if (finished) return;
        finished = true;
        res.write(`data: ${JSON.stringify(buildDoneChunk())}\n\n`);
        res.write('data: [DONE]\n\n');
        res.end();
      },
    });

    // Stop the backend process if the client disconnects mid-stream.
    req.on('close', () => child.kill());
  } else {
    let fullContent = '';
    // Prevents a double-send (ERR_HTTP_HEADERS_SENT) when onError has
    // already responded with 500 and onEnd fires afterwards.
    let responded = false;

    const child = runQoderRequest({
      prompt,
      model: mappedModel,
      flags,
      timeoutMs: QODER_TIMEOUT_MS,
      onChunk: (data) => {
        const content = extractTextContent(data.message);
        if (content) fullContent += content;
      },
      onError: (error) => {
        if (responded) return;
        responded = true;
        res.status(500).json({ error: { message: error.message, type: 'server_error' } });
      },
      onEnd: () => {
        if (responded) return;
        responded = true;
        res.json(buildFullChatResponse(id, mappedModel, fullContent));
      },
    });

    // Consistent with the streaming branch: reap the child process when the
    // client goes away so it does not linger.
    req.on('close', () => child.kill());
  }
});
128+
129+ router . post ( '/' , ( req , res ) => {
130+
38131 const model = getModelMapping ( requestedModel ) ;
39132 const prompt = messagesToPrompt ( messages ) ;
40133 const id = newId ( 'chatcmpl' ) ;
0 commit comments