@@ -26,6 +26,7 @@ import {
 import {ANTHROPIC_MODELS, getAiSdkModelOptionsForAnthropic} from './anthropic.js';
 import {getAiSdkModelOptionsForGoogle, GOOGLE_MODELS} from './google.js';
 import {getAiSdkModelOptionsForOpenAI, OPENAI_MODELS} from './openai.js';
+import {AiSdkModelOptions} from './ai-sdk-model-options.js';
 
 const SUPPORTED_MODELS = [...GOOGLE_MODELS, ...ANTHROPIC_MODELS, ...OPENAI_MODELS] as const;
 
@@ -34,7 +35,7 @@ const SUPPORTED_MODELS = [...GOOGLE_MODELS, ...ANTHROPIC_MODELS, ...OPENAI_MODEL
 // even if it involves many exponential backoff-waiting.
 const DEFAULT_MAX_RETRIES = 100000;
 
-export class AiSDKRunner implements LlmRunner {
+export class AiSdkRunner implements LlmRunner {
   displayName = 'AI SDK';
   id = 'ai-sdk';
   hasBuiltInRepairLoop = true;
@@ -44,9 +45,9 @@ export class AiSDKRunner implements LlmRunner {
   ): Promise<LocalLlmGenerateTextResponse> {
     const response = await this._wrapRequestWithTimeoutAndRateLimiting(options, async abortSignal =>
       generateText({
-        ...(await this._getAiSdkModelOptions(options)),
+        ...(await this.getAiSdkModelOptions(options)),
         abortSignal: abortSignal,
-        messages: this._convertRequestToMessagesList(options),
+        messages: this.convertRequestToMessagesList(options),
         maxRetries: DEFAULT_MAX_RETRIES,
       }),
     );
@@ -69,8 +70,8 @@ export class AiSDKRunner implements LlmRunner {
   ): Promise<LocalLlmConstrainedOutputGenerateResponse<T>> {
     const response = await this._wrapRequestWithTimeoutAndRateLimiting(options, async abortSignal =>
       generateText({
-        ...(await this._getAiSdkModelOptions(options)),
-        messages: this._convertRequestToMessagesList(options),
+        ...(await this.getAiSdkModelOptions(options)),
+        messages: this.convertRequestToMessagesList(options),
         output: Output.object<z.infer<T>>({schema: options.schema}),
         abortSignal: abortSignal,
         maxRetries: DEFAULT_MAX_RETRIES,
@@ -138,13 +139,9 @@ export class AiSDKRunner implements LlmRunner {
     );
   }
 
-  private async _getAiSdkModelOptions(request: LocalLlmGenerateTextRequestOptions): Promise<{
-    model: LanguageModel;
-    providerOptions:
-      | {anthropic: AnthropicProviderOptions}
-      | {google: GoogleGenerativeAIProviderOptions}
-      | {openai: OpenAIResponsesProviderOptions};
-  }> {
+  protected async getAiSdkModelOptions(
+    request: LocalLlmGenerateTextRequestOptions,
+  ): Promise<AiSdkModelOptions> {
     const result =
       (await getAiSdkModelOptionsForGoogle(request.model)) ??
       (await getAiSdkModelOptionsForAnthropic(request.model)) ??
@@ -155,7 +152,7 @@ export class AiSDKRunner implements LlmRunner {
     return result;
   }
 
-  private _convertRequestToMessagesList(
+  protected convertRequestToMessagesList(
     request: LocalLlmConstrainedOutputGenerateRequestOptions | LocalLlmGenerateTextRequestOptions,
   ): ModelMessage[] {
     return [
@@ -169,13 +166,13 @@ export class AiSDKRunner implements LlmRunner {
           ]
         : []),
       // Optional additional messages
-      ...this._toAiSDKMessage(request.messages ?? []),
+      ...this.toAiSDKMessage(request.messages ?? []),
       // The main message.
       {role: 'user', content: [{type: 'text', text: request.prompt}]},
     ];
   }
 
-  private _toAiSDKMessage(messages: PromptDataMessage[]): ModelMessage[] {
+  protected toAiSDKMessage(messages: PromptDataMessage[]): ModelMessage[] {
     const result: ModelMessage[] = [];
 
     for (const message of messages) {
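The newly imported `AiSdkModelOptions` lives in a file not shown in this diff. Judging by the inline return type it replaces, the extracted type presumably mirrors that shape; a minimal sketch, assuming the file contents and import sources (nothing below is part of the PR):

```ts
// ai-sdk-model-options.ts: assumed contents, mirroring the inline type removed above.
import {LanguageModel} from 'ai';
import {AnthropicProviderOptions} from '@ai-sdk/anthropic';
import {GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
import {OpenAIResponsesProviderOptions} from '@ai-sdk/openai';

export interface AiSdkModelOptions {
  model: LanguageModel;
  providerOptions:
    | {anthropic: AnthropicProviderOptions}
    | {google: GoogleGenerativeAIProviderOptions}
    | {openai: OpenAIResponsesProviderOptions};
}
```

The shared type also explains the visibility change: with `getAiSdkModelOptions`, `convertRequestToMessagesList`, and `toAiSDKMessage` now `protected` (and their underscore prefixes dropped), subclasses can hook into model selection and message construction. A hypothetical subclass, purely illustrative (the import paths are assumptions):

```ts
import {ModelMessage} from 'ai';
// Assumed paths for the runner and its request option types:
import {AiSdkRunner} from './ai-sdk-runner.js';
import {
  LocalLlmConstrainedOutputGenerateRequestOptions,
  LocalLlmGenerateTextRequestOptions,
} from '../llm-runner.js';

// Hypothetical example: override the now-protected hook to inspect outgoing messages.
class LoggingAiSdkRunner extends AiSdkRunner {
  protected override convertRequestToMessagesList(
    request: LocalLlmConstrainedOutputGenerateRequestOptions | LocalLlmGenerateTextRequestOptions,
  ): ModelMessage[] {
    const messages = super.convertRequestToMessagesList(request);
    console.debug(`Sending ${messages.length} messages to ${request.model}`);
    return messages;
  }
}
```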