Skip to content

Commit e53c0a0

Browse files
committed
feat: support extension of AiSDKRunner
```ts // env.config.mjs ... class OllamaCustomExecutor extends AiSdkRunner { override getAiSdkModelOptions(request: LocalLlmGenerateTextRequestOptions): Promise<AiSdkModelOptions> { if (<.. model is your custom one>) { ... } return super.getAiSdkModelOptions(request); } } executor = new LocalExecutor({config}, new OllamaCustomExecutor()) ``` Fixes #221
1 parent 00d7ae1 commit e53c0a0

8 files changed

Lines changed: 33 additions & 29 deletions

File tree

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
11
import {AnthropicProviderOptions} from '@ai-sdk/anthropic';
22
import {GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
33
import {OpenAIResponsesProviderOptions} from '@ai-sdk/openai';
4-
import {LanguageModelV3} from '@ai-sdk/provider';
4+
import {LanguageModelV3, SharedV3ProviderOptions} from '@ai-sdk/provider';
55

6-
export type ModelOptions = {
6+
export type AiSdkModelOptions = {
77
model: LanguageModelV3;
88
providerOptions:
99
| {anthropic: AnthropicProviderOptions}
1010
| {google: GoogleGenerativeAIProviderOptions}
11-
| {openai: OpenAIResponsesProviderOptions};
11+
| {openai: OpenAIResponsesProviderOptions}
12+
// This supports extensions of `AiSdkRunner` for custom model providers.
13+
| SharedV3ProviderOptions;
1214
};

runner/codegen/ai-sdk/ai-sdk-runner.ts

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ import {
2626
import {ANTHROPIC_MODELS, getAiSdkModelOptionsForAnthropic} from './anthropic.js';
2727
import {getAiSdkModelOptionsForGoogle, GOOGLE_MODELS} from './google.js';
2828
import {getAiSdkModelOptionsForOpenAI, OPENAI_MODELS} from './openai.js';
29+
import {AiSdkModelOptions} from './ai-sdk-model-options.js';
2930

3031
const SUPPORTED_MODELS = [...GOOGLE_MODELS, ...ANTHROPIC_MODELS, ...OPENAI_MODELS] as const;
3132

@@ -34,7 +35,7 @@ const SUPPORTED_MODELS = [...GOOGLE_MODELS, ...ANTHROPIC_MODELS, ...OPENAI_MODEL
3435
// even if it involves many exponential backoff-waiting.
3536
const DEFAULT_MAX_RETRIES = 100000;
3637

37-
export class AiSDKRunner implements LlmRunner {
38+
export class AiSdkRunner implements LlmRunner {
3839
displayName = 'AI SDK';
3940
id = 'ai-sdk';
4041
hasBuiltInRepairLoop = true;
@@ -44,9 +45,9 @@ export class AiSDKRunner implements LlmRunner {
4445
): Promise<LocalLlmGenerateTextResponse> {
4546
const response = await this._wrapRequestWithTimeoutAndRateLimiting(options, async abortSignal =>
4647
generateText({
47-
...(await this._getAiSdkModelOptions(options)),
48+
...(await this.getAiSdkModelOptions(options)),
4849
abortSignal: abortSignal,
49-
messages: this._convertRequestToMessagesList(options),
50+
messages: this.convertRequestToMessagesList(options),
5051
maxRetries: DEFAULT_MAX_RETRIES,
5152
}),
5253
);
@@ -69,8 +70,8 @@ export class AiSDKRunner implements LlmRunner {
6970
): Promise<LocalLlmConstrainedOutputGenerateResponse<T>> {
7071
const response = await this._wrapRequestWithTimeoutAndRateLimiting(options, async abortSignal =>
7172
generateText({
72-
...(await this._getAiSdkModelOptions(options)),
73-
messages: this._convertRequestToMessagesList(options),
73+
...(await this.getAiSdkModelOptions(options)),
74+
messages: this.convertRequestToMessagesList(options),
7475
output: Output.object<z.infer<T>>({schema: options.schema}),
7576
abortSignal: abortSignal,
7677
maxRetries: DEFAULT_MAX_RETRIES,
@@ -138,13 +139,9 @@ export class AiSDKRunner implements LlmRunner {
138139
);
139140
}
140141

141-
private async _getAiSdkModelOptions(request: LocalLlmGenerateTextRequestOptions): Promise<{
142-
model: LanguageModel;
143-
providerOptions:
144-
| {anthropic: AnthropicProviderOptions}
145-
| {google: GoogleGenerativeAIProviderOptions}
146-
| {openai: OpenAIResponsesProviderOptions};
147-
}> {
142+
protected async getAiSdkModelOptions(
143+
request: LocalLlmGenerateTextRequestOptions,
144+
): Promise<AiSdkModelOptions> {
148145
const result =
149146
(await getAiSdkModelOptionsForGoogle(request.model)) ??
150147
(await getAiSdkModelOptionsForAnthropic(request.model)) ??
@@ -155,7 +152,7 @@ export class AiSDKRunner implements LlmRunner {
155152
return result;
156153
}
157154

158-
private _convertRequestToMessagesList(
155+
protected convertRequestToMessagesList(
159156
request: LocalLlmConstrainedOutputGenerateRequestOptions | LocalLlmGenerateTextRequestOptions,
160157
): ModelMessage[] {
161158
return [
@@ -169,13 +166,13 @@ export class AiSDKRunner implements LlmRunner {
169166
]
170167
: []),
171168
// Optional additional messages
172-
...this._toAiSDKMessage(request.messages ?? []),
169+
...this.toAiSDKMessage(request.messages ?? []),
173170
// The main message.
174171
{role: 'user', content: [{type: 'text', text: request.prompt}]},
175172
];
176173
}
177174

178-
private _toAiSDKMessage(messages: PromptDataMessage[]): ModelMessage[] {
175+
protected toAiSDKMessage(messages: PromptDataMessage[]): ModelMessage[] {
179176
const result: ModelMessage[] = [];
180177

181178
for (const message of messages) {

runner/codegen/ai-sdk/anthropic.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import {anthropic, AnthropicProviderOptions} from '@ai-sdk/anthropic';
22
import {wrapLanguageModel} from 'ai';
33
import {anthropicThinkingWithStructuredResponseMiddleware} from './anthropic_thinking_patch.js';
4-
import {ModelOptions} from './ai-sdk-model-options.js';
4+
import {AiSdkModelOptions} from './ai-sdk-model-options.js';
55

66
export const ANTHROPIC_MODELS = [
77
'claude-opus-4.1-no-thinking',
@@ -17,7 +17,7 @@ export const ANTHROPIC_MODELS = [
1717

1818
export async function getAiSdkModelOptionsForAnthropic(
1919
rawModelName: string,
20-
): Promise<ModelOptions | null> {
20+
): Promise<AiSdkModelOptions | null> {
2121
const modelName = rawModelName as (typeof ANTHROPIC_MODELS)[number];
2222

2323
switch (modelName) {

runner/codegen/ai-sdk/google.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import {google, GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
2-
import {ModelOptions} from './ai-sdk-model-options.js';
2+
import {AiSdkModelOptions} from './ai-sdk-model-options.js';
33

44
export const GOOGLE_MODELS = [
55
'gemini-2.5-flash-lite',
@@ -13,7 +13,7 @@ export const GOOGLE_MODELS = [
1313

1414
export async function getAiSdkModelOptionsForGoogle(
1515
rawModelName: string,
16-
): Promise<ModelOptions | null> {
16+
): Promise<AiSdkModelOptions | null> {
1717
const modelName = rawModelName as (typeof GOOGLE_MODELS)[number];
1818

1919
switch (modelName) {

runner/codegen/ai-sdk/openai.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import {openai, OpenAIResponsesProviderOptions} from '@ai-sdk/openai';
2-
import {ModelOptions} from './ai-sdk-model-options.js';
2+
import {AiSdkModelOptions} from './ai-sdk-model-options.js';
33

44
export const OPENAI_MODELS = [
55
'gpt-5.1-no-thinking',
@@ -10,7 +10,7 @@ export const OPENAI_MODELS = [
1010

1111
export async function getAiSdkModelOptionsForOpenAI(
1212
rawModelName: string,
13-
): Promise<ModelOptions | null> {
13+
): Promise<AiSdkModelOptions | null> {
1414
const modelName = rawModelName as (typeof OPENAI_MODELS)[number];
1515

1616
switch (modelName) {

runner/codegen/runner-creation.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,11 @@ import type {ClaudeCodeRunner} from './claude-code-runner.js';
44
import type {GenkitRunner} from './genkit/genkit-runner.js';
55
import type {CodexRunner} from './codex-runner.js';
66
import type {NoopUnimplementedRunner} from './noop-unimplemented-runner.js';
7-
import {AiSDKRunner} from './ai-sdk/ai-sdk-runner.js';
7+
import {AiSdkRunner} from './ai-sdk/ai-sdk-runner.js';
88

99
interface AvailableRunners {
1010
genkit: GenkitRunner;
11-
'ai-sdk': AiSDKRunner;
11+
'ai-sdk': AiSdkRunner;
1212
'gemini-cli': GeminiCliRunner;
1313
'claude-code': ClaudeCodeRunner;
1414
'codex': CodexRunner;
@@ -31,7 +31,7 @@ export async function getRunnerByName<T extends RunnerName>(name: T): Promise<Av
3131
);
3232
case 'ai-sdk':
3333
return import('./ai-sdk/ai-sdk-runner.js').then(
34-
m => new m.AiSDKRunner() as AvailableRunners[T],
34+
m => new m.AiSdkRunner() as AvailableRunners[T],
3535
);
3636
case 'gemini-cli':
3737
return import('./gemini-cli-runner.js').then(

runner/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,3 +52,5 @@ export {replaceAtReferencesInPrompt} from './utils/prompt-at-references.js';
5252
export {extractRubrics} from './utils/extract-rubrics.js';
5353
export {combineReports} from './utils/combine-reports.mjs';
5454
export {writeReportToDisk} from './reporting/report-logging.js';
55+
export {AiSdkRunner} from './codegen/ai-sdk/ai-sdk-runner.js';
56+
export {type AiSdkModelOptions as AiSDKModelOptions} from './codegen/ai-sdk/ai-sdk-model-options.js';

runner/orchestration/executors/local-executor.ts

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,12 @@ export class LocalExecutor implements Executor {
3636

3737
constructor(
3838
public config: LocalExecutorConfig,
39-
runnerName: RunnerName = 'noop-unimplemented',
39+
runnerOrName: RunnerName | LlmRunner = 'noop-unimplemented',
4040
) {
41-
this.llm = getRunnerByName(runnerName);
41+
this.llm =
42+
typeof runnerOrName === 'string'
43+
? getRunnerByName(runnerOrName)
44+
: Promise.resolve(runnerOrName);
4245
}
4346

4447
async initializeEval(_prompt: RootPromptDefinition): Promise<EvalID> {

0 commit comments

Comments
 (0)