open-gsio/packages/server/providers/ollama.ts

import { OpenAI } from "openai";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import { ProviderRepository } from "./_ProviderRepository";
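
/**
 * Chat provider that talks to Ollama through its OpenAI-compatible API.
 * Builds an OpenAI SDK client pointed at the configured Ollama endpoint and
 * streams chat completions through the shared BaseChatProvider pipeline.
 */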
export class OllamaChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      // Prefer the endpoint from the environment; fall back to the shared Ollama default.
      baseURL: param.env.OLLAMA_API_ENDPOINT ?? ProviderRepository.OPENAI_COMPAT_ENDPOINTS.ollama,
      apiKey: param.env.OLLAMA_API_KEY,
    });
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };

    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...tuningParams,
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    // Forward every chunk to the caller; signal completion when the model reports "stop".
    dataCallback({ type: "chat", data: chunk });
    return chunk.choices?.[0]?.finish_reason === "stop";
  }
}
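
/**
 * Static entry point that validates the incoming messages and delegates
 * streaming to a shared OllamaChatProvider instance.
 */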
export class OllamaChatSdk {
  private static provider = new OllamaChatProvider();

  static async handleOllamaStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  }
}
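
/*
 * Example usage (a minimal sketch; the model tag, `env` bindings, and the
 * `writeSseChunk` helper are illustrative assumptions, not part of this module):
 *
 *   const response = await OllamaChatSdk.handleOllamaStream(
 *     {
 *       openai: client,                 // OpenAI instance (not used by this provider)
 *       systemPrompt: "You are a helpful assistant.",
 *       preprocessedContext: {},
 *       maxTokens: 1024,
 *       messages: [{ role: "user", content: "Hello" }],
 *       disableWebhookGeneration: true,
 *       model: "llama3.1",              // hypothetical local model tag
 *       env,                            // environment providing OLLAMA_API_ENDPOINT / OLLAMA_API_KEY
 *     },
 *     (data) => writeSseChunk(data),    // hypothetical SSE writer
 *   );
 */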