add lib tests
committed by Geoff Seemueller
parent 7019aa30bc
commit 108e5fbd47

@@ -1,13 +1,5 @@
-import { OpenAI } from "openai";
-import {
-  _NotCustomized,
-  ISimpleType,
-  ModelPropertiesDeclarationToProperties,
-  ModelSnapshotType2,
-  UnionStringArray,
-} from "mobx-state-tree";
-import ChatSdk from "../lib/chat-sdk";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
+import {OpenAI} from "openai";
+import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider";
 
 export class CerebrasChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -18,30 +10,32 @@ export class CerebrasChatProvider extends BaseChatProvider {
   }
 
   getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    const llamaTuningParams = {
-      temperature: 0.86,
-      top_p: 0.98,
-      presence_penalty: 0.1,
-      frequency_penalty: 0.3,
-      max_tokens: param.maxTokens as number,
-    };
+    // models provided by cerebras do not follow standard tune params
+    // they must be individually configured
+    // const tuningParams = {
+    //   temperature: 0.86,
+    //   top_p: 0.98,
+    //   presence_penalty: 0.1,
+    //   frequency_penalty: 0.3,
+    //   max_tokens: param.maxTokens as number,
+    // };
 
     return {
       model: param.model,
       messages: safeMessages,
-      stream: true,
+      stream: true
       // ...tuningParams
     };
   }
 
   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
     // Check if this is the final chunk
     if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
       dataCallback({ type: "chat", data: chunk });
-      return true; // Break the stream
+      return true;
     }
 
     dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
+    return false;
   }
 }
@@ -53,13 +47,7 @@ export class CerebrasSdk {
       openai: OpenAI;
       systemPrompt: any;
       disableWebhookGeneration: boolean;
-      preprocessedContext: ModelSnapshotType2<
-        ModelPropertiesDeclarationToProperties<{
-          role: ISimpleType<UnionStringArray<string[]>>;
-          content: ISimpleType<unknown>;
-        }>,
-        _NotCustomized
-      >;
+      preprocessedContext: any;
       maxTokens: unknown | number | undefined;
       messages: any;
       model: string;
@@ -1,5 +1,5 @@
 import Anthropic from "@anthropic-ai/sdk";
-import { OpenAI } from "openai";
+import {OpenAI} from "openai";
 import {
   _NotCustomized,
   ISimpleType,
@@ -8,7 +8,7 @@ import {
   UnionStringArray,
 } from "mobx-state-tree";
 import ChatSdk from "../lib/chat-sdk";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
+import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider";
 
 export class ClaudeChatProvider extends BaseChatProvider {
   private anthropic: Anthropic | null = null;
@@ -51,11 +51,11 @@ export class ClaudeChatProvider extends BaseChatProvider {
           ],
         },
       });
-      return true; // Break the stream
+      return true;
     }
 
     dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
+    return false;
   }
 
   // Override the base handleStream method to use Anthropic client instead of OpenAI
@@ -1,13 +1,5 @@
-import { OpenAI } from "openai";
-import {
-  _NotCustomized,
-  ISimpleType,
-  ModelPropertiesDeclarationToProperties,
-  ModelSnapshotType2,
-  UnionStringArray,
-} from "mobx-state-tree";
-import ChatSdk from "../lib/chat-sdk";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
+import {OpenAI} from "openai";
+import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider";
 
 export class CloudflareAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -109,14 +101,13 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
   }
 
   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    // Check if this is the final chunk
     if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
       dataCallback({ type: "chat", data: chunk });
-      return true; // Break the stream
+      return true;
     }
 
     dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
+    return false;
   }
 }
@@ -127,13 +118,7 @@ export class CloudflareAISdk {
     param: {
       openai: OpenAI;
       systemPrompt: any;
-      preprocessedContext: ModelSnapshotType2<
-        ModelPropertiesDeclarationToProperties<{
-          role: ISimpleType<UnionStringArray<string[]>>;
-          content: ISimpleType<unknown>;
-        }>,
-        _NotCustomized
-      >;
+      preprocessedContext: any;
       maxTokens: unknown | number | undefined;
       messages: any;
       model: string;
@@ -34,14 +34,13 @@ export class FireworksAiChatProvider extends BaseChatProvider {
   }
 
   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    // Check if this is the final chunk
     if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
       dataCallback({ type: "chat", data: chunk });
-      return true; // Break the stream
+      return true;
     }
 
     dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
+    return false;
   }
 }
@@ -52,13 +51,7 @@ export class FireworksAiChatSdk {
     param: {
       openai: OpenAI;
       systemPrompt: any;
-      preprocessedContext: ModelSnapshotType2<
-        ModelPropertiesDeclarationToProperties<{
-          role: ISimpleType<UnionStringArray<string[]>>;
-          content: ISimpleType<unknown>;
-        }>,
-        _NotCustomized
-      >;
+      preprocessedContext: any;
       maxTokens: number;
       messages: any;
       model: any;
@@ -33,7 +33,7 @@ export class GoogleChatProvider extends BaseChatProvider {
           ],
         },
       });
-      return true; // Break the stream
+      return true;
     } else {
       dataCallback({
         type: "chat",
@@ -47,7 +47,7 @@ export class GoogleChatProvider extends BaseChatProvider {
           ],
         },
       });
-      return false; // Continue the stream
+      return false;
     }
   }
 }
@@ -67,7 +67,6 @@ export class GoogleChatSdk {
         messages: param.messages,
         model: param.model,
         env: param.env,
-        disableWebhookGeneration: param.disableWebhookGeneration,
       },
       dataCallback,
     );
@@ -17,7 +17,7 @@ export class GroqChatProvider extends BaseChatProvider {
   }
 
   getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    const llamaTuningParams = {
+    const tuningParams = {
       temperature: 0.86,
       top_p: 0.98,
       presence_penalty: 0.1,
@@ -29,23 +29,21 @@ export class GroqChatProvider extends BaseChatProvider {
       model: param.model,
       messages: safeMessages,
       stream: true,
-      ...llamaTuningParams
+      ...tuningParams
     };
   }
 
   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    // Check if this is the final chunk
     if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
       dataCallback({ type: "chat", data: chunk });
-      return true; // Break the stream
+      return true;
     }
 
     dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
+    return false;
   }
 }
 
 // Legacy class for backward compatibility
 export class GroqChatSdk {
   private static provider = new GroqChatProvider();
@@ -1,88 +1,73 @@
 import { OpenAI } from "openai";
 import ChatSdk from "../lib/chat-sdk";
 import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
 
 export class XaiChatProvider extends BaseChatProvider {
-  getOpenAIClient(param: CommonProviderParams): OpenAI {
-    return new OpenAI({
-      baseURL: "https://api.x.ai/v1",
-      apiKey: param.env.XAI_API_KEY,
-    });
-  }
-
-  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    const isO1 = () => {
-      if (param.model === "o1-preview" || param.model === "o1-mini") {
-        return true;
-      }
-    };
-
-    const tuningParams: Record<string, any> = {};
-
-    const gpt4oTuningParams = {
-      temperature: 0.75,
-    };
-
-    const getTuningParams = () => {
-      if (isO1()) {
-        tuningParams["temperature"] = 1;
-        tuningParams["max_completion_tokens"] = (param.maxTokens as number) + 10000;
-        return tuningParams;
-      }
-      return gpt4oTuningParams;
-    };
-
-    return {
-      model: param.model,
-      messages: safeMessages,
-      stream: true,
-      ...getTuningParams(),
-    };
-  }
-
-  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    // Check if this is the final chunk
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
-      return true; // Break the stream
-    }
-
-    dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
-  }
+  getOpenAIClient(param: CommonProviderParams): OpenAI {
+    return new OpenAI({
+      baseURL: "https://api.x.ai/v1",
+      apiKey: param.env.XAI_API_KEY,
+    });
+  }
+
+  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
+    const tuningParams = {
+      temperature: 0.75,
+    };
+
+    const getTuningParams = () => {
+      return tuningParams;
+    };
+
+    return {
+      model: param.model,
+      messages: safeMessages,
+      stream: true,
+      ...getTuningParams(),
+    };
+  }
+
+  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
+    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
+      dataCallback({ type: "chat", data: chunk });
+      return true;
+    }
+
+    dataCallback({ type: "chat", data: chunk });
+    return false;
+  }
 }
 
 export class XaiChatSdk {
-  private static provider = new XaiChatProvider();
-
-  static async handleXaiStream(
-    ctx: {
-      openai: OpenAI;
-      systemPrompt: any;
-      preprocessedContext: any;
-      maxTokens: unknown | number | undefined;
-      messages: any;
-      disableWebhookGeneration: boolean;
-      model: any;
-      env: Env;
-    },
-    dataCallback: (data: any) => any,
-  ) {
-    if (!ctx.messages?.length) {
-      return new Response("No messages provided", { status: 400 });
-    }
-
-    return this.provider.handleStream(
-      {
-        systemPrompt: ctx.systemPrompt,
-        preprocessedContext: ctx.preprocessedContext,
-        maxTokens: ctx.maxTokens,
-        messages: ctx.messages,
-        model: ctx.model,
-        env: ctx.env,
-        disableWebhookGeneration: ctx.disableWebhookGeneration,
-      },
-      dataCallback,
-    );
-  }
+  private static provider = new XaiChatProvider();
+
+  static async handleXaiStream(
+    ctx: {
+      openai: OpenAI;
+      systemPrompt: any;
+      preprocessedContext: any;
+      maxTokens: unknown | number | undefined;
+      messages: any;
+      disableWebhookGeneration: boolean;
+      model: any;
+      env: Env;
+    },
+    dataCallback: (data: any) => any,
+  ) {
+    if (!ctx.messages?.length) {
+      return new Response("No messages provided", { status: 400 });
+    }
+
+    return this.provider.handleStream(
+      {
+        systemPrompt: ctx.systemPrompt,
+        preprocessedContext: ctx.preprocessedContext,
+        maxTokens: ctx.maxTokens,
+        messages: ctx.messages,
+        model: ctx.model,
+        env: ctx.env,
+        disableWebhookGeneration: ctx.disableWebhookGeneration,
+      },
+      dataCallback,
+    );
+  }
 }
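
Note: this mirror renders only the provider diffs; the test files that give the commit its name ("add lib tests") are not shown. As a rough sketch of what a unit test for the shared processChunk contract above could look like — assuming vitest as the runner and guessing the provider's import path, neither of which is confirmed by this page:

import { describe, expect, it, vi } from "vitest";
// Hypothetical import path; the diff does not show actual file locations.
import { CerebrasChatProvider } from "../providers/cerebras";

describe("CerebrasChatProvider.processChunk", () => {
  it("returns true (break the stream) when finish_reason is \"stop\"", async () => {
    const provider = new CerebrasChatProvider();
    const dataCallback = vi.fn();
    const stopChunk = { choices: [{ finish_reason: "stop" }] };

    const done = await provider.processChunk(stopChunk, dataCallback);

    expect(done).toBe(true);
    expect(dataCallback).toHaveBeenCalledWith({ type: "chat", data: stopChunk });
  });

  it("returns false (continue the stream) for intermediate chunks", async () => {
    const provider = new CerebrasChatProvider();
    const dataCallback = vi.fn();
    const chunk = { choices: [{ delta: { content: "hi" }, finish_reason: null }] };

    const done = await provider.processChunk(chunk, dataCallback);

    expect(done).toBe(false);
    expect(dataCallback).toHaveBeenCalledWith({ type: "chat", data: chunk });
  });
});

The same two cases would apply to every provider touched by this diff, since they all implement the same convention: true breaks the stream, false continues it.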