Remove unused services and refactor SDK structure

Deleted outdated SDKs and services, including DocumentService and markdown-sdk. Consolidated the remaining provider SDKs into a unified "providers" directory to improve maintainability, updated imports accordingly, and renamed the generic Sdk helper class to Utils for consistency.
This commit is contained in:
geoffsee
2025-05-27 14:46:32 -04:00
committed by Geoff Seemueller
parent ceeefeff14
commit fc22278b58
24 changed files with 28 additions and 521 deletions
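
The central rename visible in the diffs below is the general-purpose Sdk helper class becoming Utils. A minimal before/after sketch of a call site, mirroring the AssistantSdk diff:

// Before: helpers hung off a generically named Sdk class
import { Sdk } from "./sdk";
const today = Sdk.getCurrentDate();

// After: same helpers, renamed to Utils for clarity
import { Utils } from "./utils";
const today = Utils.getCurrentDate();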

View File

@@ -1,4 +1,4 @@
-import { Sdk } from "./sdk";
+import { Utils } from "./utils";
 import few_shots from "../prompts/few_shots";
 export class AssistantSdk {
@@ -12,10 +12,10 @@ export class AssistantSdk {
     userTimezone = "UTC",
     userLocation = "",
   } = params;
-  const selectedFewshots = Sdk.selectEquitably?.(few_shots) || few_shots;
+  const selectedFewshots = Utils.selectEquitably?.(few_shots) || few_shots;
   const sdkDate =
-    typeof Sdk.getCurrentDate === "function"
-      ? Sdk.getCurrentDate()
+    typeof Utils.getCurrentDate === "function"
+      ? Utils.getCurrentDate()
       : new Date().toISOString();
   const [currentDate] = sdkDate.split("T");
   const now = new Date();

View File

@@ -1,54 +0,0 @@
export class MarkdownSdk {
static formatContextContainer(contextContainer) {
let markdown = "# Assistant Tools Results\n\n";
for (const [key, value] of contextContainer.entries()) {
markdown += `## ${this._escapeForMarkdown(key)}\n\n`;
markdown += this._formatValue(value);
}
return markdown.trim();
}
static _formatValue(value, depth = 0) {
if (Array.isArray(value)) {
return this._formatArray(value, depth);
} else if (value && typeof value === "object") {
return this._formatObject(value, depth);
} else {
return this._formatPrimitive(value, depth);
}
}
static _formatArray(arr, depth) {
let output = "";
arr.forEach((item, i) => {
output += `### Item ${i + 1}\n`;
output += this._formatValue(item, depth + 1);
output += "\n";
});
return output;
}
static _formatObject(obj, depth) {
return (
Object.entries(obj)
.map(
([k, v]) =>
`- **${this._escapeForMarkdown(k)}**: ${this._escapeForMarkdown(v)}`,
)
.join("\n") + "\n\n"
);
}
static _formatPrimitive(value, depth) {
return `${this._escapeForMarkdown(String(value))}\n\n`;
}
static _escapeForMarkdown(text) {
if (typeof text !== "string") {
text = String(text);
}
return text.replace(/(\*|`|_|~)/g, "\\$1");
}
}
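
For reference, the deleted MarkdownSdk consumed any container exposing entries() (e.g. a Map) and rendered one section per tool result. A hypothetical usage sketch, since no call site appears in this commit:

// Hypothetical input; keys and values are invented for illustration.
const contextContainer = new Map<string, unknown>([
  ["web_search", [{ title: "Example result" }]],
  ["calculator", 42],
]);
// Yields "# Assistant Tools Results" with a "## <key>" section per entry,
// escaping *, `, _ and ~ so tool output cannot break the markdown structure.
const markdown = MarkdownSdk.formatContextContainer(contextContainer);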

View File

@@ -1,156 +0,0 @@
interface BaseMessage {
role: "user" | "assistant" | "system";
}
interface TextMessage extends BaseMessage {
content: string;
}
interface O1Message extends BaseMessage {
content: Array<{
type: string;
text: string;
}>;
}
interface LlamaMessage extends BaseMessage {
content: Array<{
type: "text" | "image";
data: string;
}>;
}
interface MessageConverter<T extends BaseMessage, U extends BaseMessage> {
convert(message: T): U;
convertBatch(messages: T[]): U[];
}
class TextToO1Converter implements MessageConverter<TextMessage, O1Message> {
convert(message: TextMessage): O1Message {
return {
role: message.role,
content: [
{
type: "text",
text: message.content,
},
],
};
}
convertBatch(messages: TextMessage[]): O1Message[] {
return messages.map((msg) => this.convert(msg));
}
}
class O1ToTextConverter implements MessageConverter<O1Message, TextMessage> {
convert(message: O1Message): TextMessage {
return {
role: message.role,
content: message.content.map((item) => item.text).join("\n"),
};
}
convertBatch(messages: O1Message[]): TextMessage[] {
return messages.map((msg) => this.convert(msg));
}
}
class TextToLlamaConverter
implements MessageConverter<TextMessage, LlamaMessage>
{
convert(message: TextMessage): LlamaMessage {
return {
role: message.role,
content: [
{
type: "text",
data: message.content,
},
],
};
}
convertBatch(messages: TextMessage[]): LlamaMessage[] {
return messages.map((msg) => this.convert(msg));
}
}
class LlamaToTextConverter
implements MessageConverter<LlamaMessage, TextMessage>
{
convert(message: LlamaMessage): TextMessage {
return {
role: message.role,
content: message.content
.filter((item) => item.type === "text")
.map((item) => item.data)
.join("\n"),
};
}
convertBatch(messages: LlamaMessage[]): TextMessage[] {
return messages.map((msg) => this.convert(msg));
}
}
class MessageConverterFactory {
static createConverter(
fromFormat: string,
toFormat: string,
): MessageConverter<any, any> {
const key = `${fromFormat}->${toFormat}`;
const converters = {
"text->o1": new TextToO1Converter(),
"o1->text": new O1ToTextConverter(),
"text->llama": new TextToLlamaConverter(),
"llama->text": new LlamaToTextConverter(),
};
const converter = converters[key];
if (!converter) {
throw new Error(`Unsupported conversion: ${key}`);
}
return converter;
}
}
function detectMessageFormat(message: any): string {
if (typeof message.content === "string") {
return "text";
}
if (Array.isArray(message.content)) {
if (message.content[0]?.type === "text" && "text" in message.content[0]) {
return "o1";
}
if (message.content[0]?.type && "data" in message.content[0]) {
return "llama";
}
}
throw new Error("Unknown message format");
}
function convertMessage(message: any, targetFormat: string): any {
const sourceFormat = detectMessageFormat(message);
if (sourceFormat === targetFormat) {
return message;
}
const converter = MessageConverterFactory.createConverter(
sourceFormat,
targetFormat,
);
return converter.convert(message);
}
export {
MessageConverterFactory,
convertMessage,
detectMessageFormat,
type BaseMessage,
type TextMessage,
type O1Message,
type LlamaMessage,
type MessageConverter,
};
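
A short sketch of how this deleted converter module was driven; the messages are invented for illustration:

// Illustrative only.
const textMsg = { role: "user" as const, content: "hello" };
detectMessageFormat(textMsg); // "text" (string content)
const o1Msg = convertMessage(textMsg, "o1");
// => { role: "user", content: [{ type: "text", text: "hello" }] }
convertMessage(o1Msg, "o1") === o1Msg; // true: same-format conversion is a no-op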

View File

@@ -1,100 +0,0 @@
import { OpenAI } from "openai";
import {
_NotCustomized,
ISimpleType,
ModelPropertiesDeclarationToProperties,
ModelSnapshotType2,
UnionStringArray,
} from "mobx-state-tree";
import ChatSdk from "../chat-sdk";
export class CerebrasSdk {
static async handleCerebrasStream(
param: {
openai: OpenAI;
systemPrompt: any;
disableWebhookGeneration: boolean;
preprocessedContext: ModelSnapshotType2<
ModelPropertiesDeclarationToProperties<{
role: ISimpleType<UnionStringArray<string[]>>;
content: ISimpleType<unknown>;
}>,
_NotCustomized
>;
maxTokens: unknown | number | undefined;
messages: any;
model: string;
env: Env;
},
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const openai = new OpenAI({
baseURL: "https://api.cerebras.ai/v1",
apiKey: param.env.CEREBRAS_API_KEY,
});
return CerebrasSdk.streamCerebrasResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
openai: openai,
},
dataCallback,
);
}
private static async streamCerebrasResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | unknown | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => void,
) {
const tuningParams: Record<string, any> = {};
const llamaTuningParams = {
temperature: 0.86,
top_p: 0.98,
presence_penalty: 0.1,
frequency_penalty: 0.3,
max_tokens: opts.maxTokens,
};
const getLlamaTuningParams = () => {
return llamaTuningParams;
};
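// Note: tuningParams / llamaTuningParams above are defined but never passed to
// the request below, so this (now deleted) code streamed with API defaults.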
const cerebrasStream = await opts.openai.chat.completions.create({
model: opts.model,
messages: messages,
stream: true,
});
for await (const chunk of cerebrasStream) {
dataCallback({ type: "chat", data: chunk });
}
}
}

View File

@@ -1,100 +0,0 @@
import Anthropic from "@anthropic-ai/sdk";
import { OpenAI } from "openai";
import {
_NotCustomized,
ISimpleType,
ModelPropertiesDeclarationToProperties,
ModelSnapshotType2,
UnionStringArray,
} from "mobx-state-tree";
import ChatSdk from "../chat-sdk";
export class ClaudeChatSdk {
private static async streamClaudeResponse(
messages: any[],
param: {
model: string;
maxTokens: number | unknown | undefined;
anthropic: Anthropic;
},
dataCallback: (data: any) => void,
) {
const claudeStream = await param.anthropic.messages.create({
stream: true,
model: param.model,
max_tokens: param.maxTokens,
messages: messages,
});
for await (const chunk of claudeStream) {
if (chunk.type === "message_stop") {
dataCallback({
type: "chat",
data: {
choices: [
{
delta: { content: "" },
logprobs: null,
finish_reason: "stop",
},
],
},
});
break;
}
dataCallback({ type: "chat", data: chunk });
}
}
static async handleClaudeStream(
param: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: ModelSnapshotType2<
ModelPropertiesDeclarationToProperties<{
role: ISimpleType<UnionStringArray<string[]>>;
content: ISimpleType<unknown>;
}>,
_NotCustomized
>;
maxTokens: unknown | number | undefined;
messages: any;
model: string;
env: Env;
},
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const anthropic = new Anthropic({
apiKey: env.ANTHROPIC_API_KEY,
});
return ClaudeChatSdk.streamClaudeResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
anthropic: anthropic,
},
dataCallback,
);
}
}

View File

@@ -1,174 +0,0 @@
import { OpenAI } from "openai";
import {
_NotCustomized,
ISimpleType,
ModelPropertiesDeclarationToProperties,
ModelSnapshotType2,
UnionStringArray,
} from "mobx-state-tree";
import ChatSdk from "../chat-sdk";
export class CloudflareAISdk {
static async handleCloudflareAIStream(
param: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: ModelSnapshotType2<
ModelPropertiesDeclarationToProperties<{
role: ISimpleType<UnionStringArray<string[]>>;
content: ISimpleType<unknown>;
}>,
_NotCustomized
>;
maxTokens: unknown | number | undefined;
messages: any;
model: string;
env: Env;
},
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const cfAiURL = `https://api.cloudflare.com/client/v4/accounts/${env.CLOUDFLARE_ACCOUNT_ID}/ai/v1`;
console.log({ cfAiURL });
const openai = new OpenAI({
apiKey: env.CLOUDFLARE_API_KEY,
baseURL: cfAiURL,
});
return CloudflareAISdk.streamCloudflareAIResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
openai: openai,
},
dataCallback,
);
}
private static async streamCloudflareAIResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | unknown | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => void,
) {
const tuningParams: Record<string, any> = {};
const llamaTuningParams = {
temperature: 0.86,
top_p: 0.98,
presence_penalty: 0.1,
frequency_penalty: 0.3,
max_tokens: opts.maxTokens,
};
const getLlamaTuningParams = () => {
return llamaTuningParams;
};
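// Note: as in the Cerebras SDK, these tuning params are defined but never
// applied; generationParams below only carries model, messages, stream, and
// a per-model max_tokens.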
let modelPrefix = `@cf/meta`;
if (opts.model.toLowerCase().includes("llama")) {
modelPrefix = `@cf/meta`;
}
if (opts.model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
modelPrefix = `@hf/nousresearch`;
}
if (opts.model.toLowerCase().includes("mistral-7b-instruct")) {
modelPrefix = `@hf/mistral`;
}
if (opts.model.toLowerCase().includes("gemma")) {
modelPrefix = `@cf/google`;
}
if (opts.model.toLowerCase().includes("deepseek")) {
modelPrefix = `@cf/deepseek-ai`;
}
if (opts.model.toLowerCase().includes("openchat-3.5-0106")) {
modelPrefix = `@cf/openchat`;
}
const isNeuralChat = opts.model
.toLowerCase()
.includes("neural-chat-7b-v3-1-awq");
if (
isNeuralChat ||
opts.model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq") ||
opts.model.toLowerCase().includes("zephyr-7b-beta-awq") ||
opts.model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")
) {
modelPrefix = `@hf/thebloke`;
}
const generationParams: Record<string, any> = {
model: `${modelPrefix}/${opts.model}`,
messages: messages,
stream: true,
};
if (modelPrefix === "@cf/meta") {
generationParams["max_tokens"] = 4096;
}
if (modelPrefix === "@hf/mistral") {
generationParams["max_tokens"] = 4096;
}
if (opts.model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
generationParams["max_tokens"] = 1000;
}
if (opts.model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq")) {
generationParams["max_tokens"] = 1000;
}
if (opts.model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")) {
generationParams["max_tokens"] = 590;
}
if (opts.model.toLowerCase().includes("deepseek-math-7b-instruct")) {
generationParams["max_tokens"] = 512;
}
if (opts.model.toLowerCase().includes("neural-chat-7b-v3-1-awq")) {
generationParams["max_tokens"] = 590;
}
if (opts.model.toLowerCase().includes("openchat-3.5-0106")) {
generationParams["max_tokens"] = 2000;
}
const cloudflareAiStream = await opts.openai.chat.completions.create({
...generationParams,
});
for await (const chunk of cloudflareAiStream) {
dataCallback({ type: "chat", data: chunk });
}
}
}

View File

@@ -1,93 +0,0 @@
import { OpenAI } from "openai";
import {
_NotCustomized,
castToSnapshot,
getSnapshot,
ISimpleType,
ModelPropertiesDeclarationToProperties,
ModelSnapshotType2,
UnionStringArray,
} from "mobx-state-tree";
import Message from "../../models/Message";
import { MarkdownSdk } from "../markdown-sdk";
import ChatSdk from "../chat-sdk";
export class FireworksAiChatSdk {
private static async streamFireworksResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | unknown | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => void,
) {
let modelPrefix = "accounts/fireworks/models/";
if (opts.model.toLowerCase().includes("yi-")) {
modelPrefix = "accounts/yi-01-ai/models/";
}
const fireworksStream = await opts.openai.chat.completions.create({
model: `${modelPrefix}${opts.model}`,
messages: messages,
stream: true,
});
for await (const chunk of fireworksStream) {
dataCallback({ type: "chat", data: chunk });
}
}
static async handleFireworksStream(
param: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: ModelSnapshotType2<
ModelPropertiesDeclarationToProperties<{
role: ISimpleType<UnionStringArray<string[]>>;
content: ISimpleType<unknown>;
}>,
_NotCustomized
>;
maxTokens: number;
messages: any;
model: any;
env: Env;
},
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const fireworksOpenAIClient = new OpenAI({
apiKey: param.env.FIREWORKS_API_KEY,
baseURL: "https://api.fireworks.ai/inference/v1",
});
return FireworksAiChatSdk.streamFireworksResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
openai: fireworksOpenAIClient,
},
dataCallback,
);
}
}

View File

@@ -1,97 +0,0 @@
import { OpenAI } from "openai";
import ChatSdk from "../chat-sdk";
import { StreamParams } from "../../services/ChatService";
export class GoogleChatSdk {
static async handleGoogleStream(
param: StreamParams,
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const openai = new OpenAI({
baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
apiKey: param.env.GEMINI_API_KEY,
});
return GoogleChatSdk.streamGoogleResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
openai: openai,
},
dataCallback,
);
}
private static async streamGoogleResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | unknown | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => void,
) {
const googleStream = await opts.openai.chat.completions.create({
model: opts.model,
messages: messages,
stream: true,
});
for await (const chunk of googleStream) {
console.log(JSON.stringify(chunk));
if (chunk.choices?.[0]?.finish_reason === "stop") {
dataCallback({
type: "chat",
data: {
choices: [
{
delta: { content: chunk.choices[0].delta.content || "" },
finish_reason: "stop",
index: chunk.choices[0].index,
},
],
},
});
break;
} else {
dataCallback({
type: "chat",
data: {
choices: [
{
delta: { content: chunk.choices?.[0]?.delta?.content || "" },
finish_reason: null,
index: chunk.choices?.[0]?.index || 0,
},
],
},
});
}
}
}
}

View File

@@ -1,99 +0,0 @@
import { OpenAI } from "openai";
import {
_NotCustomized,
ISimpleType,
ModelPropertiesDeclarationToProperties,
ModelSnapshotType2,
UnionStringArray,
} from "mobx-state-tree";
import ChatSdk from "../chat-sdk";
export class GroqChatSdk {
static async handleGroqStream(
param: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: ModelSnapshotType2<
ModelPropertiesDeclarationToProperties<{
role: ISimpleType<UnionStringArray<string[]>>;
content: ISimpleType<unknown>;
}>,
_NotCustomized
>;
maxTokens: unknown | number | undefined;
messages: any;
model: string;
env: Env;
},
dataCallback: (data) => void,
) {
const {
preprocessedContext,
messages,
env,
maxTokens,
systemPrompt,
model,
} = param;
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const openai = new OpenAI({
baseURL: "https://api.groq.com/openai/v1",
apiKey: param.env.GROQ_API_KEY,
});
return GroqChatSdk.streamGroqResponse(
safeMessages,
{
model: param.model,
maxTokens: param.maxTokens,
openai: openai,
},
dataCallback,
);
}
private static async streamGroqResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | unknown | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => void,
) {
const tuningParams: Record<string, any> = {};
const llamaTuningParams = {
temperature: 0.86,
top_p: 0.98,
presence_penalty: 0.1,
frequency_penalty: 0.3,
max_tokens: opts.maxTokens,
};
const getLlamaTuningParams = () => {
return llamaTuningParams;
};
const groqStream = await opts.openai.chat.completions.create({
model: opts.model,
messages: messages,
frequency_penalty: 2,
stream: true,
temperature: 0.78,
});
for await (const chunk of groqStream) {
dataCallback({ type: "chat", data: chunk });
}
}
}

View File

@@ -1,95 +0,0 @@
import { OpenAI } from "openai";
import ChatSdk from "../chat-sdk";
export class OpenAiChatSdk {
static async handleOpenAiStream(
ctx: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: any;
maxTokens: unknown | number | undefined;
messages: any;
model: any;
},
dataCallback: (data: any) => any,
) {
const {
openai,
systemPrompt,
maxTokens,
messages,
model,
preprocessedContext,
} = ctx;
if (!messages?.length) {
return new Response("No messages provided", { status: 400 });
}
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
return OpenAiChatSdk.streamOpenAiResponse(
safeMessages,
{
model,
maxTokens: maxTokens as number,
openai: openai,
},
dataCallback,
);
}
private static async streamOpenAiResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => any,
) {
const isO1 = () => opts.model === "o1-preview" || opts.model === "o1-mini";
const tuningParams: Record<string, any> = {};
const gpt4oTuningParams = {
temperature: 0.86,
top_p: 0.98,
presence_penalty: 0.1,
frequency_penalty: 0.3,
max_tokens: opts.maxTokens,
};
const getTuningParams = () => {
if (isO1()) {
tuningParams["temperature"] = 1;
tuningParams["max_completion_tokens"] = opts.maxTokens + 10000;
return tuningParams;
}
return gpt4oTuningParams;
};
const openAIStream = await opts.openai.chat.completions.create({
model: opts.model,
messages: messages,
stream: true,
...getTuningParams(),
});
for await (const chunk of openAIStream) {
dataCallback({ type: "chat", data: chunk });
}
}
}

View File

@@ -1,114 +0,0 @@
import { OpenAI } from "openai";
import ChatSdk from "../chat-sdk";
export class XaiChatSdk {
static async handleXaiStream(
ctx: {
openai: OpenAI;
systemPrompt: any;
preprocessedContext: any;
maxTokens: unknown | number | undefined;
messages: any;
disableWebhookGeneration: boolean;
model: any;
env: Env;
},
dataCallback: (data: any) => any,
) {
const {
openai,
systemPrompt,
maxTokens,
messages,
env,
model,
preprocessedContext,
} = ctx;
if (!messages?.length) {
return new Response("No messages provided", { status: 400 });
}
const getMaxTokens = async (mt) => {
if (mt) {
return await ChatSdk.calculateMaxTokens(
JSON.parse(JSON.stringify(messages)),
{
env,
maxTokens: mt,
},
);
} else {
return undefined;
}
};
const assistantPrompt = ChatSdk.buildAssistantPrompt({
maxTokens: maxTokens,
});
const safeMessages = ChatSdk.buildMessageChain(messages, {
systemPrompt: systemPrompt,
model,
assistantPrompt,
toolResults: preprocessedContext,
});
const xAiClient = new OpenAI({
baseURL: "https://api.x.ai/v1",
apiKey: env.XAI_API_KEY,
});
return XaiChatSdk.streamOpenAiResponse(
safeMessages,
{
model,
maxTokens: maxTokens as number,
openai: xAiClient,
},
dataCallback,
);
}
private static async streamOpenAiResponse(
messages: any[],
opts: {
model: string;
maxTokens: number | undefined;
openai: OpenAI;
},
dataCallback: (data: any) => any,
) {
const isO1 = () => opts.model === "o1-preview" || opts.model === "o1-mini";
const tuningParams: Record<string, any> = {};
const gpt4oTuningParams = {
temperature: 0.75,
};
const getTuningParams = () => {
if (isO1()) {
tuningParams["temperature"] = 1;
tuningParams["max_completion_tokens"] = opts.maxTokens + 10000;
return tuningParams;
}
return gpt4oTuningParams;
};
const xAIStream = await opts.openai.chat.completions.create({
model: opts.model,
messages: messages,
stream: true,
...getTuningParams(),
});
for await (const chunk of xAIStream) {
dataCallback({ type: "chat", data: chunk });
}
}
}

View File

@@ -1,97 +0,0 @@
export interface AdvancedSearchParams {
mainQuery?: string;
titleQuery?: string;
descriptionQuery?: string;
contentQuery?: string;
mustInclude?: string[];
mustNotInclude?: string[];
exactPhrases?: string[];
urlContains?: string;
}
export class PerigonSearchBuilder {
private buildExactPhraseQuery(phrases: string[]): string {
return phrases.map((phrase) => `"${phrase}"`).join(" AND ");
}
private buildMustIncludeQuery(terms: string[]): string {
return terms.join(" AND ");
}
private buildMustNotIncludeQuery(terms: string[]): string {
return terms.map((term) => `NOT ${term}`).join(" AND ");
}
buildSearchParams(params: AdvancedSearchParams): SearchParams {
const searchParts: string[] = [];
const searchParams: SearchParams = {};
if (params.mainQuery) {
searchParams.q = params.mainQuery;
}
if (params.titleQuery) {
searchParams.title = params.titleQuery;
}
if (params.descriptionQuery) {
searchParams.desc = params.descriptionQuery;
}
if (params.contentQuery) {
searchParams.content = params.contentQuery;
}
if (params.exactPhrases?.length) {
searchParts.push(this.buildExactPhraseQuery(params.exactPhrases));
}
if (params.mustInclude?.length) {
searchParts.push(this.buildMustIncludeQuery(params.mustInclude));
}
if (params.mustNotInclude?.length) {
searchParts.push(this.buildMustNotIncludeQuery(params.mustNotInclude));
}
if (searchParts.length) {
searchParams.q = searchParams.q
? `(${searchParams.q}) AND (${searchParts.join(" AND ")})`
: searchParts.join(" AND ");
}
if (params.urlContains) {
searchParams.url = `"${params.urlContains}"`;
}
return searchParams;
}
}
export interface SearchParams {
/** Main search query parameter that searches across title, description and content */
q?: string;
/** Search only in article titles */
title?: string;
/** Search only in article descriptions */
desc?: string;
/** Search only in article content */
content?: string;
/** Search in article URLs */
url?: string;
/** Additional search parameters can be added here as needed */
[key: string]: string | undefined;
}
export interface Article {
translation: {
title: string;
description: string;
content: string;
url: string;
};
}
export interface SearchResponse {
articles?: Article[];
}
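
A worked example of how the builder composes the q parameter; the values are invented for illustration:

const params = new PerigonSearchBuilder().buildSearchParams({
  mainQuery: "semiconductors",
  exactPhrases: ["chip shortage"],
  mustNotInclude: ["crypto"],
  urlContains: "reuters.com",
});
// params.q   === '(semiconductors) AND ("chip shortage" AND NOT crypto)'
// params.url === '"reuters.com"'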

View File

@@ -1,38 +0,0 @@
export class StreamProcessorSdk {
static preprocessContent(buffer: string): string {
return buffer
.replace(/(\n\- .*\n)+/g, "$&\n")
.replace(/(\n\d+\. .*\n)+/g, "$&\n")
.replace(/\n{3,}/g, "\n\n");
}
static async handleStreamProcessing(
stream: any,
controller: ReadableStreamDefaultController,
) {
const encoder = new TextEncoder();
let buffer = "";
try {
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || "";
buffer += content;
let processedContent = StreamProcessorSdk.preprocessContent(buffer);
controller.enqueue(encoder.encode(processedContent));
buffer = "";
}
if (buffer) {
const processedContent = StreamProcessorSdk.preprocessContent(buffer);
controller.enqueue(encoder.encode(processedContent));
}
controller.close();
} catch (error) {
// Closing an already-errored controller throws, so close only on the
// success path instead of in a shared finally block.
controller.error(error);
throw new Error("Stream processing error");
}
}
}
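
A sketch of how the deleted processor would be wired into a ReadableStream; chatStream is assumed to be any async iterable of OpenAI-style chunks:

// Hypothetical wiring; no call site appears in this commit.
const readable = new ReadableStream({
  async start(controller) {
    await StreamProcessorSdk.handleStreamProcessing(chatStream, controller);
  },
});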

View File

@@ -1,4 +1,4 @@
-export class Sdk {
+export class Utils {
   static getSeason(date: string): string {
     const hemispheres = {
       Northern: ["Winter", "Spring", "Summer", "Autumn"],