- Add killport.js script for terminating processes on specific ports (see the sketch after this list)

- Introduce `supportedModels` in `ClientChatStore` and update model validation logic (sketched below)
- Enhance OpenAI inferencing with local setup adaptations and improved streaming options
- Modify ChatService to handle local and remote model fetching (see the combined ChatService sketch below, which also covers streaming)
- Update input menu to dynamically fetch and display supported models
- Add start_inference_server.sh for initiating local inference server
- Upgrade OpenAI SDK to v5.0.1 and adjust dependencies accordingly
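
The killport.js script itself is not part of the visible diff, so the following is only a minimal sketch of what it might look like (written here in TypeScript; the lsof-based lookup and CLI shape are assumptions, not the committed implementation):

    // Hypothetical sketch: find PIDs bound to a TCP port via lsof, then SIGTERM them.
    import { execSync } from "node:child_process";

    const port = Number(process.argv[2]);
    if (!Number.isInteger(port) || port <= 0) {
      console.error("usage: node killport.js <port>");
      process.exit(1);
    }

    let pids: number[] = [];
    try {
      pids = execSync(`lsof -ti tcp:${port}`, { encoding: "utf8" })
        .split("\n")
        .filter(Boolean)
        .map(Number);
    } catch {
      // lsof exits non-zero when no process is bound to the port.
    }

    for (const pid of pids) {
      process.kill(pid, "SIGTERM");
      console.log(`Sent SIGTERM to ${pid} (port ${port})`);
    }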
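
The store code is likewise not included in this diff; a rough sketch of how `supportedModels`-based validation could look (class shape and method names are hypothetical):

    // Hypothetical store shape; the real ClientChatStore is not shown in this commit view.
    export class ClientChatStore {
      supportedModels: string[] = [];
      model = "";

      // Replace any hardcoded model list with one fetched at runtime.
      setSupportedModels(models: string[]): void {
        this.supportedModels = models;
        // Fall back to the first supported model if the current one is no longer offered.
        if (!this.supportedModels.includes(this.model)) {
          this.model = this.supportedModels[0] ?? "";
        }
      }

      // Validation now checks membership in the dynamic list.
      setModel(model: string): boolean {
        if (!this.supportedModels.includes(model)) return false;
        this.model = model;
        return true;
      }
    }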
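
ChatService is also not shown here, so this is a sketch under assumptions: with the OpenAI SDK pointed at either a local or a remote base URL, model listing (for the input menu) and streamed completions could look roughly like this (the LOCAL_INFERENCE_URL env var and function names are hypothetical; models.list and stream: true are standard OpenAI SDK calls):

    import OpenAI from "openai";

    // Assumption: the local server (started via start_inference_server.sh)
    // exposes an OpenAI-compatible API.
    const client = new OpenAI({
      baseURL: process.env.LOCAL_INFERENCE_URL ?? undefined, // unset -> default remote API
      apiKey: process.env.OPENAI_API_KEY ?? "sk-local",      // local servers typically ignore the key
    });

    // Model list for the input menu: local models when configured,
    // otherwise whatever the remote API reports.
    export async function fetchSupportedModels(): Promise<string[]> {
      const page = await client.models.list();
      return page.data.map((m) => m.id);
    }

    // Streamed completion, emitting content deltas as they arrive.
    export async function streamChat(
      model: string,
      messages: { role: "system" | "user" | "assistant"; content: string }[],
    ): Promise<void> {
      const stream = await client.chat.completions.create({ model, messages, stream: true });
      for await (const chunk of stream) {
        const delta = chunk.choices[0]?.delta?.content;
        if (delta) process.stdout.write(delta);
      }
    }
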
geoffsee authored 2025-05-29 19:28:54 -04:00
committed by Geoff Seemueller
parent c9ee7c7690
commit cc0da17b5f
11 changed files with 204 additions and 23 deletions

@@ -59,4 +59,38 @@ export class Utils {
    return result;
  }

  static normalizeWithBlanks<T extends Normalize.ChatMessage>(msgs: T[]): T[] {
    const out: T[] = [];
    // In local mode the first turn is expected to come from the user.
    let expected: Normalize.Role = "user";
    for (const m of msgs) {
      // Insert blank turns until the message's role matches the expected
      // user/assistant/user/... alternation.
      while (m.role !== expected) {
        out.push(Normalize.makeBlank(expected) as T);
        expected = expected === "user" ? "assistant" : "user";
      }
      out.push(m);
      expected = expected === "user" ? "assistant" : "user";
    }
    return out;
  }
}

module Normalize {
  export type Role = "user" | "assistant";

  export interface ChatMessage extends Record<any, any> {
    role: Role;
  }

  export const makeBlank = (role: Role): ChatMessage => ({
    role,
    content: "",
  });
}
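
As a quick illustration of the padding behavior (the call site is hypothetical, but the output follows directly from the code above):

    const history: { role: "user" | "assistant"; content: string }[] = [
      { role: "assistant", content: "Hi! How can I help?" },
      { role: "user", content: "Summarize this repo." },
      { role: "user", content: "Keep it short." },
    ];

    const normalized = Utils.normalizeWithBlanks(history);
    // Result:
    // [
    //   { role: "user", content: "" },        <- blank before the leading assistant turn
    //   { role: "assistant", content: "Hi! How can I help?" },
    //   { role: "user", content: "Summarize this repo." },
    //   { role: "assistant", content: "" },   <- blank between consecutive user turns
    //   { role: "user", content: "Keep it short." },
    // ]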