mirror of
https://github.com/geoffsee/predict-otron-9001.git
synced 2025-09-08 22:46:44 +00:00
Refactor `apply_cached_repeat_penalty` for optimized caching and reuse; add extensive unit tests and integrate special handling for Gemma-specific models.
Remove `test_request.sh`, deprecated functionality, and unused imports; introduce a new CLI tool (`cli.ts`) for testing the inference engine, and adjust handling of non-streaming and streaming chat completions.
- Add CPU fallback support for text generation when the primary device is unsupported. - Introduce an `execute_with_fallback` method to handle device-compatibility and shape-mismatch errors. - Extend unit tests to reproduce tensor shape-mismatch errors specific to certain model configurations. - Increase HTTP timeout limits in the `curl_chat_stream.sh` script for reliable API testing of the chat completion endpoint with gemma3 (non-streaming). - Add a benchmarking guide with HTML reporting, a Leptos chat crate, and middleware for metrics tracking.
This commit is contained in:
51
crates/leptos-chat/Cargo.toml
Normal file
51
crates/leptos-chat/Cargo.toml
Normal file
@@ -0,0 +1,51 @@
|
||||
[package]
name = "leptos-chat"
version = "0.1.0"
edition = "2021"

[lib]
# Compiled to a cdylib so wasm-bindgen/trunk can produce a browser wasm module.
crate-type = ["cdylib"]

[dependencies]
# Client-side rendering only (csr) — no server-side Leptos features.
leptos = { version = "0.6", features = ["csr"] }
leptos_meta = { version = "0.6", features = ["csr"] }
leptos_router = { version = "0.6", features = ["csr"] }
wasm-bindgen = "0.2"
console_error_panic_hook = "0.1"
console_log = "1"
log = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
js-sys = "0.3"
either = { version = "1.9", features = ["serde"] }
# Make async-openai optional and only included for non-wasm targets
# NOTE(review): comment above is stale — async-openai-wasm is neither optional
# nor target-gated here; confirm intent or gate it under [target.'cfg(...)'].
async-openai-wasm = { default-features = false, version = "0.29" }
# Only include tokio for non-wasm targets
#tokio = { version = "1", default-features = false, features = ["sync", "macros", "io-util", "rt"] }
#reqwest = {version = "0.12.23", default-features = false, optional = false}
futures-util = "0.3"

# Browser APIs used by the chat UI (DOM access, input elements, key events).
web-sys = { version = "0.3", features = [
    "console",
    "Window",
    "Document",
    "Element",
    "HtmlElement",
    "HtmlInputElement",
    "HtmlTextAreaElement",
    "Event",
    "EventTarget",
    "KeyboardEvent",
] }
gloo-net = "0.6.0"

[dependencies.uuid]
version = "1.0"
features = [
    "v4",                # Lets you generate random UUIDs
    "fast-rng",          # Use a faster (but still sufficiently random) RNG
    "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
    "js",                # Enable JavaScript RNG for WASM targets
]
|
7
crates/leptos-chat/Trunk.toml
Normal file
7
crates/leptos-chat/Trunk.toml
Normal file
@@ -0,0 +1,7 @@
|
||||
[build]
# Set the RUSTFLAGS environment variable for getrandom's WebAssembly support
rustflags = ["--cfg", "getrandom_backend=\"wasm_js\""]

[serve]
# Use the same port as in the run.sh script
port = 8788
|
15
crates/leptos-chat/index.html
Normal file
15
crates/leptos-chat/index.html
Normal file
@@ -0,0 +1,15 @@
|
||||
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <title>Chat Interface</title>
    <link rel="stylesheet" href="style/main.css" />
</head>
<body>
    <!-- Load the compiled wasm module and run its start function, which
         executes the #[wasm_bindgen(start)] entry point in lib.rs.
         NOTE(review): the './pkg/leptos_chat.js' path assumes wasm-pack-style
         output; trunk normally injects its own loader — confirm which build
         pipeline produces this file. -->
    <script type="module">
        import init from './pkg/leptos_chat.js';
        init();
    </script>
</body>
</html>
|
6
crates/leptos-chat/run.sh
Executable file
6
crates/leptos-chat/run.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/usr/bin/env sh
# Serve the leptos-chat crate locally with trunk.

# Set RUSTFLAGS for getrandom's WebAssembly support
export RUSTFLAGS='--cfg getrandom_backend="wasm_js"'

# Port 8788 matches the [serve].port setting in Trunk.toml.
trunk serve --port 8788
|
599
crates/leptos-chat/src/lib.rs
Normal file
599
crates/leptos-chat/src/lib.rs
Normal file
@@ -0,0 +1,599 @@
|
||||
use leptos::*;
|
||||
use leptos_meta::*;
|
||||
use leptos_router::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::VecDeque;
|
||||
use uuid::Uuid;
|
||||
use js_sys::Date;
|
||||
use web_sys::{HtmlInputElement, KeyboardEvent, SubmitEvent};
|
||||
use futures_util::StreamExt;
|
||||
use async_openai_wasm::{
|
||||
types::{
|
||||
ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs,
|
||||
ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
|
||||
},
|
||||
Client,
|
||||
};
|
||||
use async_openai_wasm::config::OpenAIConfig;
|
||||
use async_openai_wasm::types::ChatCompletionResponseStream;
|
||||
|
||||
/// A single chat message as rendered in the UI, with a client-generated
/// UUID and a JS epoch-millisecond timestamp (`js_sys::Date::now`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    pub id: String,
    // Roles used by this UI: "user", "assistant", "system".
    pub role: String,
    pub content: String,
    // Milliseconds since the Unix epoch, as produced by JS `Date.now()`.
    pub timestamp: f64,
}

/// OpenAI-style message content: either a plain string, or a list of
/// typed content parts (maps of part name -> inner content).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MessageContent(pub either::Either<String, Vec<std::collections::HashMap<String, MessageInnerContent>>>);

/// One content part: either raw text or a map of sub-fields.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MessageInnerContent(pub either::Either<String, std::collections::HashMap<String, String>>);

/// Wire format of a single message inside a `ChatRequest`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    pub role: String,
    pub content: Option<MessageContent>,
    pub name: Option<String>,
}

/// OpenAI-compatible chat completion request body.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatRequest {
    pub model: String,
    pub messages: Vec<ChatMessage>,
    pub max_tokens: Option<usize>,
    pub temperature: Option<f64>,
    pub top_p: Option<f64>,
    pub stream: Option<bool>,
}

/// OpenAI-compatible (non-streaming) chat completion response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatResponse {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub model: String,
    pub choices: Vec<Choice>,
    pub usage: Usage,
}

/// One completion choice within a `ChatResponse`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Choice {
    pub index: usize,
    pub message: ChatMessage,
    pub finish_reason: String,
}

/// Token accounting reported by the server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Usage {
    pub prompt_tokens: usize,
    pub completion_tokens: usize,
    pub total_tokens: usize,
}
|
||||
|
||||
/// Root application component: provides the Leptos metadata context and
/// mounts a router with a single route ("/") rendering [`ChatInterface`].
#[component]
pub fn App() -> impl IntoView {
    provide_meta_context();

    view! {
        <Stylesheet id="leptos" href="/style/main.css"/>
        <Title text="Chat Interface"/>
        <Router>
            <main>
                <Routes>
                    <Route path="/" view=ChatInterface/>
                </Routes>
            </main>
        </Router>
    }
}
|
||||
|
||||
async fn send_chat_request(chat_request: ChatRequest) -> ChatCompletionResponseStream {
|
||||
let config = OpenAIConfig::new().with_api_base("http://localhost:8080".to_string());
|
||||
let client = Client::with_config(config);
|
||||
|
||||
let mut typed_chat = async_openai_wasm::types::CreateChatCompletionRequest {
|
||||
messages: vec![],
|
||||
model: "".to_string(),
|
||||
store: None,
|
||||
reasoning_effort: None,
|
||||
metadata: None,
|
||||
frequency_penalty: None,
|
||||
logit_bias: None,
|
||||
logprobs: None,
|
||||
top_logprobs: None,
|
||||
max_tokens: None,
|
||||
max_completion_tokens: None,
|
||||
n: None,
|
||||
modalities: None,
|
||||
prediction: None,
|
||||
audio: None,
|
||||
presence_penalty: None,
|
||||
response_format: None,
|
||||
seed: None,
|
||||
service_tier: None,
|
||||
stop: None,
|
||||
stream: None,
|
||||
stream_options: None,
|
||||
temperature: None,
|
||||
top_p: None,
|
||||
tools: None,
|
||||
tool_choice: None,
|
||||
parallel_tool_calls: None,
|
||||
user: None,
|
||||
function_call: None,
|
||||
functions: None,
|
||||
web_search_options: None,
|
||||
extra_params: None,
|
||||
};
|
||||
|
||||
typed_chat.messages = chat_request.messages
|
||||
.iter()
|
||||
.map(|msg| {
|
||||
let content = match &msg.content {
|
||||
Some(MessageContent(either::Either::Left(text))) => text.clone(),
|
||||
_ => "".to_string()
|
||||
};
|
||||
let role = msg.role.clone();
|
||||
match role.as_str() {
|
||||
"system" => ChatCompletionRequestSystemMessageArgs::default()
|
||||
.content(content)
|
||||
.build()
|
||||
.expect("failed to build system message")
|
||||
.into(),
|
||||
"user" => ChatCompletionRequestUserMessageArgs::default()
|
||||
.content(content)
|
||||
.build()
|
||||
.expect("failed to build user message")
|
||||
.into(),
|
||||
"assistant" => ChatCompletionRequestAssistantMessageArgs::default()
|
||||
.content(content)
|
||||
.build()
|
||||
.expect("failed to build assistant message")
|
||||
.into(),
|
||||
_ => ChatCompletionRequestUserMessageArgs::default()
|
||||
.content(content)
|
||||
.build()
|
||||
.expect("failed to build default message")
|
||||
.into()
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
client.chat().create_stream(typed_chat).await.unwrap()
|
||||
}
|
||||
|
||||
// #[cfg(not(target_arch = "wasm32"))]
|
||||
// async fn send_chat_request(_chat_request: ChatRequest) -> Result<ChatResponse, String> {
|
||||
// Err("leptos-chat chat request only supported on wasm32 target".to_string())
|
||||
// }
|
||||
|
||||
/// Interactive chat component: keeps the message history in a reactive
/// `VecDeque<Message>` signal, sends the conversation to a local
/// OpenAI-compatible server on submit, and streams the assistant reply
/// token-by-token into a placeholder message.
///
/// NOTE(review): this component builds its own client/request inline and
/// does not use the `send_chat_request` helper above — confirm whether the
/// helper is dead code or intended for this call site.
#[component]
fn ChatInterface() -> impl IntoView {
    // Reactive state: message history, input box contents, in-flight flag.
    let (messages, set_messages) = create_signal::<VecDeque<Message>>(VecDeque::new());
    let (input_value, set_input_value) = create_signal(String::new());
    let (is_loading, set_is_loading) = create_signal(false);

    // Async action driving one request/response round trip per dispatch.
    let send_message = create_action(move |content: &String| {
        let content = content.clone();
        async move {
            // Ignore blank submissions.
            if content.trim().is_empty() {
                return;
            }

            set_is_loading.set(true);

            // Add user message to chat
            let user_message = Message {
                id: Uuid::new_v4().to_string(),
                role: "user".to_string(),
                content: content.clone(),
                timestamp: Date::now(),
            };

            set_messages.update(|msgs| msgs.push_back(user_message.clone()));
            set_input_value.set(String::new());

            let mut chat_messages = Vec::new();

            // Add system message
            let system_message = ChatCompletionRequestSystemMessageArgs::default()
                .content("You are a helpful assistant.")
                .build()
                .expect("failed to build system message");
            chat_messages.push(system_message.into());

            // Add history messages.
            // NOTE(review): the signal was just updated above, so this loop
            // already includes the new user message — which is pushed AGAIN
            // below, so the request appears to contain it twice. Also, every
            // history entry (including assistant replies) is rebuilt as a
            // *user* message here, flattening roles. Confirm both are
            // intentional.
            messages.with(|msgs| {
                for msg in msgs.iter() {
                    let message = ChatCompletionRequestUserMessageArgs::default()
                        .content(msg.content.clone())
                        .build()
                        .expect("failed to build message");
                    chat_messages.push(message.into());
                }
            });

            // Add current user message
            let message = ChatCompletionRequestUserMessageArgs::default()
                .content(user_message.content.clone())
                .build()
                .expect("failed to build user message");
            chat_messages.push(message.into());

            // Hard-coded model and token budget for this UI.
            let request = CreateChatCompletionRequestArgs::default()
                .model("gemma-2b-it")
                .max_tokens(512u32)
                .messages(chat_messages)
                .stream(true) // ensure server streams
                .build()
                .expect("failed to build request");

            // Send request
            let config = OpenAIConfig::new().with_api_base("http://localhost:8080/v1".to_string());
            let client = Client::with_config(config);

            match client.chat().create_stream(request).await {
                Ok(mut stream) => {
                    // Insert a placeholder assistant message to append into
                    let assistant_id = Uuid::new_v4().to_string();
                    set_messages.update(|msgs| {
                        msgs.push_back(Message {
                            id: assistant_id.clone(),
                            role: "assistant".to_string(),
                            content: String::new(),
                            timestamp: Date::now(),
                        });
                    });

                    // Stream loop: append deltas to the last message
                    while let Some(next) = stream.next().await {
                        match next {
                            Ok(chunk) => {
                                // Try to pull out the content delta in a tolerant way.
                                // async-openai 0.28.x stream chunk usually looks like:
                                //   choices[0].delta.content: Option<String>
                                let mut delta_txt = String::new();

                                if let Some(choice) = chunk.choices.get(0) {
                                    // Newer message API may expose different shapes; try common ones
                                    // 1) Simple string content delta
                                    if let Some(content) = &choice.delta.content {
                                        delta_txt.push_str(content);
                                    }

                                    // 2) Some providers pack text under .delta.role/.delta.<other>
                                    //    If nothing extracted, ignore quietly.

                                    // If a finish_reason arrives, we could stop early,
                                    // but usually the stream naturally ends.
                                }

                                if !delta_txt.is_empty() {
                                    // Append only to the trailing assistant
                                    // placeholder; other roles are untouched.
                                    set_messages.update(|msgs| {
                                        if let Some(last) = msgs.back_mut() {
                                            if last.role == "assistant" {
                                                last.content.push_str(&delta_txt);
                                                last.timestamp = Date::now();
                                            }
                                        }
                                    });
                                }
                            }
                            Err(e) => {
                                // Surface stream failures inline as a system
                                // message and stop consuming the stream.
                                log::error!("Stream error: {:?}", e);
                                set_messages.update(|msgs| {
                                    msgs.push_back(Message {
                                        id: Uuid::new_v4().to_string(),
                                        role: "system".to_string(),
                                        content: format!("Stream error: {e}"),
                                        timestamp: Date::now(),
                                    });
                                });
                                break;
                            }
                        }
                    }
                }
                Err(e) => {
                    // Connection-level failure: show a system error message.
                    log::error!("Failed to send request: {:?}", e);
                    let error_message = Message {
                        id: Uuid::new_v4().to_string(),
                        role: "system".to_string(),
                        content: "Error: Failed to connect to server".to_string(),
                        timestamp: Date::now(),
                    };
                    set_messages.update(|msgs| msgs.push_back(error_message));
                }
            }

            set_is_loading.set(false);
        }
    });

    // Mirror the text input into the `input_value` signal.
    let on_input = move |ev| {
        let input = event_target::<HtmlInputElement>(&ev);
        set_input_value.set(input.value());
    };

    // Form submit (Send button) dispatches the current input.
    let on_submit = move |ev: SubmitEvent| {
        ev.prevent_default();
        let content = input_value.get();
        send_message.dispatch(content);
    };

    // Plain Enter sends; Shift+Enter is left to the default behavior.
    let on_keypress = move |ev: KeyboardEvent| {
        if ev.key() == "Enter" && !ev.shift_key() {
            ev.prevent_default();
            let content = input_value.get();
            send_message.dispatch(content);
        }
    };

    // Render the history, styling each message by its role.
    let messages_list = move || {
        messages.get()
            .into_iter()
            .map(|message| {
                let role_class = match message.role.as_str() {
                    "user" => "user-message",
                    "assistant" => "assistant-message",
                    _ => "system-message",
                };

                view! {
                    <div class=format!("message {}", role_class)>
                        <div class="message-role">{message.role}</div>
                        <div class="message-content">{message.content}</div>
                    </div>
                }
            })
            .collect_view()
    };

    // Transient "Thinking..." bubble shown while a request is in flight.
    let loading_indicator = move || {
        is_loading.get().then(|| {
            view! {
                <div class="message assistant-message">
                    <div class="message-role">"assistant"</div>
                    <div class="message-content">"Thinking..."</div>
                </div>
            }
        })
    };

    view! {
        <div class="chat-container">
            <h1>"Chat Interface"</h1>
            <div class="messages-container">
                {messages_list}
                {loading_indicator}
            </div>
            <form class="input-form" on:submit=on_submit>
                <input
                    type="text"
                    class="message-input"
                    placeholder="Type your message here..."
                    prop:value=input_value
                    on:input=on_input
                    on:keypress=on_keypress
                    prop:disabled=is_loading
                />
                <button
                    type="submit"
                    class="send-button"
                    prop:disabled=move || is_loading.get() || input_value.get().trim().is_empty()
                >
                    "Send"
                </button>
            </form>
        </div>
    }
}
|
||||
|
||||
//
|
||||
// #[component]
|
||||
// fn ChatInterface() -> impl IntoView {
|
||||
// let (messages, set_messages) = create_signal::<VecDeque<Message>>(VecDeque::new());
|
||||
// let (input_value, set_input_value) = create_signal(String::new());
|
||||
// let (is_loading, set_is_loading) = create_signal(false);
|
||||
//
|
||||
// let send_message = create_action(move |content: &String| {
|
||||
// let content = content.clone();
|
||||
// async move {
|
||||
// if content.trim().is_empty() {
|
||||
// return;
|
||||
// }
|
||||
//
|
||||
// set_is_loading.set(true);
|
||||
//
|
||||
// // Add user message to chat
|
||||
// let user_message = Message {
|
||||
// id: Uuid::new_v4().to_string(),
|
||||
// role: "user".to_string(),
|
||||
// content: content.clone(),
|
||||
// timestamp: Date::now(),
|
||||
// };
|
||||
//
|
||||
// set_messages.update(|msgs| msgs.push_back(user_message.clone()));
|
||||
// set_input_value.set(String::new());
|
||||
//
|
||||
// let mut chat_messages = Vec::new();
|
||||
//
|
||||
// // Add system message
|
||||
// let system_message = ChatCompletionRequestSystemMessageArgs::default()
|
||||
// .content("You are a helpful assistant.")
|
||||
// .build()
|
||||
// .expect("failed to build system message");
|
||||
// chat_messages.push(system_message.into());
|
||||
//
|
||||
// // Add history messages
|
||||
// messages.with(|msgs| {
|
||||
// for msg in msgs.iter() {
|
||||
// let message = ChatCompletionRequestUserMessageArgs::default()
|
||||
// .content(msg.content.clone().into())
|
||||
// .build()
|
||||
// .expect("failed to build message");
|
||||
// chat_messages.push(message.into());
|
||||
// }
|
||||
// });
|
||||
//
|
||||
// // Add current user message
|
||||
// let message = ChatCompletionRequestUserMessageArgs::default()
|
||||
// .content(user_message.content.clone().into())
|
||||
// .build()
|
||||
// .expect("failed to build user message");
|
||||
// chat_messages.push(message.into());
|
||||
//
|
||||
// let request = CreateChatCompletionRequestArgs::default()
|
||||
// .model("gemma-2b-it")
|
||||
// .max_tokens(512u32)
|
||||
// .messages(chat_messages)
|
||||
// .build()
|
||||
// .expect("failed to build request");
|
||||
//
|
||||
// // Send request
|
||||
// let config = OpenAIConfig::new().with_api_base("http://localhost:8080".to_string());
|
||||
// let client = Client::with_config(config);
|
||||
//
|
||||
// match client
|
||||
// .chat()
|
||||
// .create_stream(request)
|
||||
// .await
|
||||
// {
|
||||
// Ok(chat_response) => {
|
||||
//
|
||||
//
|
||||
// // if let Some(choice) = chat_response {
|
||||
// // // Extract content from the message
|
||||
// // let content_text = match &choice.message.content {
|
||||
// // Some(message_content) => {
|
||||
// // match &message_content.0 {
|
||||
// // either::Either::Left(text) => text.clone(),
|
||||
// // either::Either::Right(_) => "Complex content not supported".to_string(),
|
||||
// // }
|
||||
// // }
|
||||
// // None => "No content provided".to_string(),
|
||||
// // };
|
||||
// //
|
||||
// // let assistant_message = Message {
|
||||
// // id: Uuid::new_v4().to_string(),
|
||||
// // role: "assistant".to_string(),
|
||||
// // content: content_text,
|
||||
// // timestamp: Date::now(),
|
||||
// // };
|
||||
// // set_messages.update(|msgs| msgs.push_back(assistant_message));
|
||||
// //
|
||||
// //
|
||||
// //
|
||||
// // // Log token usage information
|
||||
// // log::debug!("Token usage - Prompt: {}, Completion: {}, Total: {}",
|
||||
// // chat_response.usage.prompt_tokens,
|
||||
// // chat_response.usage.completion_tokens,
|
||||
// // chat_response.usage.total_tokens);
|
||||
// // }
|
||||
// }
|
||||
// Err(e) => {
|
||||
// log::error!("Failed to send request: {:?}", e);
|
||||
// let error_message = Message {
|
||||
// id: Uuid::new_v4().to_string(),
|
||||
// role: "system".to_string(),
|
||||
// content: "Error: Failed to connect to server".to_string(),
|
||||
// timestamp: Date::now(),
|
||||
// };
|
||||
// set_messages.update(|msgs| msgs.push_back(error_message));
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// set_is_loading.set(false);
|
||||
// }
|
||||
// });
|
||||
//
|
||||
// let on_input = move |ev| {
|
||||
// let input = event_target::<HtmlInputElement>(&ev);
|
||||
// set_input_value.set(input.value());
|
||||
// };
|
||||
//
|
||||
// let on_submit = move |ev: SubmitEvent| {
|
||||
// ev.prevent_default();
|
||||
// let content = input_value.get();
|
||||
// send_message.dispatch(content);
|
||||
// };
|
||||
//
|
||||
// let on_keypress = move |ev: KeyboardEvent| {
|
||||
// if ev.key() == "Enter" && !ev.shift_key() {
|
||||
// ev.prevent_default();
|
||||
// let content = input_value.get();
|
||||
// send_message.dispatch(content);
|
||||
// }
|
||||
// };
|
||||
//
|
||||
// let messages_list = move || {
|
||||
// messages.get()
|
||||
// .into_iter()
|
||||
// .map(|message| {
|
||||
// let role_class = match message.role.as_str() {
|
||||
// "user" => "user-message",
|
||||
// "assistant" => "assistant-message",
|
||||
// _ => "system-message",
|
||||
// };
|
||||
//
|
||||
// view! {
|
||||
// <div class=format!("message {}", role_class)>
|
||||
// <div class="message-role">{message.role}</div>
|
||||
// <div class="message-content">{message.content}</div>
|
||||
// </div>
|
||||
// }
|
||||
// })
|
||||
// .collect_view()
|
||||
// };
|
||||
//
|
||||
// let loading_indicator = move || {
|
||||
// is_loading.get().then(|| {
|
||||
// view! {
|
||||
// <div class="message assistant-message">
|
||||
// <div class="message-role">"assistant"</div>
|
||||
// <div class="message-content">"Thinking..."</div>
|
||||
// </div>
|
||||
// }
|
||||
// })
|
||||
// };
|
||||
//
|
||||
// view! {
|
||||
// <div class="chat-container">
|
||||
// <h1>"Chat Interface"</h1>
|
||||
// <div class="messages-container">
|
||||
// {messages_list}
|
||||
// {loading_indicator}
|
||||
// </div>
|
||||
// <form class="input-form" on:submit=on_submit>
|
||||
// <input
|
||||
// type="text"
|
||||
// class="message-input"
|
||||
// placeholder="Type your message here..."
|
||||
// prop:value=input_value
|
||||
// on:input=on_input
|
||||
// on:keypress=on_keypress
|
||||
// prop:disabled=is_loading
|
||||
// />
|
||||
// <button
|
||||
// type="submit"
|
||||
// class="send-button"
|
||||
// prop:disabled=move || is_loading.get() || input_value.get().trim().is_empty()
|
||||
// >
|
||||
// "Send"
|
||||
// </button>
|
||||
// </form>
|
||||
// </div>
|
||||
// }
|
||||
// }
|
||||
|
||||
/// WebAssembly entry point; runs automatically when the module is
/// initialized in the browser (via `#[wasm_bindgen(start)]`).
#[wasm_bindgen::prelude::wasm_bindgen(start)]
pub fn main() {
    // Set up error handling and logging for WebAssembly:
    // panics are forwarded to the browser console, and `log` macros are
    // routed through console_log at Debug level.
    console_error_panic_hook::set_once();
    console_log::init_with_level(log::Level::Debug).expect("error initializing logger");

    // Mount the App component to the document body
    leptos::mount_to_body(App)
}
|
165
crates/leptos-chat/style/main.css
Normal file
165
crates/leptos-chat/style/main.css
Normal file
@@ -0,0 +1,165 @@
|
||||
/* Base page styling */
body {
    font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
    margin: 0;
    padding: 0;
    background-color: #f5f5f5;
}

/* Full-height centered column holding the whole chat UI */
.chat-container {
    max-width: 800px;
    margin: 0 auto;
    height: 100vh;
    display: flex;
    flex-direction: column;
    background-color: white;
    box-shadow: 0 0 20px rgba(0, 0, 0, 0.1);
}

/* Header bar */
h1 {
    background-color: #4a90e2;
    color: white;
    margin: 0;
    padding: 20px;
    text-align: center;
    font-size: 24px;
    font-weight: 600;
}

/* Scrollable message list between the header and the input form */
.messages-container {
    flex: 1;
    overflow-y: auto;
    padding: 20px;
    display: flex;
    flex-direction: column;
    gap: 15px;
}

/* Shared bubble styling; role-specific classes below set alignment/color */
.message {
    display: flex;
    flex-direction: column;
    max-width: 70%;
    padding: 12px 16px;
    border-radius: 18px;
    word-wrap: break-word;
}

.user-message {
    align-self: flex-end;
    background-color: #4a90e2;
    color: white;
}

.assistant-message {
    align-self: flex-start;
    background-color: #e9ecef;
    color: #333;
}

/* System/error notices are centered and amber-tinted */
.system-message {
    align-self: center;
    background-color: #ffebcc;
    color: #856404;
    border: 1px solid #ffeaa7;
}

.message-role {
    font-size: 12px;
    font-weight: 600;
    margin-bottom: 4px;
    opacity: 0.7;
    text-transform: capitalize;
}

.message-content {
    font-size: 14px;
    line-height: 1.4;
}

/* Input row pinned at the bottom of the chat container */
.input-form {
    display: flex;
    padding: 20px;
    gap: 10px;
    background-color: #f8f9fa;
    border-top: 1px solid #dee2e6;
}

.message-input {
    flex: 1;
    padding: 12px 16px;
    border: 1px solid #ced4da;
    border-radius: 25px;
    font-size: 14px;
    outline: none;
    transition: border-color 0.2s ease;
}

.message-input:focus {
    border-color: #4a90e2;
    box-shadow: 0 0 0 2px rgba(74, 144, 226, 0.25);
}

.message-input:disabled {
    background-color: #f8f9fa;
    color: #6c757d;
    cursor: not-allowed;
}

.send-button {
    padding: 12px 24px;
    background-color: #4a90e2;
    color: white;
    border: none;
    border-radius: 25px;
    font-size: 14px;
    font-weight: 600;
    cursor: pointer;
    transition: background-color 0.2s ease;
    min-width: 80px;
}

.send-button:hover:not(:disabled) {
    background-color: #357abd;
}

.send-button:disabled {
    background-color: #6c757d;
    cursor: not-allowed;
}

/* Scrollbar styling (WebKit-only pseudo-elements) */
.messages-container::-webkit-scrollbar {
    width: 8px;
}

.messages-container::-webkit-scrollbar-track {
    background: #f1f1f1;
}

.messages-container::-webkit-scrollbar-thumb {
    background: #c1c1c1;
    border-radius: 4px;
}

.messages-container::-webkit-scrollbar-thumb:hover {
    background: #a1a1a1;
}

/* Responsive design */
@media (max-width: 768px) {
    .chat-container {
        height: 100vh;
    }

    .message {
        max-width: 85%;
    }

    .input-form {
        padding: 15px;
    }

    h1 {
        padding: 15px;
        font-size: 20px;
    }
}
|
Reference in New Issue
Block a user