mirror of
https://github.com/geoffsee/predict-otron-9001.git
synced 2025-09-08 22:46:44 +00:00

- Increase default maximum tokens in CLI configuration to 256. - Refactor and reorganize CLI
49 lines
3.6 KiB
Plaintext
   Compiling inference-engine v0.1.0 (/Users/williamseemueller/workspace/seemueller-io/predict-otron-9000/crates/inference-engine)
warning: unused import: `Config as Config1`
 --> crates/inference-engine/src/model.rs:2:42
  |
2 | use candle_transformers::models::gemma::{Config as Config1, Model as Model1};
  |                                          ^^^^^^^^^^^^^^^^^
  |
  = note: `#[warn(unused_imports)]` on by default

warning: unused import: `Config as Config2`
 --> crates/inference-engine/src/model.rs:3:43
  |
3 | use candle_transformers::models::gemma2::{Config as Config2, Model as Model2};
  |                                           ^^^^^^^^^^^^^^^^^
  |

warning: unused import: `Config as Config3`
 --> crates/inference-engine/src/model.rs:4:43
  |
4 | use candle_transformers::models::gemma3::{Config as Config3, Model as Model3};
  |                                           ^^^^^^^^^^^^^^^^^
  |

warning: unused import: `self`
  --> crates/inference-engine/src/server.rs:10:28
   |
10 | use futures_util::stream::{self, Stream};
   |                            ^^^^
   |

warning: `inference-engine` (lib) generated 4 warnings (run `cargo fix --lib -p inference-engine` to apply 4 suggestions)
   Compiling predict-otron-9000 v0.1.0 (/Users/williamseemueller/workspace/seemueller-io/predict-otron-9000/crates/predict-otron-9000)
    Finished `release` profile [optimized] target(s) in 4.01s
     Running `target/release/predict-otron-9000`
2025-08-28T01:43:11.512475Z  INFO predict_otron_9000::middleware::metrics: Performance metrics summary:
avx: false, neon: true, simd128: false, f16c: false
2025-08-28T01:43:11.512811Z  INFO hf_hub: Using token file found "/Users/williamseemueller/.cache/huggingface/token"
retrieved the files in 685.958µs
2025-08-28T01:43:12.661378Z  INFO predict_otron_9000: Unified predict-otron-9000 server listening on 127.0.0.1:8080
2025-08-28T01:43:12.661400Z  INFO predict_otron_9000: Performance metrics tracking enabled - summary logs every 60 seconds
2025-08-28T01:43:12.661403Z  INFO predict_otron_9000: Available endpoints:
2025-08-28T01:43:12.661405Z  INFO predict_otron_9000: GET / - Root endpoint from embeddings-engine
2025-08-28T01:43:12.661407Z  INFO predict_otron_9000: POST /v1/embeddings - Text embeddings
2025-08-28T01:43:12.661409Z  INFO predict_otron_9000: POST /v1/chat/completions - Chat completions
2025-08-28T01:43:19.166677Z  WARN inference_engine::server: Detected repetition pattern: ' plus' (count: 1)
2025-08-28T01:43:19.296257Z  WARN inference_engine::server: Detected repetition pattern: ' plus' (count: 2)
2025-08-28T01:43:19.424883Z  WARN inference_engine::server: Detected repetition pattern: ' plus' (count: 3)
2025-08-28T01:43:19.554508Z  WARN inference_engine::server: Detected repetition pattern: ' plus' (count: 4)
2025-08-28T01:43:19.683153Z  WARN inference_engine::server: Detected repetition pattern: ' plus' (count: 5)
2025-08-28T01:43:19.683181Z  INFO inference_engine::server: Stopping generation due to excessive repetition
2025-08-28T01:43:19.683221Z  INFO inference_engine::server: Text generation stopped: Repetition detected - stopping generation