Mirror of https://github.com/geoffsee/predict-otron-9001.git (synced 2025-09-08 22:46:44 +00:00)

Commit: align dependencies across inference features
@@ -4,26 +4,12 @@ version = "0.1.0"
edition = "2021"

[dependencies]
accelerate-src = { version = "0.3.2", optional = true }
candle-datasets = { version = "=0.9.1", optional = true }
candle-nn = { version = "=0.9.1" }
candle-transformers = { version = "=0.9.1" }
candle-core = { git = "https://github.com/huggingface/candle.git" }
candle-nn = { git = "https://github.com/huggingface/candle.git" }
candle-transformers = { git = "https://github.com/huggingface/candle.git" }
candle-flash-attn = { version = "=0.9.1", optional = true }
candle-onnx = { version = "=0.9.1", optional = true }

csv = "1.3.0"
cudarc = { version = "0.16.3", features = ["std", "cublas", "cublaslt", "curand", "driver", "nvrtc", "f16", "cuda-version-from-build-system", "dynamic-linking"], default-features = false, optional = true }
half = { version = "2.5.0", features = ["num-traits", "use-intrinsics", "rand_distr"], optional = true }
hf-hub = { version = "0.4.1", features = ["tokio"] }
image = { version = "0.25.2", default-features = false, features = ["jpeg", "png"] }
intel-mkl-src = { version = "0.8.1", features = ["mkl-static-lp64-iomp"], optional = true }
num-traits = { version = "0.2.15" }
palette = { version = "0.7.6", optional = true }
enterpolation = { version = "0.2.1", optional = true }
pyo3 = { version = "0.22.0", features = ["auto-initialize", "abi3-py311"], optional = true }
rayon = "1.7.0"
rubato = { version = "0.15.0", optional = true }
safetensors = "0.4.1"
serde = { version = "1.0.171", features = ["derive"] }
serde_json = "1.0.99"
symphonia = { version = "0.5.3", features = ["all"], optional = true }
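As an aside, not part of this commit: when several crates track the same upstream git repository, Cargo lets you pin them to one revision so every inference feature builds against identical sources. A minimal sketch, with a placeholder rev value that is not taken from this commit:

# Illustrative only; the rev value is a placeholder.
candle-core = { git = "https://github.com/huggingface/candle.git", rev = "<pinned-commit>" }
candle-nn = { git = "https://github.com/huggingface/candle.git", rev = "<pinned-commit>" }
candle-transformers = { git = "https://github.com/huggingface/candle.git", rev = "<pinned-commit>" }

Without a rev (or tag/branch), Cargo resolves the latest commit at lock time, so alignment across builds rests on Cargo.lock alone.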
@@ -48,19 +34,11 @@ futures-util = "0.3.31"
gemma-runner = { path = "../gemma-runner" }
llama-runner = { path = "../llama-runner" }

# --- Add this section for conditional compilation ---
[target.'cfg(target_os = "macos")'.dependencies]
# Use CPU backend for macOS to avoid Metal rotary-emb implementation issues
candle-core = { version = "=0.9.1", features = ["metal"], optional = false }
candle-core = { git = "https://github.com/huggingface/candle.git", features = ["metal"] }
candle-nn = { git = "https://github.com/huggingface/candle.git", features = ["metal"] }
candle-transformers = { git = "https://github.com/huggingface/candle.git", features = ["metal"] }

[target.'cfg(not(target_os = "macos"))'.dependencies]
# For Linux or other non-macOS systems, you likely want the CPU backend or CUDA
# If you're building on Linux with a CUDA-enabled GPU:
candle-core = { version = "=0.9.1", features = ["cuda"], default-features = false } # Or just "cuda" if not using default features

# If you're building on Linux with only CPU:
# candle-core = { version = "=0.9.1", default-features = false } # CPU is often the default, but good to be explicit
# --- End of conditional compilation section ---
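A related pattern, offered here only as a hedged sketch and not something this commit adds, is to surface the backend choice as Cargo features rather than per-target dependency blocks, forwarding them to the candle crates. The feature names below are illustrative assumptions:

# Sketch only: candle-core, candle-nn, and candle-transformers expose "cuda" and
# "metal" features, and candle-transformers exposes "flash-attn"; everything else
# here is assumed for illustration.
[features]
default = []
metal = ["candle-core/metal", "candle-nn/metal", "candle-transformers/metal"]
cuda = ["candle-core/cuda", "candle-nn/cuda", "candle-transformers/cuda", "dep:cudarc"]
flash-attn = ["cuda", "dep:candle-flash-attn", "candle-transformers/flash-attn"]

A macOS build would then pass --features metal and a CUDA build --features cuda, without duplicating the dependency entries per target.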

[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }