mirror of https://github.com/ollama/ollama.git
first pass at linux gpu support (#454)
* linux gpu support
* handle multiple gpus
* add cuda docker image (#488)

Co-authored-by: Michael Yang <mxyng@pm.me>
parent 45ac07cd02
commit f221637053

9 changed files with 158 additions and 22 deletions
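The "handle multiple gpus" item in the commit message refers to detecting how many NVIDIA devices are present before starting a CUDA-enabled runner. Below is a minimal sketch of one way to count GPUs on Linux by querying nvidia-smi; the numGPUs helper is an assumption for illustration, not code from this commit.

// Hypothetical sketch (not from this commit): count NVIDIA GPUs by asking
// nvidia-smi for one UUID per device. Returns 0 when nvidia-smi is absent
// or fails, e.g. on machines without the driver installed.
package llm

import (
	"os/exec"
	"strings"
)

func numGPUs() int {
	out, err := exec.Command("nvidia-smi", "--query-gpu=uuid", "--format=csv,noheader").Output()
	if err != nil {
		return 0
	}
	trimmed := strings.TrimSpace(string(out))
	if trimmed == "" {
		return 0
	}
	// One line per device UUID reported by the driver.
	return len(strings.Split(trimmed, "\n"))
}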
llm/llama.cpp/generate_linux.go (new file, 14 additions)

@@ -0,0 +1,14 @@
+package llm
+
+//go:generate git submodule init
+
+//go:generate git submodule update --force ggml
+//go:generate -command git-apply git -C ggml apply
+//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
+//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
+//go:generate cmake -S ggml -B ggml/build/gpu -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build ggml/build/gpu --target server --config Release
+
+//go:generate git submodule update --force gguf
+//go:generate cmake -S gguf -B gguf/build/gpu -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build gguf/build/gpu --target server --config Release
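The directives above run when go generate is invoked ahead of go build: they initialize and pin the ggml and gguf submodules, apply the local patches, then use cmake with -DLLAMA_CUBLAS=on to produce CUDA-enabled server binaries under ggml/build/gpu and gguf/build/gpu. Below is a minimal sketch of how a caller might pick between such a GPU build and a CPU build at runtime; the cpu build directory and the chooseRunner helper are assumptions for illustration, not part of this commit.

// Hypothetical sketch (not from this commit): prefer the CUDA server
// binary built into ggml/build/gpu when it exists and a GPU is wanted,
// otherwise fall back to an assumed CPU build directory.
package llm

import (
	"os"
	"path/filepath"
)

func chooseRunner(workDir string, useGPU bool) string {
	gpuBin := filepath.Join(workDir, "ggml", "build", "gpu", "bin", "server")
	cpuBin := filepath.Join(workDir, "ggml", "build", "cpu", "bin", "server")
	if useGPU {
		// Only use the GPU binary if the CUDA build actually produced it.
		if _, err := os.Stat(gpuBin); err == nil {
			return gpuBin
		}
	}
	return cpuBin
}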