Implement linux NUMA detection
If the system has multiple NUMA nodes, enable NUMA support in llama.cpp. If we detect numactl in the path, use that; otherwise fall back to the basic "distribute" mode.
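The net effect on the llama.cpp server command line is one of the following flag pairs (the values come from the diff below; the annotations are editorial, not part of the commit):

	--numa distribute    (multiple NUMA nodes, numactl not found in PATH)
	--numa numactl       (multiple NUMA nodes, numactl available in PATH)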
This commit is contained in:
parent 39f2bc6bfc
commit f457d63400
3 changed files with 29 additions and 4 deletions
@@ -256,8 +256,14 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 			params = append(params, "--mlock")
 		}
 
-		if opts.UseNUMA {
-			params = append(params, "--numa")
+		if gpu.IsNUMA() {
+			numaMode := "distribute"
+			if runtime.GOOS == "linux" {
+				if _, err := exec.LookPath("numactl"); err == nil {
+					numaMode = "numactl"
+				}
+			}
+			params = append(params, "--numa", numaMode)
 		}
 
 		params = append(params, "--parallel", strconv.Itoa(numParallel))
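The hunk calls gpu.IsNUMA(), which is added in one of the other changed files and not shown in this excerpt. As a rough sketch only (an assumption, not the commit's actual code), a Linux-side detector in this spirit could count the NUMA node directories that sysfs exposes:

	package gpu

	import (
		"path/filepath"
		"runtime"
	)

	// IsNUMA reports whether the host appears to have more than one
	// NUMA node. Illustrative sketch; the helper added by this commit
	// may detect nodes differently.
	func IsNUMA() bool {
		if runtime.GOOS != "linux" {
			// Per the commit message, NUMA detection is Linux-only.
			return false
		}
		// Each NUMA node is exposed as /sys/devices/system/node/node<N>.
		nodes, _ := filepath.Glob("/sys/devices/system/node/node[0-9]*")
		return len(nodes) > 1
	}

Preferring numactl when it is installed delegates thread and memory placement to the system policy tool; "distribute" is the basic built-in fallback the commit message describes, spreading work across nodes without external help.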