Mirror of https://github.com/ollama/ollama.git (synced 2025-05-11 18:36:41 +02:00)
ggml: Don't allocate CPU buffers as CUDA Host buffers
Allocating (and in particular, freeing) memory from CUDA host buffers is expensive, and doing it for every token causes a significant performance hit. Using normal system memory avoids this issue and also gives the OS more flexibility to manage it. This patch has no direct performance impact by itself, positive or negative, but it will make a difference once we start freeing memory correctly.
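For background: in ggml, a GPU device can expose a "host" buffer type backed by pinned (page-locked) memory allocated through CUDA, in addition to its default buffer type, while the CPU device's default buffer type is plain system memory. Below is a minimal, hypothetical Go/cgo sketch of the cost difference the commit message describes; it is not ollama code, and the link flags, buffer size, and iteration count are illustrative assumptions.

// pinned_vs_system.go — standalone benchmark sketch (not part of ollama).
// Assumes the ggml-backend headers and libraries are available to cgo;
// the exact link flags depend on how ggml was built.
package main

/*
#cgo LDFLAGS: -lggml
#include "ggml-backend.h"
*/
import "C"

import (
	"fmt"
	"time"
)

// timeAllocFree allocates and frees one buffer of the given type per "token"
// and reports the total wall time.
func timeAllocFree(name string, bt C.ggml_backend_buffer_type_t, tokens int) {
	start := time.Now()
	for i := 0; i < tokens; i++ {
		buf := C.ggml_backend_buft_alloc_buffer(bt, 1<<20) // 1 MiB per step
		C.ggml_backend_buffer_free(buf)
	}
	fmt.Printf("%-20s %v for %d alloc/free cycles\n", name, time.Since(start), tokens)
}

func main() {
	C.ggml_backend_load_all() // load backends; may be a no-op in static builds

	cpu := C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_CPU)
	gpu := C.ggml_backend_dev_by_type(C.GGML_BACKEND_DEVICE_TYPE_GPU)

	// Default CPU buffer type: ordinary system memory, cheap to allocate and free.
	timeAllocFree("system memory", C.ggml_backend_dev_buffer_type(cpu), 1000)

	// GPU host buffer type: pinned (CUDA host) memory; every alloc/free is a
	// CUDA call, which is the per-token cost this commit avoids.
	if gpu != nil {
		if hbt := C.ggml_backend_dev_host_buffer_type(gpu); hbt != nil {
			timeAllocFree("pinned host memory", hbt, 1000)
		}
	}
}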
commit 34c3b68fc8 (parent f33ccd5d27)
1 changed file with 0 additions and 6 deletions
@@ -384,12 +384,6 @@ func New(ctx context.Context, r *os.File, params ml.BackendParams) (ml.Backend,
 	for _, d := range append(gpus, append(accels, cpus...)...) {
 		b := C.ggml_backend_dev_init(d, nil)
 		bt := C.ggml_backend_get_default_buffer_type(b)
-		if d := C.ggml_backend_get_device(b); C.ggml_backend_dev_type(d) == C.GGML_BACKEND_DEVICE_TYPE_CPU && len(gpus) > 0 {
-			// use the first gpu host buffer type for gpu if possible
-			if hbt := C.ggml_backend_dev_host_buffer_type(gpus[0]); hbt != nil {
-				bt = hbt
-			}
-		}

 		deviceBufferTypes[d] = bt
 	}
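With the special case deleted, the loop maps every device, CPU included, to its backend's default buffer type. Reassembled from the context lines above, the surviving code reads:

	for _, d := range append(gpus, append(accels, cpus...)...) {
		b := C.ggml_backend_dev_init(d, nil)
		bt := C.ggml_backend_get_default_buffer_type(b)

		deviceBufferTypes[d] = bt
	}

CPU tensors therefore land in ordinary pageable memory that the OS is free to manage, while GPU allocations are untouched; per the commit message, the payoff arrives once that memory is actually freed per token.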