ollamarunner: Re-enable worst case graph preallocation.
Worst case graph preallocation was disabled by a27462b
"ollamarunner: Temporarily disable worst case graph preallocation"
since it caused crashes with large batches when not using the GPU.
This backports upstream llama.cpp commit f057808
"ggml: Don't assert fail when tensor data changes (#13222)", which
fixes the underlying bug and allows reverting the previous workaround.
parent 57fb759f3c
commit c2f5d6662b
3 changed files with 46 additions and 7 deletions
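For context before the diff: "worst case graph preallocation" means the runner reserves compute buffers once, sized for the largest batch it will ever submit, and then reuses them for every real graph. Below is a minimal, self-contained sketch of that reserve-once/reuse-many pattern; it is not ggml or ollama code, and every name in it is hypothetical.

// prealloc_sketch.c -- illustrates reserve-once / reuse-many preallocation.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    void  *data;     // buffer reserved up front
    size_t size_max; // bytes reserved at worst-case time
} prealloc_t;

// Reserve once, sized by the worst-case (largest-batch) graph.
static void reserve_worst_case(prealloc_t *p, size_t worst_case_bytes) {
    p->data     = malloc(worst_case_bytes);
    p->size_max = worst_case_bytes;
}

// For each real graph: reuse the reservation if the request still fits;
// a "no" answer means the caller must fall back to a slow reallocation.
static bool fits(const prealloc_t *p, size_t needed) {
    return p->data != NULL && p->size_max >= needed;
}

int main(void) {
    prealloc_t p = {0};
    reserve_worst_case(&p, 1 << 20); // sized for the maximum batch

    printf("small batch fits: %s\n", fits(&p, 64 * 1024) ? "yes" : "no");
    printf("oversize fits:    %s\n", fits(&p, 1 << 21) ? "yes" : "no");

    free(p.data);
    return 0;
}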
ml/backend/ggml/ggml/src/ggml-alloc.c (vendored) | 5 ++++-
@@ -816,7 +816,10 @@ static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor *
 static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
     size_t node_size = 0;
     if (!node->data && !node->view_src) {
-        GGML_ASSERT(talloc->buffer_id >= 0); // prevent segfault when misusing the API
+        // If we previously had data but don't now then reallocate
+        if (talloc->buffer_id < 0) {
+            return false;
+        }
         node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node);
     }
     return talloc->size_max >= node_size;
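The behavioral change in the hunk above, stated directly: before f057808, a node whose talloc->buffer_id was negative (it had data on a previous pass but no longer does) hit GGML_ASSERT and aborted the process; after the patch the check simply answers "does not fit", so the graph allocator reallocates instead of crashing. Below is a standalone sketch of that decision. The struct fields and logic mirror ggml-alloc.c, but the harness is hypothetical and this is not the actual source.

// realloc_decision.c -- models the patched needs-realloc check.
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct tensor_alloc {
    int    buffer_id; // < 0: no buffer was assigned on the previous pass
    size_t size_max;  // bytes reserved for this node during preallocation
};

// Patched logic: a lost buffer means "needs realloc" (return false)
// instead of an assertion failure.
static bool node_fits(const struct tensor_alloc *talloc, size_t node_size) {
    if (talloc->buffer_id < 0) {
        return false; // previously had data but doesn't now: reallocate
    }
    return talloc->size_max >= node_size;
}

int main(void) {
    struct tensor_alloc lost = { .buffer_id = -1, .size_max = 0 };
    struct tensor_alloc ok   = { .buffer_id =  0, .size_max = 4096 };

    printf("lost buffer    -> fits: %s (so reallocate)\n", node_fits(&lost, 1024) ? "yes" : "no");
    printf("within reserve -> fits: %s\n", node_fits(&ok, 1024) ? "yes" : "no");
    return 0;
}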