llm: speed up gguf decoding by a lot (#5246)

Previously, two costly behaviors made loading GGUF files and their
metadata and tensor information very slow:

  * Too many allocations when decoding strings
  * Hitting disk for every read of every key and value, resulting in an
    excessive number of syscalls and disk I/O (see the sketch after this
    list)
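
A minimal sketch of the buffered-read idea, assuming the decoder reads
the file sequentially; this is illustrative, not the commit's actual
code:

package main

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("model.gguf") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Each small binary.Read against the raw *os.File costs a syscall;
	// against the bufio.Reader, most reads are served from the 1 MiB
	// in-memory buffer, so disk is hit far less often.
	r := bufio.NewReaderSize(f, 1<<20)

	var magic uint32
	if err := binary.Read(r, binary.LittleEndian, &magic); err != nil {
		panic(err)
	}
	fmt.Printf("magic: %#x\n", magic)
}

The string-allocation fix mentioned above would be orthogonal, e.g.
reusing one scratch buffer across string reads rather than allocating a
fresh one per value.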

The show API is now down to 33ms from 800ms+ for llama3 on a MacBook Pro
M3.

This commit also allows callers to skip collecting large arrays of
values when decoding GGUFs, if desired. When such keys are encountered,
their values are set to null and are encoded as such in JSON.
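
A hedged sketch of the skip-large-arrays behavior; the identifiers
(decodeArray, maxArraySize) are illustrative, not the commit's actual
names:

package main

import (
	"encoding/json"
	"fmt"
)

// decodeArray stands in for the typed GGUF array decoder; it only
// illustrates the collect-vs-skip branch.
func decodeArray(length, maxArraySize int) any {
	if maxArraySize >= 0 && length > maxArraySize {
		// A real decoder would seek past the array's encoded bytes
		// here instead of materializing it, and record nil so the
		// value serializes as JSON null.
		return nil
	}
	return make([]int, length)
}

func main() {
	kv := map[string]any{
		"tokenizer.ggml.tokens": decodeArray(152064, 1024), // too large: null
		"small.array":           decodeArray(4, 1024),      // collected
	}
	b, _ := json.Marshal(kv)
	fmt.Println(string(b)) // {"small.array":[0,0,0,0],"tokenizer.ggml.tokens":null}
}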

Also, this fixes a broken test that was not encoding valid GGUF.
Author: Blake Mizerany
Date:   2024-06-24 21:47:52 -07:00
parent 2aa91a937b
commit cb42e607c5
13 changed files with 263 additions and 69 deletions


@@ -63,7 +63,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
 	}
 	defer blob.Close()
-	ggml, _, err := llm.DecodeGGML(blob)
+	ggml, _, err := llm.DecodeGGML(blob, 0)
 	if err != nil {
 		return nil, err
 	}
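
Each call site in this commit now passes a second argument to
llm.DecodeGGML (0 at every site touched here). A hedged usage sketch of
the new call shape; the meaning of 0 is inferred from the commit message
rather than stated in the diff, and the import path is assumed:

package main

import (
	"os"

	"github.com/ollama/ollama/llm"
)

func main() {
	f, err := os.Open("model.gguf") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The second argument presumably bounds how many array elements are
	// collected; the call sites in this commit pass 0.
	ggml, _, err := llm.DecodeGGML(f, 0)
	if err != nil {
		panic(err)
	}
	_ = ggml
}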
@@ -176,7 +176,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
 	}
 	defer bin.Close()
-	ggml, _, err := llm.DecodeGGML(bin)
+	ggml, _, err := llm.DecodeGGML(bin, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -210,7 +210,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
 	var offset int64
 	for offset < stat.Size() {
-		ggml, n, err := llm.DecodeGGML(file)
+		ggml, n, err := llm.DecodeGGML(file, 0)
 		if errors.Is(err, io.EOF) {
 			break
 		} else if err != nil {