next ollama runner (#7913)

feat: add new Ollama engine using ggml through cgo

This change introduces a new way to run pretrained models. It adds three high-level interfaces and several smaller helper interfaces to facilitate this; a rough sketch of how the main interfaces fit together follows the list below.

- `model.Model` defines the interface for a model architecture. Models such as `llama` and `mllama`, which are provided as examples, implement their forward propagation in the `Forward` method; this method is called to generate completions. This interface can be found in `model/model.go`
- `ml.Backend` defines the interface for a backend tensor library, in this case `ggml`. Among other things, a Backend is responsible for loading a pretrained model into hardware (GPU, CPU, etc) and providing an interface for Models to access loaded tensors. This interface can be found in `ml/backend.go`
- `ml.Tensor` defines the interface for a tensor and tensor operations
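
A rough sketch of how these pieces relate. The method names and signatures here are illustrative assumptions, not the exact definitions in `model/model.go` and `ml/backend.go`:

```go
// Package ml sketches the interfaces introduced by this PR. Method names
// and signatures are simplified assumptions for illustration.
package ml

// Tensor is a handle to backend-owned data; the real interface also
// exposes the tensor operations used to build a forward pass.
type Tensor interface {
	Shape() []int
}

// Backend is a tensor library (here, ggml via cgo). It loads a
// pretrained model into hardware (GPU, CPU, etc.) and exposes the
// loaded weights to Models as named Tensors.
type Backend interface {
	Get(name string) Tensor
}

// Model is a model architecture such as llama or mllama. Forward runs
// forward propagation and is called to generate completions.
type Model interface {
	Forward(inputs []int32) (Tensor, error)
}
```

In the PR itself, `Model` lives in the `model` package while `Backend` and `Tensor` live in `ml`; the sketch collapses them into one package for brevity.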

This is the first implementation of the new engine. Follow-up PRs will implement more features:

- non-greedy sampling (#8410)
- integration with Ollama and KV caching (#8301)
- additional model support (#9080), with more coming soon

Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
Michael Yang 2025-02-14 00:31:21 +00:00 committed by GitHub
parent 8cf16063a5
commit 58245413f4
57 changed files with 475427 additions and 494 deletions

server/create.go

@@ -21,8 +21,8 @@ import (
 	"github.com/ollama/ollama/convert"
 	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
+	"github.com/ollama/ollama/fs/ggml"
 	"github.com/ollama/ollama/llama"
-	"github.com/ollama/ollama/llm"
 	"github.com/ollama/ollama/template"
 	"github.com/ollama/ollama/types/errtypes"
 	"github.com/ollama/ollama/types/model"
@@ -205,7 +205,7 @@ func detectModelTypeFromFiles(files map[string]string) string {
 			return ""
 		}
-		ct := llm.DetectGGMLType(buf)
+		ct := ggml.DetectContentType(buf)
 		if ct == "gguf" {
 			return "gguf"
 		}
@@ -271,11 +271,11 @@ func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, is
 		return nil, err
 	}
-	ggml, _, err := llm.DecodeGGML(bin, 0)
+	f, _, err := ggml.Decode(bin, 0)
 	if err != nil {
 		return nil, err
 	}
-	layers := []*layerGGML{{layer, ggml}}
+	layers := []*layerGGML{{layer, f}}
 	if !isAdapter {
 		return detectChatTemplate(layers)
@@ -283,13 +283,13 @@ func convertFromSafetensors(files map[string]string, baseLayers []*layerGGML, is
 	return layers, nil
 }
-func kvFromLayers(baseLayers []*layerGGML) (llm.KV, error) {
+func kvFromLayers(baseLayers []*layerGGML) (ggml.KV, error) {
 	for _, l := range baseLayers {
 		if l.GGML != nil {
 			return l.KV(), nil
 		}
 	}
-	return llm.KV{}, fmt.Errorf("no base model was found")
+	return ggml.KV{}, fmt.Errorf("no base model was found")
 }
 func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML, fn func(resp api.ProgressResponse)) (err error) {
@@ -306,7 +306,7 @@ func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML,
 		if layer.GGML != nil {
 			quantType := strings.ToUpper(cmp.Or(r.Quantize, r.Quantization))
 			if quantType != "" && layer.GGML.Name() == "gguf" && layer.MediaType == "application/vnd.ollama.image.model" {
-				want, err := llm.ParseFileType(quantType)
+				want, err := ggml.ParseFileType(quantType)
 				if err != nil {
 					return err
 				}
@@ -403,7 +403,7 @@ func quantizeLayer(layer *layerGGML, quantizeType string, fn func(resp api.Progr
 	ft := layer.GGML.KV().FileType()
 	fn(api.ProgressResponse{Status: fmt.Sprintf("quantizing %s model to %s", ft, quantizeType)})
-	want, err := llm.ParseFileType(quantizeType)
+	want, err := ggml.ParseFileType(quantizeType)
 	if err != nil {
 		return nil, err
 	}
@@ -433,13 +433,13 @@ func quantizeLayer(layer *layerGGML, quantizeType string, fn func(resp api.Progr
 		return nil, err
 	}
-	ggml, _, err := llm.DecodeGGML(temp, 0)
+	f, _, err := ggml.Decode(temp, 0)
 	if err != nil {
 		slog.Error(fmt.Sprintf("error decoding ggml: %s\n", err))
 		return nil, err
 	}
-	return &layerGGML{newLayer, ggml}, nil
+	return &layerGGML{newLayer, f}, nil
 }
 func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML, error) {
@@ -475,7 +475,7 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 	var offset int64
 	for offset < stat.Size() {
-		ggml, n, err := llm.DecodeGGML(blob, 0)
+		f, n, err := ggml.Decode(blob, 0)
 		if errors.Is(err, io.EOF) {
 			break
 		} else if err != nil {
@@ -483,9 +483,9 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 		}
 		mediatype := "application/vnd.ollama.image.model"
-		if ggml.KV().Kind() == "adapter" {
+		if f.KV().Kind() == "adapter" {
 			mediatype = "application/vnd.ollama.image.adapter"
-		} else if _, ok := ggml.KV()[fmt.Sprintf("%s.vision.block_count", ggml.KV().Architecture())]; ok || ggml.KV().Kind() == "projector" {
+		} else if _, ok := f.KV()[fmt.Sprintf("%s.vision.block_count", f.KV().Architecture())]; ok || f.KV().Kind() == "projector" {
 			mediatype = "application/vnd.ollama.image.projector"
 		}
@@ -506,7 +506,7 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
 			}
 		}
-		layers = append(layers, &layerGGML{layer, ggml})
+		layers = append(layers, &layerGGML{layer, f})
 		offset = n
 	}
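
The hunks above follow one mechanical pattern: the GGUF decoding and file-type helpers moved from the `llm` package to the new `fs/ggml` package, and the decoded value is now bound to `f` so it no longer clashes with the `ggml` package name. A minimal sketch of the new call site, with a placeholder blob path:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/ollama/ollama/fs/ggml"
)

func main() {
	// Placeholder path: any GGUF blob on disk.
	blob, err := os.Open("path/to/blob.gguf")
	if err != nil {
		log.Fatal(err)
	}
	defer blob.Close()

	// ggml.Decode replaces the old llm.DecodeGGML. Binding the result to
	// f (rather than the old variable name ggml) avoids shadowing the
	// package that now provides Decode.
	f, n, err := ggml.Decode(blob, 0)
	if err != nil {
		log.Fatal(err)
	}

	// KV() exposes the decoded metadata, as the hunks above use it; n is
	// the offset past the decoded model, which ggufLayers uses to walk
	// blobs containing more than one model.
	fmt.Println(f.KV().Architecture(), f.KV().Kind(), n)
}
```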