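// Running this test: the //go:build integration tag below keeps it out of a
// plain `go test` run; a command along the lines of
//
//	go test -tags integration -run TestIntegrationSimple ./integration/...
//
// should pick it up, assuming the integration package lives in an `integration`
// directory at the repository root. smol (the small test model) and
// GenerateTestHelper are package-level helpers defined in other files of this
// package; resp pairs each request in req with terms expected to appear in the
// model's response.
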
//go:build integration

package integration

import (
	"context"
	"testing"
	"time"

	"github.com/ollama/ollama/api"
)

// TODO - this would ideally be in the llm package, but that would require some refactoring of interfaces in the server
// package to avoid circular dependencies

var (
	stream = false
	req    = [2]api.GenerateRequest{
		{
			Model:  smol,
			Prompt: "why is the ocean blue?",
			Stream: &stream,
			Options: map[string]any{
				"seed":        42,
				"temperature": 0.0,
			},
		}, {
			Model:  smol,
			Prompt: "what is the origin of the us thanksgiving holiday?",
			Stream: &stream,
			Options: map[string]any{
				"seed":        42,
				"temperature": 0.0,
			},
		},
	}
	resp = [2][]string{
		{"sunlight", "scattering", "interact"},
		{"england", "english", "massachusetts", "pilgrims"},
	}
)

func TestIntegrationSimple(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
	defer cancel()
	GenerateTestHelper(ctx, t, req[0], resp[0])
}