llama: preserve field order in user-defined JSON schemas (#8002)

Previously we decoded user-supplied JSON schemas into a Go map and
re-encoded them during validation. That round trip served no purpose,
since unmarshaling into json.RawMessage already validates JSON syntax.
Worse, re-encoding a Go map sorts keys alphabetically, so the field
ordering of the original schema was lost, which hurts inference quality
during step-by-step reasoning.
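
To see why the round trip reorders fields, here is a minimal,
self-contained Go sketch (not part of this change; the example schema is
made up): decoding into map[string]any and re-encoding sorts keys
alphabetically, while json.RawMessage keeps the caller's bytes verbatim.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A schema where "reasoning" is intentionally listed before "answer".
	orig := []byte(`{"type":"object","properties":{"reasoning":{"type":"string"},"answer":{"type":"string"}}}`)

	// Decoding into map[string]any and re-encoding sorts keys alphabetically,
	// so "answer" ends up before "reasoning". (Errors ignored for brevity.)
	var m map[string]any
	_ = json.Unmarshal(orig, &m)
	reencoded, _ := json.Marshal(m)
	fmt.Println(string(reencoded))

	// json.RawMessage keeps the original bytes, so the author's ordering
	// survives, and json.Unmarshal still rejects malformed input.
	var raw json.RawMessage
	_ = json.Unmarshal(orig, &raw)
	fmt.Println(string(raw))
}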

While fixing this ordering issue by using json.RawMessage directly, we
found in testing that schema_to_grammar (from llama.cpp) also fails to
preserve field order during grammar generation. This appears to be the
root cause of the inference degradation.

This change prevents us from mangling the user's original schema order,
but we still need to address the ordering issue in schema_to_grammar.
That will be a separate change.

Updates #7978
Blake Mizerany 2024-12-11 14:07:30 -08:00 committed by GitHub
parent 581a4a5553
commit 9039c821a2
5 changed files with 104 additions and 114 deletions


@@ -67,7 +67,7 @@ type ResponseFormat struct {
 }
 
 type JsonSchema struct {
-	Schema map[string]any `json:"schema"`
+	Schema json.RawMessage `json:"schema"`
 }
 
 type EmbedRequest struct {
@@ -495,11 +495,7 @@ func fromChatRequest(r ChatCompletionRequest) (*api.ChatRequest, error) {
 			format = json.RawMessage(`"json"`)
 		case "json_schema":
 			if r.ResponseFormat.JsonSchema != nil {
-				schema, err := json.Marshal(r.ResponseFormat.JsonSchema.Schema)
-				if err != nil {
-					return nil, fmt.Errorf("failed to marshal json schema: %w", err)
-				}
-				format = schema
+				format = r.ResponseFormat.JsonSchema.Schema
 			}
 		}
 	}
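
For illustration only, a small sketch of how the updated type behaves
(the JsonSchema struct mirrors the one in this diff; the request body is
a made-up example): the schema bytes now pass through decoding untouched,
so the caller's field order survives into the format field.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the updated JsonSchema type from this change.
type JsonSchema struct {
	Schema json.RawMessage `json:"schema"`
}

func main() {
	// Hypothetical request fragment; "step" is deliberately listed before "answer".
	body := []byte(`{"schema":{"type":"object","properties":{"step":{"type":"string"},"answer":{"type":"string"}}}}`)

	var js JsonSchema
	if err := json.Unmarshal(body, &js); err != nil {
		panic(err)
	}

	// The raw schema bytes are preserved verbatim, including field order.
	fmt.Println(string(js.Schema))
}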