mirror of
https://github.com/ollama/ollama.git
synced 2025-05-11 10:26:53 +02:00
Always dynamically load the llm server library
This switches darwin to dynamic loading, and refactors the code now that no static linking of the library is used on any platform
This commit is contained in:
parent
d88c527be3
commit
39928a42e8
23 changed files with 290 additions and 463 deletions
|
@ -32,8 +32,15 @@ func CheckVRAM() (int64, error) {
|
|||
|
||||
func GetGPUInfo() GpuInfo {
|
||||
mem, _ := getCPUMem()
|
||||
if runtime.GOARCH == "amd64" {
|
||||
return GpuInfo{
|
||||
Library: "default",
|
||||
Variant: GetCPUVariant(),
|
||||
memInfo: mem,
|
||||
}
|
||||
}
|
||||
return GpuInfo{
|
||||
Library: "default",
|
||||
Library: "metal",
|
||||
memInfo: mem,
|
||||
}
|
||||
}
|
||||
|
@ -45,12 +52,3 @@ func getCPUMem() (memInfo, error) {
|
|||
DeviceCount: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// nativeInit performs platform-specific GPU setup. On Darwin there is
// nothing to initialize, so it always succeeds.
func nativeInit() error {
	return nil
}
|
||||
|
||||
// GetCPUVariant returns the CPU feature variant suffix used to select a
// CPU-specific build of the compute library.
// There are no CPU-based builds for Darwin yet, so the variant is
// always the empty string.
func GetCPUVariant() string {
	// We don't yet have CPU based builds for Darwin...
	return ""
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue