mirror of
https://github.com/ollama/ollama.git
synced 2025-05-11 02:16:36 +02:00
Adapted rocm support to cgo based llama.cpp
This commit is contained in:
parent
f8ef4439e9
commit
35934b2e05
37 changed files with 1688 additions and 658 deletions
34
gpu/gpu_darwin.go
Normal file
34
gpu/gpu_darwin.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
//go:build darwin
|
||||
|
||||
package gpu
|
||||
|
||||
import "C"
|
||||
import (
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
// CheckVRAM returns the free VRAM in bytes.
// This is the darwin stub: VRAM detection is not yet implemented on
// macOS, so it always reports 0 bytes free with a nil error.
// (The previous comment incorrectly described the Linux/NVIDIA path.)
func CheckVRAM() (int64, error) {
	// TODO - assume metal, and return free memory?
	return 0, nil
}
|
||||
|
||||
func GetGPUInfo() GpuInfo {
|
||||
// TODO - Metal vs. x86 macs...
|
||||
|
||||
return GpuInfo{
|
||||
Driver: "METAL",
|
||||
TotalMemory: 0,
|
||||
FreeMemory: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
|
||||
// default to enable metal on macOS
|
||||
return 1
|
||||
}
|
||||
|
||||
// nativeInit performs platform-specific GPU initialization.
// No setup is required on macOS, so it is a no-op that always
// succeeds.
func nativeInit() error {
	return nil
}
|
Loading…
Add table
Add a link
Reference in a new issue