mirror of
https://github.com/ollama/ollama.git
synced 2025-05-11 10:26:53 +02:00
Use our libraries first
Trying to live off the land for CUDA libraries was not the right strategy. We need to use the version we compiled against to ensure things work properly.
This commit is contained in:
parent
0963c65027
commit
380378cc80
2 changed files with 31 additions and 15 deletions
|
@ -166,6 +166,12 @@ func GetGPUInfo() GpuInfoList {
|
|||
slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
|
||||
}
|
||||
|
||||
// On windows we bundle the nvidia library one level above the runner dir
|
||||
depPath := ""
|
||||
if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
|
||||
depPath = filepath.Dir(envconfig.RunnersDir)
|
||||
}
|
||||
|
||||
var memInfo C.mem_info_t
|
||||
resp := []GpuInfo{}
|
||||
|
||||
|
@ -198,6 +204,7 @@ func GetGPUInfo() GpuInfoList {
|
|||
gpuInfo.Major = int(memInfo.major)
|
||||
gpuInfo.Minor = int(memInfo.minor)
|
||||
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||
gpuInfo.DependencyPath = depPath
|
||||
|
||||
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||
resp = append(resp, gpuInfo)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue