Compare commits
2 commits: main...pdevine/ne

Commits:
- b8af12ceaf
- 6f041ddfa4
@@ -3,7 +3,9 @@ ollama
app
macapp
dist
llm/llama.cpp
.env
.cache
test_data
llm/build
llama/build
.gitattributes (vendored): 10 changes
@@ -1,11 +1,3 @@
llama/**/*.cpp linguist-vendored
llama/**/*.hpp linguist-vendored
llama/**/*.h linguist-vendored
llama/**/*.c linguist-vendored
llama/**/*.cu linguist-vendored
llama/**/*.cuh linguist-vendored
llama/**/*.m linguist-vendored
llama/**/*.metal linguist-vendored

llm/ext_server/* linguist-vendored
* text=auto
*.go text eol=lf
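Note: the linguist-vendored attribute excludes matching paths from GitHub's language statistics and collapses them in diffs. A quick way to confirm how an attribute resolves for a given path (the file name here is illustrative) is git check-attr:

    git check-attr linguist-vendored -- llm/ext_server/server.cpp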
.github/workflows/release.yaml (vendored): 525 changes
@@ -1,9 +1,5 @@
name: release

env:
ROCM_WINDOWS_URL: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe

on:
push:
tags:
@@ -12,7 +8,7 @@ on:
jobs:
# Full build of the Mac assets
build-darwin:
runs-on: macos-13
runs-on: macos-12
environment: release
steps:
- uses: actions/checkout@v4
@@ -43,8 +39,8 @@ jobs:
APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
APPLE_ID: ${{ vars.APPLE_ID }}
SDKROOT: /Applications/Xcode_14.1.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
DEVELOPER_DIR: /Applications/Xcode_14.1.0.app/Contents/Developer
SDKROOT: /Applications/Xcode_13.4.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
DEVELOPER_DIR: /Applications/Xcode_13.4.1.app/Contents/Developer
run: |
./scripts/build_darwin.sh

@@ -52,8 +48,8 @@ jobs:
with:
name: dist-darwin
path: |
dist/Ollama-darwin.zip
dist/ollama-darwin
dist/*arwin*
!dist/*-cov

# Windows builds take a long time to both install the dependencies and build, so parallelize
# CPU generation step
@@ -64,286 +60,14 @@ jobs:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
- uses: actions/checkout@v4
- name: Set make jobs default
run: |
echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Set Version
shell: bash
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- name: Add msys paths
run: |
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make
name: make
- uses: actions/upload-artifact@v4
with:
name: generate-windows-cpu
path: |
build/**/*
dist/windows-amd64/**

# ROCm generation step
generate-windows-rocm:
environment: release
runs-on: windows
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
- uses: actions/checkout@v4
- name: Set make jobs default
run: |
echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Set Version
shell: bash
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- name: Add msys paths
run: |
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
# ROCM installation steps
- name: 'Cache ROCm installer'
id: cache-rocm
uses: actions/cache@v4
with:
path: rocm-install.exe
key: ${{ env.ROCM_WINDOWS_URL }}
- name: 'Conditionally Download ROCm'
if: steps.cache-rocm.outputs.cache-hit != 'true'
run: |
$ErrorActionPreference = "Stop"
Invoke-WebRequest -Uri "${env:ROCM_WINDOWS_URL}" -OutFile "rocm-install.exe"
- name: 'Install ROCm'
run: |
Start-Process "rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
- name: 'Verify ROCm'
run: |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
echo "HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path | select -first 1)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: make rocm runner
run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make -C llama print-HIP_PATH print-HIP_LIB_DIR
make rocm
- uses: actions/upload-artifact@v4
with:
name: generate-windows-rocm
path: |
build/**/*
dist/windows-amd64/**

# CUDA generation step
generate-windows-cuda:
environment: release
runs-on: windows
strategy:
matrix:
cuda:
- version: "11.3"
url: https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe
- version: "12.4"
url: https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
- uses: actions/checkout@v4
- name: Set make jobs default
run: |
echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Set Version
shell: bash
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- name: Install msys2
run: |
$msys2_url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe"
write-host "Downloading msys2"
Invoke-WebRequest -Uri "${msys2_url}" -OutFile "${env:RUNNER_TEMP}\msys2.exe"
write-host "Installing msys2"
Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @("in", "--confirm-command", "--accept-messages", "--root", "C:/msys64") -NoNewWindow -Wait
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang", "make") -NoNewWindow -Wait
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: verify tools
run: |
get-command gcc
gcc --version
get-command make
make --version
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
# CUDA installation steps
- name: 'Cache CUDA installer'
id: cache-cuda
uses: actions/cache@v4
with:
path: cuda-install.exe
key: ${{ matrix.cuda.url }}
- name: 'Conditionally Download CUDA'
if: steps.cache-cuda.outputs.cache-hit != 'true'
run: |
$ErrorActionPreference = "Stop"
Invoke-WebRequest -Uri "${{ matrix.cuda.url }}" -OutFile "cuda-install.exe"
- name: 'Install CUDA'
run: |
$subpackages = @("cudart", "nvcc", "cublas", "cublas_dev") | foreach-object {"${_}_${{ matrix.cuda.version }}"}
Start-Process "cuda-install.exe" -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait
- name: 'Verify CUDA'
run: |
& (resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0] --version
$cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
$cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
echo "$cudaPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "CUDA_PATH=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
echo "CUDA_PATH_V${cudaVer}=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

- name: make cuda runner
run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make cuda_v$(($env:CUDA_PATH | split-path -leaf) -replace 'v(\d+).*', '$1')
- uses: actions/upload-artifact@v4
with:
name: generate-windows-cuda-${{ matrix.cuda.version }}
path: |
build/**/*
dist/windows-amd64/**

# windows arm64 generate, go build, and zip file (no installer)
# Output of this build is aggregated into the final x86 build
# for a unified windows installer
windows-arm64:
runs-on: windows-arm64
environment: release
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
# The current Windows arm64 beta image has effectively zero dev tools installed...
- name: Install git and gzip
run: |
Set-ExecutionPolicy Bypass -Scope Process -Force
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072
iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
choco install -y --no-progress git gzip
echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
# pacman is buggy on win arm64, so we avoid using it, but rely on the binary artifacts
# we download the sfx (7zip bundle) which isn't fully set up, but the binaries we need to build work
- name: Install msys2 x64
run: |
$url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-base-x86_64-20240727.sfx.exe"
write-host "Downloading MSYS2"
Invoke-WebRequest -Uri "$url" -outfile "${env:RUNNER_TEMP}\msys2.exe"
write-host "Installing msys2"
Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @(
'-y', '-oC:\'
) -NoNewWindow -Wait
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
# since pacman isn't reliable, we just download the tar file and extract directly
- name: Downloading and extracting msys2 make tar file
run: |
$url="https://mirror.msys2.org/msys/x86_64/make-4.4.1-2-x86_64.pkg.tar.zst"
write-host "Downloading make"
Invoke-WebRequest -Uri "$url" -outfile c:\msys64\make.tar.zst
cd c:\msys64; tar -xf make.tar.zst
rm c:\msys64\make.tar.zst
- name: Verify Make works properly
run: |
echo $env:PATH
make --version
- name: Install Visual Studio 2022
run: |
$components = @(
"Microsoft.VisualStudio.Component.CoreEditor",
"Microsoft.VisualStudio.Workload.CoreEditor",
"Microsoft.VisualStudio.Component.Roslyn.Compiler",
"Microsoft.Component.MSBuild",
"Microsoft.VisualStudio.Component.TextTemplating",
"Microsoft.VisualStudio.Component.Debugger.JustInTime",
"Microsoft.VisualStudio.Component.VC.CoreIde",
"Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"Microsoft.VisualStudio.Component.Windows11SDK.22621",
"Microsoft.VisualStudio.Component.VC.Tools.ARM64EC",
"Microsoft.VisualStudio.Component.VC.Tools.ARM64",
"Microsoft.VisualStudio.Component.VC.ATL",
"Microsoft.VisualStudio.Component.VC.ATL.ARM64",
"Microsoft.VisualStudio.Component.Graphics",
"Microsoft.VisualStudio.Component.VC.Redist.14.Latest",
"Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core",
"Microsoft.VisualStudio.Component.Windows11Sdk.WindowsPerformanceToolkit",
"Microsoft.VisualStudio.Component.CppBuildInsights",
"Microsoft.VisualStudio.Component.VC.DiagnosticTools",
"Microsoft.VisualStudio.ComponentGroup.WebToolsExtensions.CMake",
"Microsoft.VisualStudio.Component.VC.CMake.Project",
"Microsoft.VisualStudio.Component.VC.ASAN",
"Microsoft.VisualStudio.Component.Vcpkg",
"Microsoft.VisualStudio.Workload.NativeDesktop"
)
$config = @{
"version" = "1.0"
"components" = $components
"extensions" = @()
}
$configPath = "${env:RUNNER_TEMP}\vsconfig"
$config | ConvertTo-Json | Out-File -FilePath $configPath
$bootstrapperFilePath = "${env:RUNNER_TEMP}\vs_community.exe"
write-host "Downloading Visual Studio 2022"
Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_community.exe" -outfile $bootstrapperFilePath
$bootstrapperArgumentList = ('/c', $bootstrapperFilePath, '--config', $configPath, '--quiet', '--wait' )
write-host "Installing Visual Studio 2022"
$process = Start-Process -FilePath cmd.exe -ArgumentList $bootstrapperArgumentList -Wait -PassThru
$exitCode = $process.ExitCode
write-host $exitCode
# pacman in mingw/msys2 is ~broken on windows arm right now - hangs consistently during attempts to install
# so we'll use this alternative GCC binary
- name: Install llvm-mingw GCC
run: |
$gcc_url="https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip"
write-host "Downloading llvm-mingw"
Invoke-WebRequest -Uri "${gcc_url}" -OutFile "${env:RUNNER_TEMP}\gcc.zip"
write-host "Unpacking llvm-mingw"
expand-archive -path "${env:RUNNER_TEMP}\gcc.zip" -destinationpath "c:\"
mv c:\llvm-mingw-* c:\llvm-mingw
echo "c:\llvm-mingw\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Verify GCC
run: |
echo $env:PATH
gcc --version
- uses: actions/checkout@v4
- name: Set Version
run: |
$ver=${env:GITHUB_REF_NAME}.trim("v")
echo VERSION=$ver | Out-File -FilePath ${env:GITHUB_ENV} -Encoding utf8 -Append
- uses: 'google-github-actions/auth@v2'
with:
project_id: 'ollama'
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
- run: echo "${{ vars.OLLAMA_CERT }}" | Out-File -FilePath ollama_inc.crt -Encoding utf8
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
- name: install Windows SDK 8.1 to get signtool
run: |
$ErrorActionPreference = "Stop"
@@ -368,23 +92,189 @@ jobs:
- run: go get ./...
- run: |
$gopath=(get-command go).source | split-path -parent
$gccpath=(get-command gcc).source | split-path -parent
import-module 'C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -Arch arm64 -vsinstallpath 'C:\Program Files\Microsoft Visual Studio\2022\Community' -skipautomaticlocation
$env:PATH="$gopath;$gccpath;$env:PATH"
echo $env:PATH
$env:ARCH="arm64"
.\scripts\build_windows.ps1 buildOllama buildApp gatherDependencies sign distZip
name: 'Windows Build'
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$env:PATH"
go generate -x ./...
name: go generate
- uses: actions/upload-artifact@v4
with:
name: windows-arm64
name: generate-windows-cpu
path: |
dist/windows-arm64/**
dist/windows-arm64-app.exe
dist/ollama-windows-arm64.zip
build/**/*
build/**/*.a
llm/build/**/*.a
dist/windows-amd64/**

# Import the prior generation steps plus the full arm64 build, and build the final windows assets
# ROCm generation step
generate-windows-rocm:
environment: release
runs-on: windows
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
- uses: actions/checkout@v4
- name: Set Version
shell: bash
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- uses: 'google-github-actions/auth@v2'
with:
project_id: 'ollama'
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
- name: install Windows SDK 8.1 to get signtool
run: |
$ErrorActionPreference = "Stop"
write-host "downloading SDK"
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
write-host "Win SDK 8.1 installed"
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
- name: install signing plugin
run: |
$ErrorActionPreference = "Stop"
write-host "downloading plugin"
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
write-host "Installing plugin"
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
write-host "plugin installed"
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: 'Install ROCm'
run: |
$ErrorActionPreference = "Stop"
write-host "downloading AMD HIP Installer"
Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
write-host "Installing AMD HIP"
Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
write-host "Completed AMD HIP"
- name: 'Verify ROCm'
run: |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
- run: go get ./...
- run: |
$gopath=(get-command go).source | split-path -parent
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$env:PATH"
$env:OLLAMA_SKIP_CPU_GENERATE="1"
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
go generate -x ./...
name: go generate
- name: 'gather rocm dependencies'
run: |
$HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
md "dist\deps\bin\rocblas\library"
cp "${HIP_PATH}\bin\hipblas.dll" "dist\deps\bin\"
cp "${HIP_PATH}\bin\rocblas.dll" "dist\deps\bin\"
cp "${HIP_PATH}\bin\rocblas\library\*" "dist\deps\bin\rocblas\library\"
- uses: actions/upload-artifact@v4
with:
name: generate-windows-rocm
path: |
build/**/*
dist/windows-amd64/**
- uses: actions/upload-artifact@v4
with:
name: windows-rocm-deps
path: dist/deps/*

# CUDA generation step
generate-windows-cuda:
environment: release
runs-on: windows
strategy:
matrix:
cuda:
- version: "11"
url: 'https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe'
- version: "12"
url: 'https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe'
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
- uses: actions/checkout@v4
- name: Set Version
shell: bash
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
- uses: 'google-github-actions/auth@v2'
with:
project_id: 'ollama'
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
- name: install Windows SDK 8.1 to get signtool
run: |
$ErrorActionPreference = "Stop"
write-host "downloading SDK"
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
write-host "Win SDK 8.1 installed"
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
- name: install signing plugin
run: |
$ErrorActionPreference = "Stop"
write-host "downloading plugin"
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
write-host "Installing plugin"
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
write-host "plugin installed"
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: 'Install CUDA ${{ matrix.cuda.version }}'
run: |
$ErrorActionPreference = "Stop"
write-host "downloading CUDA Installer"
Invoke-WebRequest -Uri "${{ matrix.cuda.url }}" -OutFile "${env:RUNNER_TEMP}\cuda-install.exe"
write-host "Installing CUDA"
Start-Process "${env:RUNNER_TEMP}\cuda-install.exe" -ArgumentList '-s' -NoNewWindow -Wait
write-host "Completed CUDA"
$cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
$cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
echo "$cudaPath\bin" >> $env:GITHUB_PATH
echo "CUDA_PATH=$cudaPath" >> $env:GITHUB_ENV
echo "CUDA_PATH_V${cudaVer}=$cudaPath" >> $env:GITHUB_ENV
echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" >> $env:GITHUB_ENV
- name: 'Verify CUDA'
run: nvcc -V
- run: go get ./...
- name: go generate
run: |
$gopath=(get-command go).source | split-path -parent
$cudabin=(get-command nvcc).source | split-path
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$cudabin;$env:PATH"
$env:OLLAMA_SKIP_CPU_GENERATE="1"
go generate -x ./...
- name: 'gather cuda dependencies'
run: |
$NVIDIA_DIR=(resolve-path 'C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*\bin\')[0]
md "dist\deps"
cp "${NVIDIA_DIR}\cudart64_*.dll" "dist\deps\"
cp "${NVIDIA_DIR}\cublas64_*.dll" "dist\deps\"
cp "${NVIDIA_DIR}\cublasLt64_*.dll" "dist\deps\"
- uses: actions/upload-artifact@v4
with:
name: generate-windows-cuda-${{ matrix.cuda.version }}
path: |
build/**/*
dist/windows-amd64/**
- uses: actions/upload-artifact@v4
with:
name: windows-cuda-deps-${{ matrix.cuda.version }}
path: dist/deps/*

# Import the prior generation steps and build the final windows assets
build-windows:
environment: release
runs-on: windows
@@ -392,7 +282,6 @@ jobs:
- generate-windows-cuda
- generate-windows-rocm
- generate-windows-cpu
- windows-arm64
env:
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
steps:
@@ -424,24 +313,6 @@ jobs:
write-host "Installing plugin"
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
write-host "plugin installed"
- name: Install msys2
run: |
$msys2_url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe"
write-host "Downloading msys2"
Invoke-WebRequest -Uri "${msys2_url}" -OutFile "${env:RUNNER_TEMP}\msys2.exe"
write-host "Installing msys2"
Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @("in", "--confirm-command", "--accept-messages", "--root", "C:/msys64") -NoNewWindow -Wait
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang", "make") -NoNewWindow -Wait
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: verify tools
run: |
get-command gcc
gcc --version
get-command make
make --version
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
@@ -452,24 +323,30 @@ jobs:
name: generate-windows-cpu
- uses: actions/download-artifact@v4
with:
name: generate-windows-cuda-11.3
name: generate-windows-cuda-11
- uses: actions/download-artifact@v4
with:
name: generate-windows-cuda-12.4
name: generate-windows-cuda-12
- uses: actions/download-artifact@v4
with:
name: windows-cuda-deps-11
- uses: actions/download-artifact@v4
with:
name: windows-cuda-deps-12
- uses: actions/download-artifact@v4
with:
name: windows-rocm-deps
- uses: actions/download-artifact@v4
with:
name: generate-windows-rocm
- uses: actions/download-artifact@v4
with:
name: windows-arm64
path: dist
- run: dir build
- run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
$gopath=(get-command go).source | split-path -parent
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$env:PATH"
$env:OLLAMA_SKIP_GENERATE="1"
$env:ARCH="amd64"
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
& .\scripts\build_windows.ps1
- uses: actions/upload-artifact@v4
with:
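Aside: this workflow repeatedly derives versioned environment variable names from the CUDA install path with a PowerShell -replace. A standalone sketch of that transformation (the path below is illustrative):

    $cudaPath = 'C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3'
    $cudaVer = ($cudaPath | Split-Path -Leaf) -replace 'v(\d+).(\d+)', '$1_$2'   # 'v11.3' -> '11_3'
    "CUDA_PATH_V${cudaVer}"                                                      # -> CUDA_PATH_V11_3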
.github/workflows/test.yaml (vendored): 350 changes
@@ -1,11 +1,5 @@
name: test

env:
ROCM_WINDOWS_URL: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe
MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe
CUDA_12_WINDOWS_URL: https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe
CUDA_12_WINDOWS_VER: 12.4

concurrency:
# For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
# cancels running CI jobs and starts all new ones.
@@ -27,7 +21,9 @@ jobs:
changes:
runs-on: ubuntu-latest
outputs:
RUNNERS: ${{ steps.changes.outputs.RUNNERS }}
GENERATE: ${{ steps.changes.outputs.GENERATE }}
GENERATE_CUDA: ${{ steps.changes.outputs.GENERATE_CUDA }}
GENERATE_ROCM: ${{ steps.changes.outputs.GENERATE_ROCM }}
steps:
- uses: actions/checkout@v4
with:
@@ -42,167 +38,14 @@ jobs:
}

{
echo RUNNERS=$(changed 'llama/**')
echo GENERATE=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
echo GENERATE_CUDA=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
echo GENERATE_ROCM=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
} >>$GITHUB_OUTPUT

runners-linux-cuda:
generate:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
strategy:
matrix:
cuda-version:
- '11.8.0'
runs-on: linux
container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04
steps:
- run: |
apt-get update && apt-get install -y git build-essential curl
env:
DEBIAN_FRONTEND: noninteractive
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: true
- run: go get ./...
- run: |
git config --global --add safe.directory /__w/ollama/ollama
cores=$(grep '^core id' /proc/cpuinfo |sort -u|wc -l)
make -j $cores cuda_v11
runners-linux-rocm:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
strategy:
matrix:
rocm-version:
- '6.1.2'
runs-on: linux
container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
steps:
- run: |
apt-get update && apt-get install -y git build-essential curl rocm-libs
env:
DEBIAN_FRONTEND: noninteractive
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: true
- run: go get ./...
- run: |
git config --global --add safe.directory /__w/ollama/ollama
cores=$(grep '^core id' /proc/cpuinfo |sort -u|wc -l)
make -j $cores rocm

# ROCm generation step
runners-windows-rocm:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
runs-on: windows
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Set make jobs default
run: |
echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

# ROCM installation steps
- name: 'Cache ROCm installer'
id: cache-rocm
uses: actions/cache@v4
with:
path: rocm-install.exe
key: ${{ env.ROCM_WINDOWS_URL }}
- name: 'Conditionally Download ROCm'
if: steps.cache-rocm.outputs.cache-hit != 'true'
run: |
$ErrorActionPreference = "Stop"
Invoke-WebRequest -Uri "${env:ROCM_WINDOWS_URL}" -OutFile "rocm-install.exe"
- name: 'Install ROCm'
run: |
Start-Process "rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
- name: 'Verify ROCm'
run: |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
echo "HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path | select -first 1)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

- name: Add msys paths
run: |
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait

- name: make rocm runner
run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make -C llama print-HIP_PATH print-HIP_LIB_DIR
make rocm

# CUDA generation step
runners-windows-cuda:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
runs-on: windows
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Set make jobs default
run: |
echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

# CUDA installation steps
- name: 'Cache CUDA installer'
id: cache-cuda
uses: actions/cache@v4
with:
path: cuda-install.exe
key: ${{ env.CUDA_12_WINDOWS_URL }}
- name: 'Conditionally Download CUDA'
if: steps.cache-cuda.outputs.cache-hit != 'true'
run: |
$ErrorActionPreference = "Stop"
Invoke-WebRequest -Uri "${env:CUDA_12_WINDOWS_URL}" -OutFile "cuda-install.exe"
- name: 'Install CUDA'
run: |
$subpackages = @("cudart", "nvcc", "cublas", "cublas_dev") | foreach-object {"${_}_${{ env.CUDA_12_WINDOWS_VER }}"}
Start-Process "cuda-install.exe" -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait
- name: 'Verify CUDA'
run: |
& (resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0] --version
$cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
$cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
echo "$cudaPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "CUDA_PATH=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
echo "CUDA_PATH_V${cudaVer}=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append

- name: Add msys paths
run: |
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
- name: make cuda runner
run: |
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make cuda_v$(($env:CUDA_PATH | split-path -leaf) -replace 'v(\d+).*', '$1')

runners-cpu:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
if: ${{ needs.changes.outputs.GENERATE == 'True' }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-2019]
@@ -215,7 +58,6 @@ jobs:
runs-on: ${{ matrix.os }}
env:
GOARCH: ${{ matrix.arch }}
ARCH: ${{ matrix.arch }}
CGO_ENABLED: '1'
steps:
- uses: actions/checkout@v4
@@ -223,31 +65,153 @@ jobs:
with:
go-version-file: go.mod
cache: true
- name: Add msys paths
if: ${{ startsWith(matrix.os, 'windows-') }}
run: |
echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
- name: Install msys2 tools
if: ${{ startsWith(matrix.os, 'windows-') }}
run: |
Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
- name: 'Build Windows Go Runners'
if: ${{ startsWith(matrix.os, 'windows-') }}
run: |
- run: go get ./...
- run: |
$gopath=(get-command go).source | split-path -parent
$gccpath=(get-command gcc).source | split-path -parent
import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$gccpath;$env:PATH"
echo $env:PATH
if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
make -j 4
- name: 'Build Unix Go Runners'
go generate -x ./...
if: ${{ startsWith(matrix.os, 'windows-') }}
name: 'Windows Go Generate'
- run: go generate -x ./...
if: ${{ ! startsWith(matrix.os, 'windows-') }}
run: make -j 4
name: 'Unix Go Generate'
- run: go build .
generate-cuda:
needs: [changes]
if: ${{ needs.changes.outputs.GENERATE_CUDA == 'True' }}
strategy:
matrix:
cuda-version:
- '11.8.0'
runs-on: linux
container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04
steps:
- run: |
apt-get update && apt-get install -y git build-essential curl
curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
| tar -zx -C /usr --strip-components 1
env:
DEBIAN_FRONTEND: noninteractive
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: true
- run: go get ./...
- run: |
git config --global --add safe.directory /__w/ollama/ollama
go generate -x ./...
env:
OLLAMA_SKIP_CPU_GENERATE: '1'
generate-rocm:
needs: [changes]
if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
strategy:
matrix:
rocm-version:
- '6.1.2'
runs-on: linux
container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
steps:
- run: |
apt-get update && apt-get install -y git build-essential curl rocm-libs
curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
| tar -zx -C /usr --strip-components 1
env:
DEBIAN_FRONTEND: noninteractive
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: true
- run: go get ./...
- run: |
git config --global --add safe.directory /__w/ollama/ollama
go generate -x ./...
env:
OLLAMA_SKIP_CPU_GENERATE: '1'

# ROCm generation step
generate-windows-rocm:
needs: [changes]
if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
runs-on: windows
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: 'Install ROCm'
run: |
$ErrorActionPreference = "Stop"
write-host "downloading AMD HIP Installer"
Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
write-host "Installing AMD HIP"
Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
write-host "Completed AMD HIP"
- name: 'Verify ROCm'
run: |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
- run: go get ./...
- run: |
$gopath=(get-command go).source | split-path -parent
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$env:PATH"
$env:OLLAMA_SKIP_CPU_GENERATE="1"
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
go generate -x ./...
name: go generate
env:
OLLAMA_SKIP_CPU_GENERATE: '1'

# CUDA generation step
generate-windows-cuda:
needs: [changes]
if: ${{ needs.changes.outputs.GENERATE_CUDA == 'True' }}
runs-on: windows
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: 'Install CUDA'
run: |
$ErrorActionPreference = "Stop"
write-host "downloading CUDA Installer"
Invoke-WebRequest -Uri "https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe" -OutFile "${env:RUNNER_TEMP}\cuda-install.exe"
write-host "Installing CUDA"
Start-Process "${env:RUNNER_TEMP}\cuda-install.exe" -ArgumentList '-s' -NoNewWindow -Wait
write-host "Completed CUDA"
$cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
$cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
echo "$cudaPath\bin" >> $env:GITHUB_PATH
echo "CUDA_PATH=$cudaPath" >> $env:GITHUB_ENV
echo "CUDA_PATH_V${cudaVer}=$cudaPath" >> $env:GITHUB_ENV
echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" >> $env:GITHUB_ENV
- name: 'Verify CUDA'
run: nvcc -V
- run: go get ./...
- name: go generate
run: |
$gopath=(get-command go).source | split-path -parent
$cudabin=(get-command nvcc).source | split-path
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
cd $env:GITHUB_WORKSPACE
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
$env:PATH="$gopath;$cudabin;$env:PATH"
$env:OLLAMA_SKIP_CPU_GENERATE="1"
go generate -x ./...
env:
OLLAMA_SKIP_CPU_GENERATE: '1'

lint:
strategy:
@@ -281,7 +245,7 @@ jobs:
shell: bash
- uses: golangci/golangci-lint-action@v6
with:
args: --timeout 10m0s -v
args: --timeout 8m0s -v
test:
strategy:
matrix:
@@ -296,6 +260,9 @@ jobs:
env:
GOARCH: ${{ matrix.arch }}
CGO_ENABLED: '1'
OLLAMA_CPU_TARGET: 'static'
OLLAMA_SKIP_CPU_GENERATE: '1'
OLLAMA_SKIP_METAL_GENERATE: '1'
steps:
- uses: actions/checkout@v4
with:
@@ -310,17 +277,6 @@ jobs:
arm64) echo ARCH=arm64 ;;
esac >>$GITHUB_ENV
shell: bash
- run: go generate ./...
- run: go build
- run: go test -v ./...

patches:
needs: [changes]
if: ${{ needs.changes.outputs.RUNNERS == 'True' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Verify patches carry all the changes
run: |
make apply-patches sync && git diff --compact-summary --exit-code llama
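Aside: the changes job above gates the other jobs by writing boolean strings as step outputs. A minimal sketch of the mechanism it uses (values hard-coded here for illustration; the real job computes them with its changed helper, whose definition is elided from this diff):

    {
      echo RUNNERS=True
      echo GENERATE=False
    } >>$GITHUB_OUTPUT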
.gitignore (vendored): 4 changes
@@ -5,6 +5,7 @@
.swp
dist
ollama
ggml-metal.metal
.cache
*.exe
.idea
@@ -14,5 +15,4 @@ llm/build
build/*/*/*
!build/**/placeholder
llama/build
__debug_bin*
llama/vendor
__debug_bin*
.gitmodules (vendored, new file): 4 changes
@@ -0,0 +1,4 @@
[submodule "llama.cpp"]
path = llm/llama.cpp
url = https://github.com/ggerganov/llama.cpp.git
shallow = true
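Note: with shallow = true in .gitmodules, initializing the submodule performs a shallow (depth-1) clone instead of fetching the full llama.cpp history, e.g.:

    git submodule update --init llm/llama.cpp   # clones with --depth 1 because of shallow = true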
Dockerfile: 336 changes
@@ -1,204 +1,197 @@
ARG GOLANG_VERSION=1.22.8
ARG GOLANG_VERSION=1.22.5
ARG CMAKE_VERSION=3.22.1
ARG CUDA_VERSION_11=11.3.1
ARG CUDA_V11_ARCHITECTURES="50;52;53;60;61;62;70;72;75;80;86"
ARG CUDA_VERSION_12=12.4.0
ARG CUDA_V12_ARCHITECTURES="60;61;62;70;72;75;80;86;87;89;90;90a"
ARG ROCM_VERSION=6.1.2
ARG JETPACK_6=r36.2.0
ARG JETPACK_5=r35.4.1

### To create a local image for building linux binaries on mac or windows with efficient incremental builds
#
# docker build --platform linux/amd64 -t builder-amd64 -f Dockerfile --target unified-builder-amd64 .
# docker run --platform linux/amd64 --rm -it -v $(pwd):/go/src/github.com/ollama/ollama/ builder-amd64
#
### Then incremental builds will be much faster in this container
#
# make -j 10 && go build -trimpath -o dist/linux-amd64/ollama .
#
FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete AS unified-builder-amd64
# Copy the minimal context we need to run the generate scripts
FROM scratch AS llm-code
COPY .git .git
COPY .gitmodules .gitmodules
COPY llm llm

FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION_11-devel-centos7 AS cuda-11-build-amd64
ARG CMAKE_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
ARG CGO_CFLAGS
ARG CUDA_V11_ARCHITECTURES
ENV GOARCH=amd64
RUN --mount=type=cache,target=/root/.ccache \
OLLAMA_SKIP_STATIC_GENERATE=1 \
OLLAMA_SKIP_CPU_GENERATE=1 \
CMAKE_CUDA_ARCHITECTURES="${CUDA_V11_ARCHITECTURES}" \
CUDA_VARIANT="_v11" \
bash gen_linux.sh

FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION_12-devel-centos7 AS cuda-12-build-amd64
ARG CMAKE_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
ARG CGO_CFLAGS
ARG CUDA_V12_ARCHITECTURES
ENV GOARCH=amd64
RUN --mount=type=cache,target=/root/.ccache \
OLLAMA_SKIP_STATIC_GENERATE=1 \
OLLAMA_SKIP_CPU_GENERATE=1 \
CMAKE_CUDA_ARCHITECTURES="${CUDA_V12_ARCHITECTURES}" \
CUDA_VARIANT="_v12" \
OLLAMA_CUSTOM_CUDA_DEFS="-DGGML_CUDA_USE_GRAPHS=on" \
bash gen_linux.sh

FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION_11-devel-rockylinux8 AS cuda-11-build-runner-arm64
ARG CMAKE_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
ARG CGO_CFLAGS
ARG CUDA_V11_ARCHITECTURES
ENV GOARCH=arm64
RUN OLLAMA_SKIP_STATIC_GENERATE=1 \
OLLAMA_SKIP_CPU_GENERATE=1 \
CMAKE_CUDA_ARCHITECTURES="${CUDA_V11_ARCHITECTURES}" \
CUDA_VARIANT="_v11" \
bash gen_linux.sh

FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION_12-devel-rockylinux8 AS cuda-12-build-runner-arm64
ARG CMAKE_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
ARG CGO_CFLAGS
ARG CUDA_V12_ARCHITECTURES
ENV GOARCH=arm64
RUN --mount=type=cache,target=/root/.ccache \
OLLAMA_SKIP_STATIC_GENERATE=1 \
OLLAMA_SKIP_CPU_GENERATE=1 \
CMAKE_CUDA_ARCHITECTURES="${CUDA_V12_ARCHITECTURES}" \
CUDA_VARIANT="_v12" \
OLLAMA_CUSTOM_CUDA_DEFS="-DGGML_CUDA_USE_GRAPHS=on" \
bash gen_linux.sh

FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete AS rocm-build-amd64
ARG CMAKE_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH
ENV LIBRARY_PATH=/opt/amdgpu/lib64
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
ARG CGO_CFLAGS
ARG AMDGPU_TARGETS
ENV GOARCH=amd64
RUN --mount=type=cache,target=/root/.ccache \
OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_SKIP_CPU_GENERATE=1 bash gen_linux.sh
RUN mkdir -p ../../dist/linux-amd64-rocm/lib/ollama && \
(cd /opt/rocm/lib && tar cf - rocblas/library) | (cd ../../dist/linux-amd64-rocm/lib/ollama && tar xf - )

FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
ARG CMAKE_VERSION
ARG GOLANG_VERSION
ARG CUDA_VERSION_11
ARG CUDA_VERSION_12
COPY ./scripts/rh_linux_deps.sh /
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:/usr/local/cuda/bin:$PATH
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64
ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:/opt/amdgpu/lib64
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
RUN yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo && \
dnf clean all && \
dnf install -y \
zsh \
cuda-$(echo ${CUDA_VERSION_11} | cut -f1-2 -d. | sed -e "s/\./-/g") \
cuda-$(echo ${CUDA_VERSION_12} | cut -f1-2 -d. | sed -e "s/\./-/g")
# TODO intel oneapi goes here...
ENV GOARCH amd64
ENV CGO_ENABLED 1
WORKDIR /go/src/github.com/ollama/ollama/
ENTRYPOINT [ "zsh" ]
ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
ARG OLLAMA_CUSTOM_CPU_DEFS
ARG CGO_CFLAGS
ENV GOARCH=amd64
WORKDIR /go/src/github.com/ollama/ollama/llm/generate

### To create a local image for building linux binaries on mac or linux/arm64 with efficient incremental builds
|
||||
# Note: this does not contain jetson variants
|
||||
#
|
||||
# docker build --platform linux/arm64 -t builder-arm64 -f Dockerfile --target unified-builder-arm64 .
|
||||
# docker run --platform linux/arm64 --rm -it -v $(pwd):/go/src/github.com/ollama/ollama/ builder-arm64
|
||||
#
|
||||
FROM --platform=linux/arm64 rockylinux:8 AS unified-builder-arm64
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS static-build-amd64
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
OLLAMA_CPU_TARGET="static" bash gen_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu" bash gen_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx-build-amd64
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx" bash gen_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
|
||||
RUN --mount=type=cache,target=/root/.ccache \
|
||||
OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx2" bash gen_linux.sh
|
||||
|
||||
FROM --platform=linux/arm64 rockylinux:8 AS cpu-builder-arm64
|
||||
ARG CMAKE_VERSION
|
||||
ARG GOLANG_VERSION
|
||||
ARG CUDA_VERSION_11
|
||||
ARG CUDA_VERSION_12
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
|
||||
RUN yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo && \
|
||||
dnf config-manager --set-enabled appstream && \
|
||||
dnf clean all && \
|
||||
dnf install -y \
|
||||
zsh \
|
||||
cuda-toolkit-$(echo ${CUDA_VERSION_11} | cut -f1-2 -d. | sed -e "s/\./-/g") \
|
||||
cuda-toolkit-$(echo ${CUDA_VERSION_12} | cut -f1-2 -d. | sed -e "s/\./-/g")
|
||||
ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH:/usr/local/cuda/bin
|
||||
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64
|
||||
ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:/opt/amdgpu/lib64
|
||||
ENV GOARCH amd64
|
||||
ENV CGO_ENABLED 1
|
||||
WORKDIR /go/src/github.com/ollama/ollama/
|
||||
ENTRYPOINT [ "zsh" ]
|
||||
|
||||
FROM --platform=linux/amd64 unified-builder-amd64 AS runners-amd64
COPY . .
ARG OLLAMA_SKIP_CUDA_GENERATE
ARG OLLAMA_SKIP_CUDA_11_GENERATE
ARG OLLAMA_SKIP_CUDA_12_GENERATE
ARG OLLAMA_SKIP_ROCM_GENERATE
ARG CUDA_V11_ARCHITECTURES
ARG CUDA_V12_ARCHITECTURES
ARG OLLAMA_FAST_BUILD
RUN --mount=type=cache,target=/root/.ccache \
    if grep "^flags" /proc/cpuinfo|grep avx>/dev/null; then \
        make -j $(expr $(nproc) / 2 ) ; \
    else \
        make -j 5 ; \
    fi

FROM --platform=linux/arm64 unified-builder-arm64 AS runners-arm64
COPY . .
ARG OLLAMA_SKIP_CUDA_GENERATE
ARG OLLAMA_SKIP_CUDA_11_GENERATE
ARG OLLAMA_SKIP_CUDA_12_GENERATE
ARG CUDA_V11_ARCHITECTURES
ARG CUDA_V12_ARCHITECTURES
ARG OLLAMA_FAST_BUILD
RUN --mount=type=cache,target=/root/.ccache \
    make -j 5

# Jetsons need to be built in discrete stages
FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK_5} AS runners-jetpack5-arm64
ARG GOLANG_VERSION
RUN apt-get update && apt-get install -y git curl ccache && \
    curl -s -L https://dl.google.com/go/go${GOLANG_VERSION}.linux-arm64.tar.gz | tar xz -C /usr/local && \
    ln -s /usr/local/go/bin/go /usr/local/bin/go && \
    ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
WORKDIR /go/src/github.com/ollama/ollama/
COPY . .
ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
ARG OLLAMA_CUSTOM_CPU_DEFS
ARG CGO_CFLAGS
ENV GOARCH arm64
RUN --mount=type=cache,target=/root/.ccache \
    make -j 5 cuda_v11 \
        CUDA_ARCHITECTURES="72;87" \
        GPU_RUNNER_VARIANT=_jetpack5 \
        CGO_EXTRA_LDFLAGS_LINUX=-L/usr/local/cuda/lib64/stubs \
        DIST_LIB_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ollama \
        DIST_GPU_RUNNER_DEPS_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ollama/cuda_jetpack5
ENV GOARCH=arm64
WORKDIR /go/src/github.com/ollama/ollama/llm/generate

FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK_6} AS runners-jetpack6-arm64
ARG GOLANG_VERSION
RUN apt-get update && apt-get install -y git curl ccache && \
    curl -s -L https://dl.google.com/go/go${GOLANG_VERSION}.linux-arm64.tar.gz | tar xz -C /usr/local && \
    ln -s /usr/local/go/bin/go /usr/local/bin/go && \
    ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
WORKDIR /go/src/github.com/ollama/ollama/
COPY . .
ARG CGO_CFLAGS
ENV GOARCH arm64
FROM --platform=linux/arm64 cpu-builder-arm64 AS static-build-arm64
RUN --mount=type=cache,target=/root/.ccache \
    make -j 5 cuda_v12 \
        CUDA_ARCHITECTURES="87" \
        GPU_RUNNER_VARIANT=_jetpack6 \
        CGO_EXTRA_LDFLAGS_LINUX=-L/usr/local/cuda/lib64/stubs \
        DIST_LIB_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ollama \
        DIST_GPU_RUNNER_DEPS_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ollama/cuda_jetpack6
    OLLAMA_CPU_TARGET="static" bash gen_linux.sh
FROM --platform=linux/arm64 cpu-builder-arm64 AS cpu-build-arm64
RUN --mount=type=cache,target=/root/.ccache \
    OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu" bash gen_linux.sh
# Intermediate stages used for ./scripts/build_linux.sh
FROM --platform=linux/amd64 centos:7 AS builder-amd64
ARG CMAKE_VERSION
ARG GOLANG_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
ENV CGO_ENABLED 1
ENV GOARCH amd64
FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
ENV CGO_ENABLED=1
WORKDIR /go/src/github.com/ollama/ollama

FROM --platform=linux/amd64 builder-amd64 AS build-amd64
COPY . .
COPY --from=runners-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=runners-amd64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=static-build-amd64 /go/src/github.com/ollama/ollama/llm/build/ llm/build/
COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
ARG GOFLAGS
ARG CGO_CFLAGS
ARG OLLAMA_SKIP_ROCM_GENERATE
RUN --mount=type=cache,target=/root/.ccache \
    go build -trimpath -o dist/linux-amd64/bin/ollama .
RUN cd dist/linux-$GOARCH && \
    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz
RUN if [ -z ${OLLAMA_SKIP_ROCM_GENERATE} ] ; then \
    cd dist/linux-$GOARCH-rocm && \
    tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-rocm.tgz ;\
    fi
RUN cd dist/linux-$GOARCH-rocm && \
    tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-rocm.tgz

FROM --platform=linux/arm64 rockylinux:8 AS builder-arm64
ARG CMAKE_VERSION
FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
ENV CGO_ENABLED=1
ARG GOLANG_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
ENV CGO_ENABLED 1
ENV GOARCH arm64
WORKDIR /go/src/github.com/ollama/ollama

FROM --platform=linux/arm64 builder-arm64 AS build-arm64
COPY . .
COPY --from=runners-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=runners-arm64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=runners-jetpack5-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=runners-jetpack5-arm64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=runners-jetpack6-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=runners-jetpack6-arm64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=static-build-arm64 /go/src/github.com/ollama/ollama/llm/build/ llm/build/
COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/build/ build/
COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/build/ build/
ARG GOFLAGS
ARG CGO_CFLAGS
RUN --mount=type=cache,target=/root/.ccache \
    go build -trimpath -o dist/linux-arm64/bin/ollama .
RUN cd dist/linux-$GOARCH && \
    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz
RUN cd dist/linux-$GOARCH-jetpack5 && \
    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH-jetpack5.tgz
RUN cd dist/linux-$GOARCH-jetpack6 && \
    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH-jetpack6.tgz

FROM --platform=linux/amd64 scratch AS dist-amd64
COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz /
FROM --platform=linux/arm64 scratch AS dist-arm64
COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz /
FROM dist-$TARGETARCH AS dist
FROM dist-$TARGETARCH as dist
# Optimized container images do not carry nested payloads
FROM --platform=linux/amd64 builder-amd64 AS container-build-amd64
FROM --platform=linux/amd64 static-build-amd64 AS container-build-amd64
WORKDIR /go/src/github.com/ollama/ollama
COPY . .
ARG GOFLAGS
@ -206,7 +199,7 @@ ARG CGO_CFLAGS
RUN --mount=type=cache,target=/root/.ccache \
    go build -trimpath -o dist/linux-amd64/bin/ollama .

FROM --platform=linux/arm64 builder-arm64 AS container-build-arm64
FROM --platform=linux/arm64 static-build-arm64 AS container-build-arm64
WORKDIR /go/src/github.com/ollama/ollama
COPY . .
ARG GOFLAGS
@ -214,28 +207,18 @@ ARG CGO_CFLAGS
RUN --mount=type=cache,target=/root/.ccache \
    go build -trimpath -o dist/linux-arm64/bin/ollama .

# For amd64 container images, filter out cuda/rocm to minimize size
FROM runners-amd64 AS runners-cuda-amd64
RUN rm -rf \
    ./dist/linux-amd64/lib/ollama/libggml_hipblas.so \
    ./dist/linux-amd64/lib/ollama/runners/rocm*

FROM runners-amd64 AS runners-rocm-amd64
RUN rm -rf \
    ./dist/linux-amd64/lib/ollama/libggml_cuda*.so \
    ./dist/linux-amd64/lib/ollama/libcu*.so* \
    ./dist/linux-amd64/lib/ollama/runners/cuda*

FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-amd64
RUN apt-get update && \
    apt-get install -y ca-certificates && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
COPY --from=container-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/
COPY --from=runners-cuda-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cpu-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/

FROM --platform=linux/arm64 ubuntu:22.04 AS runtime-arm64
COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ /lib/
COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ /lib/
RUN apt-get update && \
    apt-get install -y ca-certificates && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
@ -243,30 +226,29 @@ COPY --from=container-build-arm64 /go/src/github.com/ollama/ollama/dist/linux-ar
COPY --from=cpu-build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
COPY --from=cuda-build-jetpack5-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
COPY --from=cuda-build-jetpack6-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
# ROCm libraries are larger, so we keep them distinct from the CPU/CUDA image
FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-rocm
# Frontload the ROCm libraries, which are large and rarely change, to increase the chance of a common layer
# across releases
COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64-rocm/lib/ /lib/
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64-rocm/lib/ /lib/
RUN apt-get update && \
    apt-get install -y ca-certificates && \
    apt-get clean && rm -rf /var/lib/apt/lists/*
COPY --from=container-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/
COPY --from=runners-rocm-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/

COPY --from=cpu-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
EXPOSE 11434
ENV OLLAMA_HOST 0.0.0.0
ENV OLLAMA_HOST=0.0.0.0

ENTRYPOINT ["/bin/ollama"]
CMD ["serve"]

FROM runtime-$TARGETARCH
EXPOSE 11434
ENV OLLAMA_HOST 0.0.0.0
ENV OLLAMA_HOST=0.0.0.0
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
4
Makefile
@ -1,4 +0,0 @@
GOALS := $(or $(MAKECMDGOALS),all)
.PHONY: $(GOALS)
$(GOALS):
	$(MAKE) -C llama $@
96
README.md
@ -12,7 +12,7 @@ Get up and running with large language models.

[Download](https://ollama.com/download/Ollama-darwin.zip)

### Windows
### Windows preview

[Download](https://ollama.com/download/OllamaSetup.exe)

@ -35,10 +35,10 @@ The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `olla

## Quickstart

To run and chat with [Llama 3.2](https://ollama.com/library/llama3.2):
To run and chat with [Llama 3.1](https://ollama.com/library/llama3.1):

```
ollama run llama3.2
ollama run llama3.1
```

## Model library
@ -47,28 +47,24 @@ Ollama supports a list of models available on [ollama.com/library](https://ollam

Here are some example models that can be downloaded:

| Model              | Parameters | Size  | Download                         |
| ------------------ | ---------- | ----- | -------------------------------- |
| Llama 3.2          | 3B         | 2.0GB | `ollama run llama3.2`            |
| Llama 3.2          | 1B         | 1.3GB | `ollama run llama3.2:1b`         |
| Llama 3.2 Vision   | 11B        | 7.9GB | `ollama run llama3.2-vision`     |
| Llama 3.2 Vision   | 90B        | 55GB  | `ollama run llama3.2-vision:90b` |
| Llama 3.1          | 8B         | 4.7GB | `ollama run llama3.1`            |
| Llama 3.1          | 70B        | 40GB  | `ollama run llama3.1:70b`        |
| Llama 3.1          | 405B       | 231GB | `ollama run llama3.1:405b`       |
| Phi 3 Mini         | 3.8B       | 2.3GB | `ollama run phi3`                |
| Phi 3 Medium       | 14B        | 7.9GB | `ollama run phi3:medium`         |
| Gemma 2            | 2B         | 1.6GB | `ollama run gemma2:2b`           |
| Gemma 2            | 9B         | 5.5GB | `ollama run gemma2`              |
| Gemma 2            | 27B        | 16GB  | `ollama run gemma2:27b`          |
| Mistral            | 7B         | 4.1GB | `ollama run mistral`             |
| Moondream 2        | 1.4B       | 829MB | `ollama run moondream`           |
| Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`         |
| Starling           | 7B         | 4.1GB | `ollama run starling-lm`         |
| Code Llama         | 7B         | 3.8GB | `ollama run codellama`           |
| Llama 2 Uncensored | 7B         | 3.8GB | `ollama run llama2-uncensored`   |
| LLaVA              | 7B         | 4.5GB | `ollama run llava`               |
| Solar              | 10.7B      | 6.1GB | `ollama run solar`               |
| Model              | Parameters | Size  | Download                       |
| ------------------ | ---------- | ----- | ------------------------------ |
| Llama 3.1          | 8B         | 4.7GB | `ollama run llama3.1`          |
| Llama 3.1          | 70B        | 40GB  | `ollama run llama3.1:70b`      |
| Llama 3.1          | 405B       | 231GB | `ollama run llama3.1:405b`     |
| Phi 3 Mini         | 3.8B       | 2.3GB | `ollama run phi3`              |
| Phi 3 Medium       | 14B        | 7.9GB | `ollama run phi3:medium`       |
| Gemma 2            | 2B         | 1.6GB | `ollama run gemma2:2b`         |
| Gemma 2            | 9B         | 5.5GB | `ollama run gemma2`            |
| Gemma 2            | 27B        | 16GB  | `ollama run gemma2:27b`        |
| Mistral            | 7B         | 4.1GB | `ollama run mistral`           |
| Moondream 2        | 1.4B       | 829MB | `ollama run moondream`         |
| Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`       |
| Starling           | 7B         | 4.1GB | `ollama run starling-lm`       |
| Code Llama         | 7B         | 3.8GB | `ollama run codellama`         |
| Llama 2 Uncensored | 7B         | 3.8GB | `ollama run llama2-uncensored` |
| LLaVA              | 7B         | 4.5GB | `ollama run llava`             |
| Solar              | 10.7B      | 6.1GB | `ollama run solar`             |

> [!NOTE]
> You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
@ -103,16 +99,16 @@ See the [guide](docs/import.md) on importing models for more information.

### Customize a prompt

Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.2` model:
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.1` model:

```
ollama pull llama3.2
ollama pull llama3.1
```

Create a `Modelfile`:

```
FROM llama3.2
FROM llama3.1

# set the temperature to 1 [higher is more creative, lower is more coherent]
PARAMETER temperature 1
@ -147,7 +143,7 @@ ollama create mymodel -f ./Modelfile

### Pull a model

```
ollama pull llama3.2
ollama pull llama3.1
```

> This command can also be used to update a local model. Only the diff will be pulled.
@ -155,13 +151,13 @@ ollama pull llama3.2

### Remove a model

```
ollama rm llama3.2
ollama rm llama3.1
```

### Copy a model

```
ollama cp llama3.2 my-model
ollama cp llama3.1 my-model
```

### Multiline input
@ -185,14 +181,14 @@ The image features a yellow smiley face, which is likely the central focus of th

### Pass the prompt as an argument

```
$ ollama run llama3.2 "Summarize this file: $(cat README.md)"
$ ollama run llama3.1 "Summarize this file: $(cat README.md)"
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
```

### Show model information

```
ollama show llama3.2
ollama show llama3.1
```

### List models on your computer
@ -201,18 +197,6 @@ ollama show llama3.2
ollama list
```

### List which models are currently loaded

```
ollama ps
```

### Stop a model which is currently running

```
ollama stop llama3.2
```

### Start Ollama

`ollama serve` is used when you want to start ollama without running the desktop application.
@ -232,7 +216,7 @@ Next, start the server:
Finally, in a separate shell, run a model:

```
./ollama run llama3.2
./ollama run llama3.1
```

## REST API
@ -243,7 +227,7 @@ Ollama has a REST API for running and managing models.

```
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt":"Why is the sky blue?"
}'
```
@ -252,7 +236,7 @@ curl http://localhost:11434/api/generate -d '{

```
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    { "role": "user", "content": "why is the sky blue?" }
  ]
@ -329,12 +313,6 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [ConfiChat](https://github.com/1runeberg/confichat) (Lightweight, standalone, multi-platform, and privacy focused LLM chat interface with optional encryption)
- [Archyve](https://github.com/nickthecook/archyve) (RAG-enabling document library)
- [crewAI with Mesop](https://github.com/rapidarchitect/ollama-crew-mesop) (Mesop Web Interface to run crewAI with Ollama)
- [LLMChat](https://github.com/trendy-design/llmchat) (Privacy focused, 100% local, intuitive all-in-one chat interface)
- [ARGO](https://github.com/xark-argo/argo) (Locally download and run Ollama and Huggingface models with RAG on Mac/Windows/Linux)
- [G1](https://github.com/bklieger-groq/g1) (Prototype of using prompting strategies to improve the LLM's reasoning through o1-like reasoning chains.)
- [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
- [Hexabot](https://github.com/hexastack/hexabot) (A conversational AI builder)
- [Reddit Rate](https://github.com/rapidarchitect/reddit_analyzer) (Search and Rate Reddit topics with a weighted summation)

### Terminal

@ -361,7 +339,6 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Ollama eBook Summary](https://github.com/cognitivetech/ollama-ebook-summary/)
- [Ollama Mixture of Experts (MOE) in 50 lines of code](https://github.com/rapidarchitect/ollama_moe)
- [vim-intelligence-bridge](https://github.com/pepo-ec/vim-intelligence-bridge) Simple interaction of "Ollama" with the Vim editor
- [aichat](https://github.com/sigoden/aichat) All-in-one LLM CLI tool featuring Shell Assistant, Chat-REPL, RAG, AI tools & agents, with access to OpenAI, Claude, Gemini, Ollama, Groq, and more.

### Apple Vision Pro
- [Enchanted](https://github.com/AugustDev/enchanted)
@ -382,13 +359,13 @@ See the [API documentation](./docs/api.md) for all endpoints.

### Libraries

- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/integrations/chat/ollama/) with [example](https://js.langchain.com/docs/tutorials/local_rag/)
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
- [Firebase Genkit](https://firebase.google.com/docs/genkit/plugins/ollama)
- [crewAI](https://github.com/crewAIInc/crewAI)
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
- [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
- [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) and [LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama)
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
- [LiteLLM](https://github.com/BerriAI/litellm)
- [OllamaFarm for Go](https://github.com/presbrey/ollamafarm)
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
@ -417,14 +394,11 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [High-level function abstraction in Go](https://gitlab.com/tozd/go/fun)
- [Ollama PHP](https://github.com/ArdaGnsrn/ollama-php)
- [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) with [example](https://github.com/agents-flex/agents-flex/tree/main/agents-flex-llm/agents-flex-llm-ollama/src/test/java/com/agentsflex/llm/ollama)
- [Ollama for Swift](https://github.com/mattt/ollama-swift)
- [GoLamify](https://github.com/prasad89/golamify)

### Mobile

- [Enchanted](https://github.com/AugustDev/enchanted)
- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid)
- [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
- [ConfiChat](https://github.com/1runeberg/confichat) (Lightweight, standalone, multi-platform, and privacy focused LLM chat interface with optional encryption)

### Extensions & Plugins
@ -456,12 +430,10 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install ollama client & models on any OS for apps that depends on ollama server)
- [Local AI Helper](https://github.com/ivostoykov/localAI) (Chrome and Firefox extensions that enable interactions with the active tab and customisable API endpoints. Includes secure storage for user prompts.)
- [vnc-lm](https://github.com/jk011ru/vnc-lm) (A containerized Discord bot with support for attachments and web links)
- [LSP-AI](https://github.com/SilasMarvin/lsp-ai) (Open-source language server for AI-powered functionality)
- [QodeAssist](https://github.com/Palm1r/QodeAssist) (AI-powered coding assistant plugin for Qt Creator)
- [Obsidian Quiz Generator plugin](https://github.com/ECuiDev/obsidian-quiz-generator)
- [TextCraft](https://github.com/suncloudsmoon/TextCraft) (Copilot in Word alternative using Ollama)

### Supported backends
@ -55,7 +55,7 @@ func checkError(resp *http.Response, body []byte) error {

// ClientFromEnvironment creates a new [Client] using configuration from the
// environment variable OLLAMA_HOST, which points to the network host and
// port on which the ollama service is listening. The format of this variable
// port on which the ollama service is listenting. The format of this variable
// is:
//
//	<scheme>://<host>:<port>
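For orientation, a minimal sketch of how this constructor is typically used from client code (the `List` call and output shown here are illustrative, not part of this diff):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	// ClientFromEnvironment reads OLLAMA_HOST (e.g. http://127.0.0.1:11434)
	// and falls back to the default host when it is unset.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// A small call that exercises the client: list locally available models.
	resp, err := client.List(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Models {
		fmt.Println(m.Name)
	}
}
```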
15
api/types.go
@ -12,7 +12,7 @@ import (
	"time"
)

// StatusError is an error with an HTTP status code and message.
// StatusError is an error with and HTTP status code.
type StatusError struct {
	StatusCode int
	Status     string
@ -57,7 +57,7 @@ type GenerateRequest struct {
	Template string `json:"template"`

	// Context is the context parameter returned from a previous call to
	// [Client.Generate]. It can be used to keep a short conversational memory.
	// Generate call. It can be used to keep a short conversational memory.
	Context []int `json:"context,omitempty"`

	// Stream specifies whether the response is streaming; it is true by default.
@ -90,14 +90,14 @@ type ChatRequest struct {
	// Messages is the messages of the chat - can be used to keep a chat memory.
	Messages []Message `json:"messages"`

	// Stream enables streaming of returned responses; true by default.
	// Stream enable streaming of returned response; true by default.
	Stream *bool `json:"stream,omitempty"`

	// Format is the format to return the response in (e.g. "json").
	Format string `json:"format"`

	// KeepAlive controls how long the model will stay loaded into memory
	// following the request.
	// followin the request.
	KeepAlive *Duration `json:"keep_alive,omitempty"`

	// Tools is an optional list of tools the model has access to.
@ -203,8 +203,8 @@ type Metrics struct {
	EvalDuration time.Duration `json:"eval_duration,omitempty"`
}

// Options specified in [GenerateRequest]. If you add a new option here, also
// add it to the API docs.
// Options specified in [GenerateRequest], if you add a new option here add it
// to the API docs also.
type Options struct {
	Runner

@ -236,7 +236,7 @@ type Runner struct {
	NumGPU    int   `json:"num_gpu,omitempty"`
	MainGPU   int   `json:"main_gpu,omitempty"`
	LowVRAM   bool  `json:"low_vram,omitempty"`
	F16KV     bool  `json:"f16_kv,omitempty"` // Deprecated: This option is ignored
	F16KV     bool  `json:"f16_kv,omitempty"`
	LogitsAll bool  `json:"logits_all,omitempty"`
	VocabOnly bool  `json:"vocab_only,omitempty"`
	UseMMap   *bool `json:"use_mmap,omitempty"`
@ -613,6 +613,7 @@ func DefaultOptions() Options {
		NumGPU:    -1, // -1 here indicates that NumGPU should be set dynamically
		NumThread: 0,  // let the runtime decide
		LowVRAM:   false,
		F16KV:     true,
		UseMLock:  false,
		UseMMap:   nil,
	},
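As a usage sketch for the `Context` field documented in `GenerateRequest` above: each response carries the token context, which can be fed back into the next request for short conversational memory. The model name here is illustrative and error handling is minimal; this is a sketch, not part of the diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	var memory []int // token context carried across calls
	ask := func(prompt string) {
		req := &api.GenerateRequest{Model: "llama3.1", Prompt: prompt, Context: memory}
		err := client.Generate(context.Background(), req, func(r api.GenerateResponse) error {
			fmt.Print(r.Response)
			if r.Done {
				memory = r.Context // feed back into the next request
			}
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println()
	}

	ask("My name is Ada. Remember that.")
	ask("What is my name?") // answered using the carried context
}
```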
@ -11,12 +11,10 @@ import (

	"github.com/ollama/ollama/app/store"
	"github.com/ollama/ollama/app/tray"
	"github.com/ollama/ollama/envconfig"
)

func Run() {
	InitLogging()
	slog.Info("app config", "env", envconfig.Values())

	ctx, cancel := context.WithCancel(context.Background())
	var done chan int
@ -36,13 +36,8 @@ func init() {
		ServerLogFile = filepath.Join(AppDataDir, "server.log")
		UpgradeLogFile = filepath.Join(AppDataDir, "upgrade.log")

		exe, err := os.Executable()
		if err != nil {
			slog.Warn("error discovering executable directory", "error", err)
			AppDir = filepath.Join(localAppData, "Programs", "Ollama")
		} else {
			AppDir = filepath.Dir(exe)
		}
		// Executables are stored in APPDATA
		AppDir = filepath.Join(localAppData, "Programs", "Ollama")

		// Make sure we have PATH set correctly for any spawned children
		paths := strings.Split(os.Getenv("PATH"), ";")
@ -69,7 +64,7 @@ func init() {
	}

	// Make sure our logging dir exists
	_, err = os.Stat(AppDataDir)
	_, err := os.Stat(AppDataDir)
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(AppDataDir, 0o755); err != nil {
			slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
@ -18,17 +18,11 @@ func getCLIFullPath(command string) string {
	var cmdPath string
	appExe, err := os.Executable()
	if err == nil {
		// Check both the same location as the tray app, as well as ./bin
		cmdPath = filepath.Join(filepath.Dir(appExe), command)
		_, err := os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
		cmdPath = filepath.Join(filepath.Dir(appExe), "bin", command)
		_, err = os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}
	cmdPath, err = exec.LookPath(command)
	if err == nil {
@ -26,15 +26,19 @@ func DoUpgrade(cancel context.CancelFunc, done chan int) error {
	slog.Info("starting upgrade with " + installerExe)
	slog.Info("upgrade log file " + UpgradeLogFile)

	// make the upgrade show progress, but non interactive
	// When running in debug mode, we'll be "verbose" and let the installer pop up and prompt
	installArgs := []string{
		"/CLOSEAPPLICATIONS",                    // Quit the tray app if it's still running
		"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
		"/FORCECLOSEAPPLICATIONS",               // Force close the tray app - might be needed
		"/SP",                                   // Skip the "This will install... Do you wish to continue" prompt
		"/NOCANCEL",                             // Disable the ability to cancel upgrade mid-flight to avoid partially installed upgrades
		"/SILENT",
	}
	// make the upgrade as quiet as possible (no GUI, no prompts)
	installArgs = append(installArgs,
		"/SP", // Skip the "This will install... Do you wish to continue" prompt
		"/SUPPRESSMSGBOXES",
		"/SILENT",
		"/VERYSILENT",
	)

	// Safeguard in case we have requests in flight that need to drain...
	slog.Info("Waiting for server to shutdown")
@ -28,8 +28,8 @@ AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
ArchitecturesAllowed=x64compatible arm64
ArchitecturesInstallIn64BitMode=x64compatible arm64
ArchitecturesAllowed=x64 arm64
ArchitecturesInstallIn64BitMode=x64 arm64
DefaultDirName={localappdata}\Programs\{#MyAppName}
DefaultGroupName={#MyAppName}
DisableProgramGroupPage=yes
@ -48,13 +48,12 @@ OutputDir=..\dist\
SetupLogging=yes
CloseApplications=yes
RestartApplications=no
RestartIfNeededByRun=no

; https://jrsoftware.org/ishelp/index.php?topic=setup_wizardimagefile
WizardSmallImageFile=.\assets\setup.bmp

; Ollama requires Windows 10 22H2 or newer for proper unicode rendering
; TODO: consider setting this to 10.0.19045
; TODO verify actual min windows version...
; OG Win 10
MinVersion=10.0.10240

; First release that supports WinRT UI Composition for win32 apps
@ -87,21 +86,12 @@ Name: "english"; MessagesFile: "compiler:Default.isl"
DialogFontSize=12

[Files]
#if DirExists("..\dist\windows-amd64")
Source: "..\dist\windows-amd64-app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ;Check: not IsArm64(); Flags: ignoreversion 64bit
Source: "..\dist\windows-amd64\ollama.exe"; DestDir: "{app}"; Check: not IsArm64(); Flags: ignoreversion 64bit
Source: "..\dist\windows-amd64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Check: not IsArm64(); Flags: ignoreversion 64bit recursesubdirs
#endif

#if DirExists("..\dist\windows-arm64")
Source: "..\dist\windows-arm64\vc_redist.arm64.exe"; DestDir: "{tmp}"; Check: IsArm64() and vc_redist_needed(); Flags: deleteafterinstall
Source: "..\dist\windows-arm64-app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ;Check: IsArm64(); Flags: ignoreversion 64bit
Source: "..\dist\windows-arm64\ollama.exe"; DestDir: "{app}"; Check: IsArm64(); Flags: ignoreversion 64bit
Source: "..\dist\windows-arm64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Check: IsArm64(); Flags: ignoreversion 64bit recursesubdirs
#endif

Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windows-{#ARCH}\lib\ollama\runners\*"; DestDir: "{app}\lib\ollama\runners"; Flags: ignoreversion 64bit recursesubdirs
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
Source: "..\dist\windows-amd64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Flags: ignoreversion recursesubdirs

[Icons]
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
@ -109,9 +99,6 @@ Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilen
Name: "{userprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"

[Run]
#if DirExists("..\dist\windows-arm64")
Filename: "{tmp}\vc_redist.arm64.exe"; Parameters: "/install /passive /norestart"; Check: IsArm64() and vc_redist_needed(); StatusMsg: "Installing VC++ Redistributables..."; Flags: waituntilterminated
#endif
Filename: "{cmd}"; Parameters: "/C set PATH={app};%PATH% & ""{app}\{#MyAppExeName}"""; Flags: postinstall nowait runhidden

[UninstallRun]
@ -136,13 +123,13 @@ Type: filesandordirs; Name: "{%TEMP}\ollama*"
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"

[Messages]
WizardReady=Ollama
WizardReady=Ollama Windows Preview
ReadyLabel1=%nLet's get you up and running with your own large language models.
SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or finish the other installer, then click OK to continue with this install, or Cancel to exit.


;FinishedHeadingLabel=Run your first model
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n    ollama run llama3.2
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n    ollama run llama3.1
;ClickFinish=%n

[Registry]
@ -167,39 +154,3 @@ begin
  { Pos() returns 0 if not found }
  Result := Pos(';' + ExpandConstant(Param) + ';', ';' + OrigPath + ';') = 0;
end;

{ --- VC Runtime libraries discovery code - Only install vc_redist if it isn't already installed ----- }
const VCRTL_MIN_V1 = 14;
const VCRTL_MIN_V2 = 40;
const VCRTL_MIN_V3 = 33807;
const VCRTL_MIN_V4 = 0;
// check if the minimum required vc redist is installed (by looking at the registry)
function vc_redist_needed (): Boolean;
var
  sRegKey: string;
  v1: Cardinal;
  v2: Cardinal;
  v3: Cardinal;
  v4: Cardinal;
begin
  sRegKey := 'SOFTWARE\WOW6432Node\Microsoft\VisualStudio\14.0\VC\Runtimes\arm64';
  if (RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Major', v1) and
      RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Minor', v2) and
      RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Bld', v3) and
      RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'RBld', v4)) then
  begin
    Log ('VC Redist version: ' + IntToStr (v1) +
      '.' + IntToStr (v2) + '.' + IntToStr (v3) +
      '.' + IntToStr (v4));
    { Version info was found. Return true if later or equal to our
      minimal required version RTL_MIN_Vx }
    Result := not (
      (v1 > VCRTL_MIN_V1) or ((v1 = VCRTL_MIN_V1) and
      ((v2 > VCRTL_MIN_V2) or ((v2 = VCRTL_MIN_V2) and
      ((v3 > VCRTL_MIN_V3) or ((v3 = VCRTL_MIN_V3) and
      (v4 >= VCRTL_MIN_V4)))))));
  end
  else
    Result := TRUE;
end;
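The nested boolean above is just a four-component lexicographic version comparison. For readers untangling it, an equivalent sketch in Go (a hypothetical helper written only to make the logic easier to follow, not part of the installer):

```go
package main

import "fmt"

// atLeast reports whether installed >= required, comparing the four
// version components (Major, Minor, Bld, RBld) lexicographically,
// which is the same decision the nested Pascal expression encodes.
func atLeast(installed, required [4]uint32) bool {
	for i := range installed {
		if installed[i] != required[i] {
			return installed[i] > required[i]
		}
	}
	return true // all components equal
}

func main() {
	minVer := [4]uint32{14, 40, 33807, 0} // VCRTL_MIN_V1..V4 from the script
	fmt.Println(!atLeast([4]uint32{14, 38, 0, 0}, minVer))     // true: redist needed
	fmt.Println(!atLeast([4]uint32{14, 40, 33810, 0}, minVer)) // false: already new enough
}
```

vc_redist_needed then returns the negation: the redistributable is installed only when the detected version is below the minimum, and defaults to needed when no registry entry is found.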
@ -4,5 +4,5 @@ write-host "Welcome to Ollama!"
write-host ""
write-host "Run your first model:"
write-host ""
write-host "`tollama run llama3.2"
write-host "`tollama run llama3.1"
write-host ""
@ -11,13 +11,12 @@ import (
)

const (
	_ = iota
	updateAvailableMenuID
	updateMenuID
	separatorMenuID
	diagLogsMenuID
	diagSeparatorMenuID
	quitMenuID
	updateAvailableMenuID = 1
	updateMenuID          = updateAvailableMenuID + 1
	separatorMenuID       = updateMenuID + 1
	diagLogsMenuID        = separatorMenuID + 1
	diagSeparatorMenuID   = diagLogsMenuID + 1
	quitMenuID            = diagSeparatorMenuID + 1
)

func (t *winTray) initMenus() error {
89
cmd/cmd.go
@ -21,6 +21,7 @@ import (
	"path/filepath"
	"regexp"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync/atomic"
@ -46,58 +47,28 @@ import (
	"github.com/ollama/ollama/version"
)

var (
	errModelNotFound     = errors.New("no Modelfile or safetensors files found")
	errModelfileNotFound = errors.New("specified Modelfile wasn't found")
)

func getModelfileName(cmd *cobra.Command) (string, error) {
	fn, _ := cmd.Flags().GetString("file")

	filename := fn
	if filename == "" {
		filename = "Modelfile"
	}

	absName, err := filepath.Abs(filename)
	if err != nil {
		return "", err
	}

	_, err = os.Stat(absName)
	if err != nil {
		return fn, err
	}

	return absName, nil
}

func CreateHandler(cmd *cobra.Command, args []string) error {
	filename, _ := cmd.Flags().GetString("file")
	filename, err := filepath.Abs(filename)
	if err != nil {
		return err
	}

	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	p := progress.NewProgress(os.Stderr)
	defer p.Stop()

	var reader io.Reader

	filename, err := getModelfileName(cmd)
	if os.IsNotExist(err) {
		if filename == "" {
			reader = strings.NewReader("FROM .\n")
		} else {
			return errModelfileNotFound
		}
	} else if err != nil {
	f, err := os.Open(filename)
	if err != nil {
		return err
	} else {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}

		reader = f
		defer f.Close()
	}
	defer f.Close()

	modelfile, err := parser.ParseFile(reader)
	modelfile, err := parser.ParseFile(f)
	if err != nil {
		return err
	}
@ -112,11 +83,6 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
	p.Add(status, spinner)
	defer p.Stop()

	client, err := api.ClientFromEnvironment()
	if err != nil {
		return err
	}

	for i := range modelfile.Commands {
		switch modelfile.Commands[i].Name {
		case "model", "adapter":
@ -255,7 +221,7 @@ func tempZipFiles(path string) (string, error) {
		// covers consolidated.x.pth, consolidated.pth
		files = append(files, pt...)
	} else {
		return "", errModelNotFound
		return "", errors.New("no safetensors or torch files found")
	}

	// add configuration files, json files are detected as text/plain
@ -487,7 +453,7 @@ func RunHandler(cmd *cobra.Command, args []string) error {
		return err
	}

	opts.MultiModal = len(info.ProjectorInfo) != 0
	opts.MultiModal = slices.Contains(info.Details.Families, "clip")
	opts.ParentModel = info.Details.ParentModel

	if interactive {
@ -714,17 +680,6 @@ func DeleteHandler(cmd *cobra.Command, args []string) error {
		return err
	}

	// Unload the model if it's running before deletion
	opts := &runOptions{
		Model:     args[0],
		KeepAlive: &api.Duration{Duration: 0},
	}
	if err := loadOrUnloadModel(cmd, opts); err != nil {
		if !strings.Contains(err.Error(), "not found") {
			return fmt.Errorf("unable to stop existing running model \"%s\": %s", args[0], err)
		}
	}

	for _, name := range args {
		req := api.DeleteRequest{Name: name}
		if err := client.Delete(cmd.Context(), &req); err != nil {
@ -800,9 +755,9 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
	case "parameters":
		fmt.Println(resp.Parameters)
	case "system":
		fmt.Print(resp.System)
		fmt.Println(resp.System)
	case "template":
		fmt.Print(resp.Template)
		fmt.Println(resp.Template)
	}

	return nil
@ -1318,7 +1273,7 @@ func NewCLI() *cobra.Command {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	cobra.EnableCommandSorting = false

	if runtime.GOOS == "windows" && term.IsTerminal(int(os.Stdout.Fd())) {
	if runtime.GOOS == "windows" {
		console.ConsoleFromFile(os.Stdin) //nolint:errcheck
	}

@ -1350,7 +1305,7 @@ func NewCLI() *cobra.Command {
		RunE: CreateHandler,
	}

	createCmd.Flags().StringP("file", "f", "", "Name of the Modelfile (default \"Modelfile\"")
	createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile")
	createCmd.Flags().StringP("quantize", "q", "", "Quantize model to this level (e.g. q4_0)")

	showCmd := &cobra.Command{
165
cmd/cmd_test.go
@ -2,17 +2,11 @@ package cmd

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/spf13/cobra"

	"github.com/ollama/ollama/api"
)
@ -210,162 +204,3 @@ Weigh anchor!
		}
	})
}

func TestDeleteHandler(t *testing.T) {
	stopped := false
	mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/api/delete" && r.Method == http.MethodDelete {
			var req api.DeleteRequest
			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			if req.Name == "test-model" {
				w.WriteHeader(http.StatusOK)
			} else {
				w.WriteHeader(http.StatusNotFound)
			}
			return
		}
		if r.URL.Path == "/api/generate" && r.Method == http.MethodPost {
			var req api.GenerateRequest
			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
				http.Error(w, err.Error(), http.StatusBadRequest)
				return
			}
			if req.Model == "test-model" {
				w.WriteHeader(http.StatusOK)
				if err := json.NewEncoder(w).Encode(api.GenerateResponse{
					Done: true,
				}); err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
				}
				stopped = true
				return
			} else {
				w.WriteHeader(http.StatusNotFound)
				if err := json.NewEncoder(w).Encode(api.GenerateResponse{
					Done: false,
				}); err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
				}
			}
		}
	}))

	t.Setenv("OLLAMA_HOST", mockServer.URL)
	t.Cleanup(mockServer.Close)

	cmd := &cobra.Command{}
	cmd.SetContext(context.TODO())
	if err := DeleteHandler(cmd, []string{"test-model"}); err != nil {
		t.Fatalf("DeleteHandler failed: %v", err)
	}
	if !stopped {
		t.Fatal("Model was not stopped before deletion")
	}

	err := DeleteHandler(cmd, []string{"test-model-not-found"})
	if err == nil || !strings.Contains(err.Error(), "unable to stop existing running model \"test-model-not-found\"") {
		t.Fatalf("DeleteHandler failed: expected error about stopping non-existent model, got %v", err)
	}
}

func TestGetModelfileName(t *testing.T) {
	tests := []struct {
		name          string
		modelfileName string
		fileExists    bool
		expectedName  string
		expectedErr   error
	}{
		{
			name:          "no modelfile specified, no modelfile exists",
			modelfileName: "",
			fileExists:    false,
			expectedName:  "",
			expectedErr:   os.ErrNotExist,
		},
		{
			name:          "no modelfile specified, modelfile exists",
			modelfileName: "",
			fileExists:    true,
			expectedName:  "Modelfile",
			expectedErr:   nil,
		},
		{
			name:          "modelfile specified, no modelfile exists",
			modelfileName: "crazyfile",
			fileExists:    false,
			expectedName:  "crazyfile",
			expectedErr:   os.ErrNotExist,
		},
		{
			name:          "modelfile specified, modelfile exists",
			modelfileName: "anotherfile",
			fileExists:    true,
			expectedName:  "anotherfile",
			expectedErr:   nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cmd := &cobra.Command{
				Use: "fakecmd",
			}
			cmd.Flags().String("file", "", "path to modelfile")

			var expectedFilename string

			if tt.fileExists {
				tempDir, err := os.MkdirTemp("", "modelfiledir")
				defer os.RemoveAll(tempDir)
				if err != nil {
					t.Fatalf("temp modelfile dir creation failed: %v", err)
				}
				var fn string
				if tt.modelfileName != "" {
					fn = tt.modelfileName
				} else {
					fn = "Modelfile"
				}

				tempFile, err := os.CreateTemp(tempDir, fn)
				if err != nil {
					t.Fatalf("temp modelfile creation failed: %v", err)
				}

				expectedFilename = tempFile.Name()
				err = cmd.Flags().Set("file", expectedFilename)
				if err != nil {
					t.Fatalf("couldn't set file flag: %v", err)
				}
			} else {
				if tt.modelfileName != "" {
					expectedFilename = tt.modelfileName
					err := cmd.Flags().Set("file", tt.modelfileName)
					if err != nil {
						t.Fatalf("couldn't set file flag: %v", err)
					}
				}
			}

			actualFilename, actualErr := getModelfileName(cmd)

			if actualFilename != expectedFilename {
				t.Errorf("expected filename: '%s' actual filename: '%s'", expectedFilename, actualFilename)
			}

			if tt.expectedErr != os.ErrNotExist {
				if actualErr != tt.expectedErr {
					t.Errorf("expected err: %v actual err: %v", tt.expectedErr, actualErr)
				}
			} else {
				if !os.IsNotExist(actualErr) {
					t.Errorf("expected err: %v actual err: %v", tt.expectedErr, actualErr)
				}
			}
		})
	}
}
@ -30,6 +30,11 @@ const (
	MultilineSystem
)

const (
	scannerPrompt    = ">>> "
	scannerAltPrompt = "... "
)

func generateInteractive(cmd *cobra.Command, opts runOptions) error {
	usage := func() {
		fmt.Fprintln(os.Stderr, "Available Commands:")
@ -111,8 +116,8 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
	}

	scanner, err := readline.New(readline.Prompt{
		Prompt:         ">>> ",
		AltPrompt:      "... ",
		Prompt:         scannerPrompt,
		AltPrompt:      scannerAltPrompt,
		Placeholder:    "Send a message (/? for help)",
		AltPlaceholder: `Use """ to end multi-line input`,
	})
@ -144,6 +149,11 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
			scanner.Prompt.UseAlt = false
			sb.Reset()

			continue
		case errors.Is(err, readline.ErrNewLineDetected):
			sb.WriteString(line)
			fmt.Fprintln(&sb)
			scanner.Prompt.Prompt = scannerAltPrompt
			continue
		case err != nil:
			return err
@ -169,7 +179,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {

			multiline = MultilineNone
			scanner.Prompt.UseAlt = false
		case strings.HasPrefix(line, `"""`):
		case strings.HasPrefix(line, `"""`) && !scanner.Pasting:
			line := strings.TrimPrefix(line, `"""`)
			line, ok := strings.CutSuffix(line, `"""`)
			sb.WriteString(line)
@ -433,7 +443,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
			sb.WriteString(line)
		}

		if sb.Len() > 0 && multiline == MultilineNone {
		if sb.Len() > 0 && strings.TrimSpace(sb.String()) != "" && multiline == MultilineNone {
			newMessage := api.Message{Role: "user", Content: sb.String()}

			if opts.MultiModal {
@ -442,6 +452,13 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
				return err
			}

			// clear all previous images for better responses
			if len(images) > 0 {
				for i := range opts.Messages {
					opts.Messages[i].Images = nil
				}
			}

			newMessage.Content = msg
			newMessage.Images = images
			}
@ -457,6 +474,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
			}

			sb.Reset()
			scanner.Prompt.Prompt = scannerPrompt
		}
	}
}
@ -494,22 +512,28 @@ func buildModelfile(opts runOptions) string {
}

func normalizeFilePath(fp string) string {
	return strings.NewReplacer(
		"\\ ", " ", // Escaped space
		"\\(", "(", // Escaped left parenthesis
		"\\)", ")", // Escaped right parenthesis
		"\\[", "[", // Escaped left square bracket
		"\\]", "]", // Escaped right square bracket
		"\\{", "{", // Escaped left curly brace
		"\\}", "}", // Escaped right curly brace
		"\\$", "$", // Escaped dollar sign
		"\\&", "&", // Escaped ampersand
		"\\;", ";", // Escaped semicolon
		"\\'", "'", // Escaped single quote
		"\\\\", "\\", // Escaped backslash
		"\\*", "*", // Escaped asterisk
		"\\?", "?", // Escaped question mark
	).Replace(fp)
	// Define a map of escaped characters and their replacements
	replacements := map[string]string{
		"\\ ":  " ",  // Escaped space
		"\\(":  "(",  // Escaped left parenthesis
		"\\)":  ")",  // Escaped right parenthesis
		"\\[":  "[",  // Escaped left square bracket
		"\\]":  "]",  // Escaped right square bracket
		"\\{":  "{",  // Escaped left curly brace
		"\\}":  "}",  // Escaped right curly brace
		"\\$":  "$",  // Escaped dollar sign
		"\\&":  "&",  // Escaped ampersand
		"\\;":  ";",  // Escaped semicolon
		"\\'":  "'",  // Escaped single quote
		"\\\\": "\\", // Escaped backslash
		"\\*":  "*",  // Escaped asterisk
		"\\?":  "?",  // Escaped question mark
	}

	for escaped, actual := range replacements {
		fp = strings.ReplaceAll(fp, escaped, actual)
	}
	return fp
}
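A note on the two variants above: strings.NewReplacer applies all pairs in a single left-to-right pass over the input, while looping over a map with strings.ReplaceAll rescans the string once per pair and iterates the pairs in nondeterministic order. A minimal standalone illustration of the single-pass form (not part of the diff; the file name is made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Unescape a shell-escaped path in one pass.
	r := strings.NewReplacer(`\ `, " ", `\(`, "(", `\)`, ")")
	fmt.Println(r.Replace(`pic\ \(1\).png`)) // pic (1).png
}
```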

func extractFileNames(input string) []string {
@ -529,9 +553,10 @@ func extractFileData(input string) (string, []api.ImageData, error) {
	for _, fp := range filePaths {
		nfp := normalizeFilePath(fp)
		data, err := getImageData(nfp)
		if errors.Is(err, os.ErrNotExist) {
			continue
		} else if err != nil {
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			fmt.Fprintf(os.Stderr, "Couldn't process image: %q\n", err)
			return "", imgs, err
		}
@ -539,7 +564,7 @@ func extractFileData(input string) (string, []api.ImageData, error) {
		input = strings.ReplaceAll(input, fp, "")
		imgs = append(imgs, data)
	}
	return strings.TrimSpace(input), imgs, nil
	return input, imgs, nil
}

func getImageData(filePath string) ([]byte, error) {
@ -29,7 +29,7 @@ type tensorData struct {
	Shape []int `json:"shape"`
}

func convertFull(t *testing.T, fsys fs.FS) (*os.File, llm.KV, *llm.Tensors) {
func convertFull(t *testing.T, fsys fs.FS) (*os.File, llm.KV, llm.Tensors) {
	t.Helper()

	f, err := os.CreateTemp(t.TempDir(), "f16")
@ -60,7 +60,7 @@ func convertFull(t *testing.T, fsys fs.FS) (*os.File, llm.KV, *llm.Tensors) {
	return r, m.KV(), m.Tensors()
}

func generateResultsJSON(t *testing.T, f *os.File, kv llm.KV, tensors *llm.Tensors) map[string]string {
func generateResultsJSON(t *testing.T, f *os.File, kv llm.KV, tensors llm.Tensors) map[string]string {
	actual := make(map[string]string)
	for k, v := range kv {
		if s, ok := v.(json.Marshaler); !ok {
@ -1,199 +0,0 @@
package discover

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"reflect"
	"regexp"
	"sort"
	"strings"

	"github.com/ollama/ollama/format"
)

var CudartGlobs = []string{
	"/usr/local/cuda/lib64/libcudart.so*",
	"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
	"/usr/lib/x86_64-linux-gnu/libcudart.so*",
	"/usr/lib/wsl/lib/libcudart.so*",
	"/usr/lib/wsl/drivers/*/libcudart.so*",
	"/opt/cuda/lib64/libcudart.so*",
	"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
	"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
	"/usr/lib/aarch64-linux-gnu/libcudart.so*",
	"/usr/local/cuda/lib*/libcudart.so*",
	"/usr/lib*/libcudart.so*",
	"/usr/local/lib*/libcudart.so*",
}

var NvmlGlobs = []string{}

var NvcudaGlobs = []string{
	"/usr/local/cuda*/targets/*/lib/libcuda.so*",
	"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
	"/usr/lib/*-linux-gnu/libcuda.so*",
	"/usr/lib/wsl/lib/libcuda.so*",
	"/usr/lib/wsl/drivers/*/libcuda.so*",
	"/opt/cuda/lib*/libcuda.so*",
	"/usr/local/cuda/lib*/libcuda.so*",
	"/usr/lib*/libcuda.so*",
	"/usr/local/lib*/libcuda.so*",
}

var OneapiGlobs = []string{
	"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
	"/usr/lib*/libze_intel_gpu.so*",
}

var (
	CudartMgmtName = "libcudart.so*"
	NvcudaMgmtName = "libcuda.so*"
	NvmlMgmtName   = "" // not currently wired on linux
	OneapiMgmtName = "libze_intel_gpu.so*"
)

func GetCPUMem() (memInfo, error) {
	var mem memInfo
	var total, available, free, buffers, cached, freeSwap uint64
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return mem, err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		line := s.Text()
		switch {
		case strings.HasPrefix(line, "MemTotal:"):
			_, err = fmt.Sscanf(line, "MemTotal:%d", &total)
		case strings.HasPrefix(line, "MemAvailable:"):
			_, err = fmt.Sscanf(line, "MemAvailable:%d", &available)
		case strings.HasPrefix(line, "MemFree:"):
			_, err = fmt.Sscanf(line, "MemFree:%d", &free)
		case strings.HasPrefix(line, "Buffers:"):
			_, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
		case strings.HasPrefix(line, "Cached:"):
			_, err = fmt.Sscanf(line, "Cached:%d", &cached)
		case strings.HasPrefix(line, "SwapFree:"):
			_, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap)
		default:
			continue
		}
		if err != nil {
			return mem, err
		}
	}
	mem.TotalMemory = total * format.KibiByte
	mem.FreeSwap = freeSwap * format.KibiByte
	if available > 0 {
		mem.FreeMemory = available * format.KibiByte
	} else {
		mem.FreeMemory = (free + buffers + cached) * format.KibiByte
	}
	return mem, nil
}

const CpuInfoFilename = "/proc/cpuinfo"

type linuxCpuInfo struct {
	ID         string `cpuinfo:"processor"`
	VendorID   string `cpuinfo:"vendor_id"`
	ModelName  string `cpuinfo:"model name"`
	PhysicalID string `cpuinfo:"physical id"`
	Siblings   string `cpuinfo:"siblings"`
	CoreID     string `cpuinfo:"core id"`
}

func GetCPUDetails() ([]CPU, error) {
	file, err := os.Open(CpuInfoFilename)
	if err != nil {
		return nil, err
	}
	return linuxCPUDetails(file)
}

func linuxCPUDetails(file io.Reader) ([]CPU, error) {
	reColumns := regexp.MustCompile("\t+: ")
	scanner := bufio.NewScanner(file)
	cpuInfos := []linuxCpuInfo{}
	cpu := &linuxCpuInfo{}
	for scanner.Scan() {
		line := scanner.Text()
		if sl := reColumns.Split(line, 2); len(sl) > 1 {
			t := reflect.TypeOf(cpu).Elem()
			s := reflect.ValueOf(cpu).Elem()
			for i := range t.NumField() {
				field := t.Field(i)
				tag := field.Tag.Get("cpuinfo")
				if tag == sl[0] {
					s.FieldByName(field.Name).SetString(sl[1])
					break
				}
			}
		} else if strings.TrimSpace(line) == "" && cpu.ID != "" {
			cpuInfos = append(cpuInfos, *cpu)
			cpu = &linuxCpuInfo{}
		}
	}
	if cpu.ID != "" {
		cpuInfos = append(cpuInfos, *cpu)
	}

	// Process the sockets/cores/threads
	socketByID := map[string]*CPU{}
	coreBySocket := map[string]map[string]struct{}{}
	threadsByCoreBySocket := map[string]map[string]int{}
	for _, c := range cpuInfos {
		if _, found := socketByID[c.PhysicalID]; !found {
			socketByID[c.PhysicalID] = &CPU{
				ID:        c.PhysicalID,
				VendorID:  c.VendorID,
				ModelName: c.ModelName,
			}
			coreBySocket[c.PhysicalID] = map[string]struct{}{}
			threadsByCoreBySocket[c.PhysicalID] = map[string]int{}
		}
		if c.CoreID != "" {
			coreBySocket[c.PhysicalID][c.PhysicalID+":"+c.CoreID] = struct{}{}
			threadsByCoreBySocket[c.PhysicalID][c.PhysicalID+":"+c.CoreID]++
		} else {
			coreBySocket[c.PhysicalID][c.PhysicalID+":"+c.ID] = struct{}{}
			threadsByCoreBySocket[c.PhysicalID][c.PhysicalID+":"+c.ID]++
		}
	}

	// Tally up the values from the tracking maps
	for id, s := range socketByID {
		s.CoreCount = len(coreBySocket[id])
		s.ThreadCount = 0
		for _, tc := range threadsByCoreBySocket[id] {
			s.ThreadCount += tc
		}

		// This only works if HT is enabled, consider a more reliable model, maybe cache size comparisons?
		efficiencyCoreCount := 0
|
||||
for _, threads := range threadsByCoreBySocket[id] {
|
||||
if threads == 1 {
|
||||
efficiencyCoreCount++
|
||||
}
|
||||
}
|
||||
if efficiencyCoreCount == s.CoreCount {
|
||||
// 1:1 mapping means they're not actually efficiency cores, but regular cores
|
||||
s.EfficiencyCoreCount = 0
|
||||
} else {
|
||||
s.EfficiencyCoreCount = efficiencyCoreCount
|
||||
}
|
||||
}
|
||||
keys := make([]string, 0, len(socketByID))
|
||||
result := make([]CPU, 0, len(socketByID))
|
||||
for k := range socketByID {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
result = append(result, *socketByID[k])
|
||||
}
|
||||
return result, nil
|
||||
}
|
File diff suppressed because it is too large
@ -1,234 +0,0 @@
package discover

import (
	"fmt"
	"log/slog"
	"syscall"
	"unsafe"
)

type MEMORYSTATUSEX struct {
	length               uint32
	MemoryLoad           uint32
	TotalPhys            uint64
	AvailPhys            uint64
	TotalPageFile        uint64
	AvailPageFile        uint64
	TotalVirtual         uint64
	AvailVirtual         uint64
	AvailExtendedVirtual uint64
}

var (
	k32                              = syscall.NewLazyDLL("kernel32.dll")
	globalMemoryStatusExProc         = k32.NewProc("GlobalMemoryStatusEx")
	sizeofMemoryStatusEx             = uint32(unsafe.Sizeof(MEMORYSTATUSEX{}))
	GetLogicalProcessorInformationEx = k32.NewProc("GetLogicalProcessorInformationEx")
)

var CudartGlobs = []string{
	"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
}

var NvmlGlobs = []string{
	"c:\\Windows\\System32\\nvml.dll",
}

var NvcudaGlobs = []string{
	"c:\\windows\\system*\\nvcuda.dll",
}

var OneapiGlobs = []string{
	"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
}

var (
	CudartMgmtName = "cudart64_*.dll"
	NvcudaMgmtName = "nvcuda.dll"
	NvmlMgmtName   = "nvml.dll"
	OneapiMgmtName = "ze_intel_gpu64.dll"
)

func GetCPUMem() (memInfo, error) {
	memStatus := MEMORYSTATUSEX{length: sizeofMemoryStatusEx}
	r1, _, err := globalMemoryStatusExProc.Call(uintptr(unsafe.Pointer(&memStatus)))
	if r1 == 0 {
		return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
	}
	return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil
}

type LOGICAL_PROCESSOR_RELATIONSHIP uint32

const (
	RelationProcessorCore LOGICAL_PROCESSOR_RELATIONSHIP = iota
	RelationNumaNode
	RelationCache
	RelationProcessorPackage
	RelationGroup
	RelationProcessorDie
	RelationNumaNodeEx
	RelationProcessorModule
)

const RelationAll LOGICAL_PROCESSOR_RELATIONSHIP = 0xffff

type GROUP_AFFINITY struct {
	Mask     uintptr // KAFFINITY
	Group    uint16
	Reserved [3]uint16
}

type PROCESSOR_RELATIONSHIP struct {
	Flags           byte
	EfficiencyClass byte
	Reserved        [20]byte
	GroupCount      uint16
	GroupMask       [1]GROUP_AFFINITY // len GroupCount
}

// Omitted unused structs: NUMA_NODE_RELATIONSHIP CACHE_RELATIONSHIP GROUP_RELATIONSHIP

type SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX struct {
	Relationship LOGICAL_PROCESSOR_RELATIONSHIP
	Size         uint32
	U            [1]byte // Union len Size
	// PROCESSOR_RELATIONSHIP
	// NUMA_NODE_RELATIONSHIP
	// CACHE_RELATIONSHIP
	// GROUP_RELATIONSHIP
}

func (group *GROUP_AFFINITY) IsMember(target *GROUP_AFFINITY) bool {
	if group == nil || target == nil {
		return false
	}
	return group.Mask&target.Mask != 0
}

type winPackage struct {
	groups              []*GROUP_AFFINITY
	coreCount           int // performance cores = coreCount - efficiencyCoreCount
	efficiencyCoreCount int
	threadCount         int
}

func (pkg *winPackage) IsMember(target *GROUP_AFFINITY) bool {
	for _, group := range pkg.groups {
		if group.IsMember(target) {
			return true
		}
	}
	return false
}

func getLogicalProcessorInformationEx() ([]byte, error) {
	buf := make([]byte, 1)
	bufSize := len(buf)
	ret, _, err := GetLogicalProcessorInformationEx.Call(
		uintptr(RelationAll),
		uintptr(unsafe.Pointer(&buf[0])),
		uintptr(unsafe.Pointer(&bufSize)),
	)
	if ret != 0 {
		return nil, fmt.Errorf("failed to determine size info ret:%d %w", ret, err)
	}

	buf = make([]byte, bufSize)
	ret, _, err = GetLogicalProcessorInformationEx.Call(
		uintptr(RelationAll),
		uintptr(unsafe.Pointer(&buf[0])),
		uintptr(unsafe.Pointer(&bufSize)),
	)
	if ret == 0 {
		return nil, fmt.Errorf("failed to gather processor information ret:%d buflen:%d %w", ret, bufSize, err)
	}
	return buf, nil
}

func processSystemLogicalProcessorInforationList(buf []byte) []*winPackage {
	var slpi *SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
	// Find all the packages first
	packages := []*winPackage{}
	for bufOffset := 0; bufOffset < len(buf); bufOffset += int(slpi.Size) {
		slpi = (*SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)(unsafe.Pointer(&buf[bufOffset]))
		if slpi.Relationship != RelationProcessorPackage {
			continue
		}
		pr := (*PROCESSOR_RELATIONSHIP)(unsafe.Pointer(&slpi.U[0]))
		pkg := &winPackage{}
		ga0 := unsafe.Pointer(&pr.GroupMask[0])
		for j := range pr.GroupCount {
			gm := (*GROUP_AFFINITY)(unsafe.Pointer(uintptr(ga0) + uintptr(j)*unsafe.Sizeof(GROUP_AFFINITY{})))
			pkg.groups = append(pkg.groups, gm)
		}
		packages = append(packages, pkg)
	}

	slog.Info("packages", "count", len(packages))

	// To identify efficiency cores we have to compare the relative values
	// Larger values are "less efficient" (aka, more performant)
	var maxEfficiencyClass byte
	for bufOffset := 0; bufOffset < len(buf); bufOffset += int(slpi.Size) {
		slpi = (*SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)(unsafe.Pointer(&buf[bufOffset]))
		if slpi.Relationship != RelationProcessorCore {
			continue
		}
		pr := (*PROCESSOR_RELATIONSHIP)(unsafe.Pointer(&slpi.U[0]))
		if pr.EfficiencyClass > maxEfficiencyClass {
			maxEfficiencyClass = pr.EfficiencyClass
		}
	}
	if maxEfficiencyClass > 0 {
		slog.Info("efficiency cores detected", "maxEfficiencyClass", maxEfficiencyClass)
	}

	// then match up the Cores to the Packages, count up cores, threads and efficiency cores
	for bufOffset := 0; bufOffset < len(buf); bufOffset += int(slpi.Size) {
		slpi = (*SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)(unsafe.Pointer(&buf[bufOffset]))
		if slpi.Relationship != RelationProcessorCore {
			continue
		}
		pr := (*PROCESSOR_RELATIONSHIP)(unsafe.Pointer(&slpi.U[0]))
		ga0 := unsafe.Pointer(&pr.GroupMask[0])
		for j := range pr.GroupCount {
			gm := (*GROUP_AFFINITY)(unsafe.Pointer(uintptr(ga0) + uintptr(j)*unsafe.Sizeof(GROUP_AFFINITY{})))
			for _, pkg := range packages {
				if pkg.IsMember(gm) {
					pkg.coreCount++
					if pr.Flags == 0 {
						pkg.threadCount++
					} else {
						pkg.threadCount += 2
					}
					if pr.EfficiencyClass < maxEfficiencyClass {
						pkg.efficiencyCoreCount++
					}
				}
			}
		}
	}

	// Sumarize the results
	for i, pkg := range packages {
		slog.Info("", "package", i, "cores", pkg.coreCount, "efficiency", pkg.efficiencyCoreCount, "threads", pkg.threadCount)
	}

	return packages
}

func GetCPUDetails() ([]CPU, error) {
	buf, err := getLogicalProcessorInformationEx()
	if err != nil {
		return nil, err
	}
	packages := processSystemLogicalProcessorInforationList(buf)
	cpus := make([]CPU, len(packages))

	for i, pkg := range packages {
		cpus[i].CoreCount = pkg.coreCount
		cpus[i].EfficiencyCoreCount = pkg.efficiencyCoreCount
		cpus[i].ThreadCount = pkg.threadCount
	}
	return cpus, nil
}
File diff suppressed because one or more lines are too long
138 docs/api.md
@ -69,7 +69,7 @@ Enable JSON mode by setting the `format` parameter to `json`. This will structur

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt": "Why is the sky blue?"
}'
```
@ -80,7 +80,7 @@ A stream of JSON objects is returned:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "response": "The",
  "done": false
@ -102,7 +102,7 @@ To calculate how fast the response is generated in tokens per second (token/s),

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "",
  "done": true,
@ -124,7 +124,7 @@ A response can be received in one reply when streaming is off.

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt": "Why is the sky blue?",
  "stream": false
}'
@ -136,7 +136,7 @@ If `stream` is set to `false`, the response will be a single JSON object:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
  "done": true,
@ -194,7 +194,7 @@ curl http://localhost:11434/api/generate -d '{

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt": "What color is the sky at different times of the day? Respond using JSON",
  "format": "json",
  "stream": false
@ -205,7 +205,7 @@ curl http://localhost:11434/api/generate -d '{

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-11-09T21:07:55.186497Z",
  "response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
  "done": true,
@ -327,7 +327,7 @@ If you want to set custom options for the model at runtime rather than in the Mo

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt": "Why is the sky blue?",
  "stream": false,
  "options": {
@ -355,6 +355,7 @@ curl http://localhost:11434/api/generate -d '{
    "num_gpu": 1,
    "main_gpu": 0,
    "low_vram": false,
    "f16_kv": true,
    "vocab_only": false,
    "use_mmap": true,
    "use_mlock": false,
@ -367,7 +368,7 @@ curl http://localhost:11434/api/generate -d '{

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
  "done": true,
@ -389,7 +390,7 @@ If an empty prompt is provided, the model will be loaded into memory.

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2"
  "model": "llama3.1"
}'
```

@ -399,40 +400,13 @@ A single JSON object is returned:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-12-18T19:52:07.071755Z",
  "response": "",
  "done": true
}
```

#### Unload a model

If an empty prompt is provided and the `keep_alive` parameter is set to `0`, a model will be unloaded from memory.

##### Request

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "keep_alive": 0
}'
```

##### Response

A single JSON object is returned:

```json
{
  "model": "llama3.2",
  "created_at": "2024-09-12T03:54:03.516566Z",
  "response": "",
  "done": true,
  "done_reason": "unload"
}
```

## Generate a chat completion

```shell
@ -471,7 +445,7 @@ Send a chat message with a streaming response.

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    {
      "role": "user",
@ -487,7 +461,7 @@ A stream of JSON objects is returned:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
    "role": "assistant",
@ -502,7 +476,7 @@ Final response:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
  "total_duration": 4883583458,
@ -520,7 +494,7 @@ Final response:

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    {
      "role": "user",
@ -535,7 +509,7 @@ curl http://localhost:11434/api/chat -d '{

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-12-12T14:13:43.416799Z",
  "message": {
    "role": "assistant",
@ -559,7 +533,7 @@ Send a chat message with a conversation history. You can use this same approach

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    {
      "role": "user",
@ -583,7 +557,7 @@ A stream of JSON objects is returned:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
    "role": "assistant",
@ -597,7 +571,7 @@ Final response:

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
  "total_duration": 8113331500,
@ -655,7 +629,7 @@ curl http://localhost:11434/api/chat -d '{

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    {
      "role": "user",
@ -673,7 +647,7 @@ curl http://localhost:11434/api/chat -d '{

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2023-12-12T14:13:43.416799Z",
  "message": {
    "role": "assistant",
@ -695,7 +669,7 @@ curl http://localhost:11434/api/chat -d '{

```
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "messages": [
    {
      "role": "user",
@ -734,7 +708,7 @@ curl http://localhost:11434/api/chat -d '{

```json
{
  "model": "llama3.2",
  "model": "llama3.1",
  "created_at": "2024-07-22T20:33:28.123648Z",
  "message": {
    "role": "assistant",
@ -762,64 +736,6 @@ curl http://localhost:11434/api/chat -d '{
}
```

#### Load a model

If the messages array is empty, the model will be loaded into memory.

##### Request

```
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "messages": []
}'
```

##### Response
```json
{
  "model": "llama3.2",
  "created_at":"2024-09-12T21:17:29.110811Z",
  "message": {
    "role": "assistant",
    "content": ""
  },
  "done_reason": "load",
  "done": true
}
```

#### Unload a model

If the messages array is empty and the `keep_alive` parameter is set to `0`, a model will be unloaded from memory.

##### Request

```
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "messages": [],
  "keep_alive": 0
}'
```

##### Response

A single JSON object is returned:

```json
{
  "model": "llama3.2",
  "created_at":"2024-09-12T21:33:17.547535Z",
  "message": {
    "role": "assistant",
    "content": ""
  },
  "done_reason": "unload",
  "done": true
}
```

## Create a Model

```shell
@ -988,7 +904,7 @@ Show information about a model including details, modelfile, template, parameter

```shell
curl http://localhost:11434/api/show -d '{
  "name": "llama3.2"
  "name": "llama3.1"
}'
```

@ -1049,7 +965,7 @@ Copy a model. Creates a model with another name from an existing model.

```shell
curl http://localhost:11434/api/copy -d '{
  "source": "llama3.2",
  "source": "llama3.1",
  "destination": "llama3-backup"
}'
```
@ -1104,7 +1020,7 @@ Download a model from the ollama library. Cancelled pulls are resumed from where

```shell
curl http://localhost:11434/api/pull -d '{
  "name": "llama3.2"
  "name": "llama3.1"
}'
```

@ -2,13 +2,15 @@

Install required tools:

- cmake version 3.24 or higher
- go version 1.22 or higher
- gcc version 11.4.0 or higher

### MacOS

[Download Go](https://go.dev/dl/)
```bash
brew install go cmake gcc
```

Optionally enable debugging and more verbose logging:

@ -20,10 +22,10 @@ export CGO_CFLAGS="-g"
export OLLAMA_DEBUG=1
```

Get the required libraries and build the native LLM code: (Adjust the job count based on your number of processors for a faster build)
Get the required libraries and build the native LLM code:

```bash
make -j 5
go generate ./...
```

Then build ollama:
@ -38,17 +40,13 @@ Now you can run `ollama`:
./ollama
```

#### Xcode 15 warnings

If you are using Xcode newer than version 14, you may see a warning during `go build` about `ld: warning: ignoring duplicate libraries: '-lobjc'` due to Golang issue https://github.com/golang/go/issues/67799 which can be safely ignored. You can suppress the warning with `export CGO_LDFLAGS="-Wl,-no_warn_duplicate_libraries"`

### Linux

#### Linux CUDA (NVIDIA)

_Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!_

Install `make`, `gcc` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
Install `cmake` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
development and runtime packages.

Typically the build scripts will auto-detect CUDA, however, if your Linux distro
@ -57,10 +55,10 @@ specifying an environment variable `CUDA_LIB_DIR` to the location of the shared
libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize
a set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70")

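As a rough sketch of what such an override can look like (the paths below are examples for a common CUDA toolkit layout and will vary by system):

```shell
# Example locations for a typical CUDA install; adjust to your system
export CUDA_LIB_DIR=/usr/local/cuda/lib64
export CUDACXX=/usr/local/cuda/bin/nvcc
export CMAKE_CUDA_ARCHITECTURES="50;60;70"
```
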
Then generate dependencies: (Adjust the job count based on your number of processors for a faster build)
Then generate dependencies:

```
make -j 5
go generate ./...
```

Then build the binary:
@ -73,7 +71,7 @@ go build .

_Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!_

Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/) development packages first, as well as `make`, `gcc`, and `golang`.
Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/) development packages first, as well as `cmake` and `golang`.

Typically the build scripts will auto-detect ROCm, however, if your Linux distro
or installation approach uses unusual paths, you can specify the location by
@ -82,10 +80,8 @@ install (typically `/opt/rocm`), and `CLBlast_DIR` to the location of the
CLBlast install (typically `/usr/lib/cmake/CLBlast`). You can also customize
the AMD GPU targets by setting AMDGPU_TARGETS (e.g. `AMDGPU_TARGETS="gfx1101;gfx1102"`)

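A rough sketch of such an override, using the typical paths mentioned above (the `ROCM_PATH` variable name is an assumption based on the surrounding text, which is partially elided in this hunk):

```shell
# Example locations for a typical ROCm/CLBlast install; adjust to your system
export ROCM_PATH=/opt/rocm
export CLBlast_DIR=/usr/lib/cmake/CLBlast
export AMDGPU_TARGETS="gfx1101;gfx1102"
```
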
Then generate dependencies: (Adjust the job count based on your number of processors for a faster build)

```
make -j 5
go generate ./...
```

Then build the binary:
@ -98,13 +94,19 @@ ROCm requires elevated privileges to access the GPU at runtime. On most distros

#### Advanced CPU Settings

By default, running `make` will compile a few different variations
By default, running `go generate ./...` will compile a few different variations
of the LLM library based on common CPU families and vector math capabilities,
including a lowest-common-denominator which should run on almost any 64 bit CPU
somewhat slowly. At runtime, Ollama will auto-detect the optimal variation to
load.
load. If you would like to build a CPU-based build customized for your
processor, you can set `OLLAMA_CUSTOM_CPU_DEFS` to the llama.cpp flags you would
like to use. For example, to compile an optimized binary for an Intel i9-9880H,
you might use:

Custom CPU settings are not currently supported in the new Go server build but will be added back after we complete the transition.
```
OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./...
go build .
```

#### Containerized Linux Build

@ -112,64 +114,37 @@ If you have Docker available, you can build linux binaries with `./scripts/build

### Windows

The following tools are required as a minimal development environment to build CPU inference support.
Note: The Windows build for Ollama is still under development.

First, install required tools:

- MSVC toolchain - C/C++ and cmake as minimal requirements
- Go version 1.22 or higher
  - https://go.dev/dl/
- Git
  - https://git-scm.com/download/win
- clang with gcc compat and Make. There are multiple options on how to go about installing these tools on Windows. We have verified the following, but others may work as well:
- MinGW (pick one variant) with GCC.
  - [MinGW-w64](https://www.mingw-w64.org/)
  - [MSYS2](https://www.msys2.org/)
  - After installing, from an MSYS2 terminal, run `pacman -S mingw-w64-clang-x86_64-gcc-compat mingw-w64-clang-x86_64-clang make` to install the required tools
- Assuming you used the default install prefix for msys2 above, add `C:\msys64\clang64\bin` and `c:\msys64\usr\bin` to your environment variable `PATH` where you will perform the build steps below (e.g. system-wide, account-level, powershell, cmd, etc.)

> [!NOTE]
> Due to bugs in the GCC C++ library for unicode support, Ollama should be built with clang on windows.
- The `ThreadJob` Powershell module: `Install-Module -Name ThreadJob -Scope CurrentUser`

Then, build the `ollama` binary:

```powershell
$env:CGO_ENABLED="1"
make -j 8
go generate ./...
go build .
```

#### GPU Support

The GPU tools require the Microsoft native build tools. To build either CUDA or ROCm, you must first install MSVC via Visual Studio:

- Make sure to select `Desktop development with C++` as a Workload during the Visual Studio install
- You must complete the Visual Studio install and run it once **BEFORE** installing CUDA or ROCm for the tools to properly register
- Add the location of the **64 bit (x64)** compiler (`cl.exe`) to your `PATH`
- Note: the default Developer Shell may configure the 32 bit (x86) compiler which will lead to build failures. Ollama requires a 64 bit toolchain.

#### Windows CUDA (NVIDIA)

In addition to the common Windows development tools and MSVC described above:
In addition to the common Windows development tools described above, install CUDA after installing MSVC.

- [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)

#### Windows ROCm (AMD Radeon)

In addition to the common Windows development tools and MSVC described above:
In addition to the common Windows development tools described above, install AMD's HIP package after installing MSVC.

- [AMD HIP](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html)
- [Strawberry Perl](https://strawberryperl.com/)

#### Windows arm64

The default `Developer PowerShell for VS 2022` may default to x86 which is not what you want. To ensure you get an arm64 development environment, start a plain PowerShell terminal and run:

```powershell
import-module 'C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
Enter-VsDevShell -Arch arm64 -vsinstallpath 'C:\Program Files\Microsoft Visual Studio\2022\Community' -skipautomaticlocation
```

You can confirm with `write-host $env:VSCMD_ARG_TGT_ARCH`

Follow the instructions at https://www.msys2.org/wiki/arm64/ to set up an arm64 msys2 environment. Ollama requires gcc and mingw32-make to compile, which are not currently available on Windows arm64, but a gcc compatibility adapter is available via `mingw-w64-clang-aarch64-gcc-compat`. At a minimum you will need to install the following:

```
pacman -S mingw-w64-clang-aarch64-clang mingw-w64-clang-aarch64-gcc-compat mingw-w64-clang-aarch64-make make
```

You will need to ensure your PATH includes go, cmake, gcc, clang, and mingw32-make to build ollama from source. (typically `C:\msys64\clangarm64\bin\`)
Lastly, add `ninja.exe` included with MSVC to the system path (e.g. `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja`).
@ -63,7 +63,7 @@ docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 114
Now you can run a model:

```
docker exec -it ollama ollama run llama3.2
docker exec -it ollama ollama run llama3.1
```

### Try different models

20 docs/faq.md
@ -32,7 +32,7 @@ When using the API, specify the `num_ctx` parameter:

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3.2",
  "model": "llama3.1",
  "prompt": "Why is the sky blue?",
  "options": {
    "num_ctx": 4096
@ -232,18 +232,14 @@ curl http://localhost:11434/api/chat -d '{"model": "mistral"}'

To preload a model using the CLI, use the command:
```shell
ollama run llama3.2 ""
ollama run llama3.1 ""
```

## How do I keep a model loaded in memory or make it unload immediately?

By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you're making numerous requests to the LLM. If you want to immediately unload a model from memory, use the `ollama stop` command:
By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` or `/api/chat` API endpoint to control how long the model is left in memory.

```shell
ollama stop llama3.2
```

If you're using the API, use the `keep_alive` parameter with the `/api/generate` and `/api/chat` endpoints to set the amount of time that a model stays in memory. The `keep_alive` parameter can be set to:
The `keep_alive` parameter can be set to:
* a duration string (such as "10m" or "24h")
* a number in seconds (such as 3600)
* any negative number which will keep the model loaded in memory (e.g. -1 or "-1m")
@ -251,17 +247,17 @@ If you're using the API, use the `keep_alive` parameter with the `/api/generate`

For example, to preload a model and leave it in memory use:
```shell
curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": -1}'
curl http://localhost:11434/api/generate -d '{"model": "llama3.1", "keep_alive": -1}'
```

To unload the model and free up memory use:
```shell
curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": 0}'
curl http://localhost:11434/api/generate -d '{"model": "llama3.1", "keep_alive": 0}'
```

Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable uses the same parameter types as the `keep_alive` parameter types mentioned above. Refer to the section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable.
Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable uses the same parameter types as the `keep_alive` parameter types mentioned above. Refer to section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable.
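
For example, a minimal sketch of starting the server with a longer default:

```shell
# Keep models loaded for 24 hours by default (accepts the same value types as keep_alive)
OLLAMA_KEEP_ALIVE=24h ollama serve
```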

The `keep_alive` API parameter with the `/api/generate` and `/api/chat` API endpoints will override the `OLLAMA_KEEP_ALIVE` setting.
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.

## How do I manage the maximum number of requests the Ollama server can queue?

@ -74,10 +74,6 @@ would set `HSA_OVERRIDE_GFX_VERSION="10.3.0"` as an environment variable for the
server. If you have an unsupported AMD GPU you can experiment using the list of
supported types below.

If you have multiple GPUs with different GFX versions, append the numeric device
number to the environment variable to set them individually. For example,
`HSA_OVERRIDE_GFX_VERSION_0=10.3.0` and `HSA_OVERRIDE_GFX_VERSION_1=11.0.0`
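
A sketch of what that might look like when starting the server, reusing the example values above:

```shell
# Override the GFX version per device: GPU 0 as gfx1030, GPU 1 as gfx1100
export HSA_OVERRIDE_GFX_VERSION_0="10.3.0"
export HSA_OVERRIDE_GFX_VERSION_1="11.0.0"
ollama serve
```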

At this time, the known supported GPU types on linux are the following LLVM Targets.
This table shows some example GPUs that map to these LLVM targets:
| **LLVM Target** | **An Example GPU** |
@ -103,10 +99,9 @@ Reach out on [Discord](https://discord.gg/ollama) or file an
### GPU Selection

If you have multiple AMD GPUs in your system and want to limit Ollama to use a
subset, you can set `ROCR_VISIBLE_DEVICES` to a comma separated list of GPUs.
subset, you can set `HIP_VISIBLE_DEVICES` to a comma separated list of GPUs.
You can see the list of devices with `rocminfo`. If you want to ignore the GPUs
and force CPU usage, use an invalid GPU ID (e.g., "-1"). When available, use the
`Uuid` to uniquely identify the device instead of numeric value.
and force CPU usage, use an invalid GPU ID (e.g., "-1")
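
As a minimal sketch, using the `HIP_VISIBLE_DEVICES` form from this revision:

```shell
# Expose only the first two AMD GPUs to Ollama; use "-1" to force CPU
HIP_VISIBLE_DEVICES=0,1 ollama serve
```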

### Container Permission

@ -32,7 +32,7 @@ ollama run my-model

Ollama supports importing adapters based on several different model architectures including:

* Llama (including Llama 2, Llama 3, Llama 3.1, and Llama 3.2);
* Llama (including Llama 2, Llama 3, and Llama 3.1);
* Mistral (including Mistral 1, Mistral 2, and Mixtral); and
* Gemma (including Gemma 1 and Gemma 2)

@ -67,12 +67,14 @@ ollama run my-model

Ollama supports importing models for several different architectures including:

* Llama (including Llama 2, Llama 3, Llama 3.1, and Llama 3.2);
* Llama (including Llama 2, Llama 3, and Llama 3.1);
* Mistral (including Mistral 1, Mistral 2, and Mixtral);
* Gemma (including Gemma 1 and Gemma 2); and
* Phi3

This includes importing foundation models as well as any fine tuned models which have been _fused_ with a foundation model.
This includes importing foundation models as well as any fine tuned models which which have been _fused_ with a foundation model.

## Importing a GGUF based model or adapter

If you have a GGUF based model or adapter it is possible to import it into Ollama. You can obtain a GGUF model or adapter by:
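
Once you have a GGUF file, a minimal sketch of the import flow (file and model names here are hypothetical) looks like:

```shell
# Point a Modelfile at a local GGUF file, then create and run the model
echo "FROM ./my-model.gguf" > Modelfile
ollama create my-model -f Modelfile
ollama run my-model
```
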
@ -50,7 +50,7 @@ INSTRUCTION arguments
An example of a `Modelfile` creating a mario blueprint:

```modelfile
FROM llama3.2
FROM llama3.1
# sets the temperature to 1 [higher is more creative, lower is more coherent]
PARAMETER temperature 1
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
@ -72,10 +72,10 @@ More examples are available in the [examples directory](../examples).
To view the Modelfile of a given model, use the `ollama show --modelfile` command.

```bash
> ollama show --modelfile llama3.2
> ollama show --modelfile llama3.1
# Modelfile generated by "ollama show"
# To build a new Modelfile based on this one, replace the FROM line with:
# FROM llama3.2:latest
# FROM llama3.1:latest
FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>

@ -103,7 +103,7 @@ FROM <model name>:<tag>
#### Build from existing model

```modelfile
FROM llama3.2
FROM llama3.1
```

A list of available base models:
@ -120,7 +120,7 @@ FROM <model directory>
The model directory should contain the Safetensors weights for a supported architecture.

Currently supported model architectures:
* Llama (including Llama 2, Llama 3, Llama 3.1, and Llama 3.2)
* Llama (including Llama 2, Llama 3, and Llama 3.1)
* Mistral (including Mistral 1, Mistral 2, and Mixtral)
* Gemma (including Gemma 1 and Gemma 2)
* Phi3
@ -25,7 +25,7 @@ chat_completion = client.chat.completions.create(
'content': 'Say this is a test',
}
],
model='llama3.2',
model='llama3.1',
)

response = client.chat.completions.create(
@ -37,7 +37,7 @@ response = client.chat.completions.create(
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZ
M6pSETsEPGqZOndr2uuuR5rF169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC",
"image_url": "iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5
rF169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC",
},
],
}
@ -46,13 +46,13 @@ response = client.chat.completions.create(
)

completion = client.completions.create(
model="llama3.2",
model="llama3.1",
prompt="Say this is a test",
)

list_completion = client.models.list()

model = client.models.retrieve("llama3.2")
model = client.models.retrieve("llama3.1")

embeddings = client.embeddings.create(
model="all-minilm",
@ -74,7 +74,7 @@ const openai = new OpenAI({

const chatCompletion = await openai.chat.completions.create({
messages: [{ role: 'user', content: 'Say this is a test' }],
model: 'llama3.2',
model: 'llama3.1',
})

const response = await openai.chat.completions.create({
@ -86,7 +86,7 @@ const response = await openai.chat.completions.create({
{ type: "text", text: "What's in this image?" },
{
type: "image_url",
image_url: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6
pSETsEPGqZOndr2uuuR5rF169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC",
image_url: "iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC",
},
],
},
@ -94,13 +94,13 @@ const response = await openai.chat.completions.create({
})

const completion = await openai.completions.create({
  model: "llama3.2",
  model: "llama3.1",
  prompt: "Say this is a test.",
})

const listCompletion = await openai.models.list()

const model = await openai.models.retrieve("llama3.2")
const model = await openai.models.retrieve("llama3.1")

const embedding = await openai.embeddings.create({
  model: "all-minilm",
@ -114,7 +114,7 @@ const embedding = await openai.embeddings.create({
curl http://localhost:11434/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "llama3.2",
        "model": "llama3.1",
        "messages": [
            {
                "role": "system",
@ -142,7 +142,7 @@ curl http://localhost:11434/v1/chat/completions \
            {
                "type": "image_url",
                "image_url": {
"url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSET
sEPGqZOndr2uuuR5rF169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"
"url": "iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF169a
2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"
                }
            }
        ]
@ -154,13 +154,13 @@ curl http://localhost:11434/v1/chat/completions \
curl http://localhost:11434/v1/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "llama3.2",
        "model": "llama3.1",
        "prompt": "Say this is a test"
    }'

curl http://localhost:11434/v1/models

curl http://localhost:11434/v1/models/llama3.2
curl http://localhost:11434/v1/models/llama3.1

curl http://localhost:11434/v1/embeddings \
    -H "Content-Type: application/json" \
@ -274,7 +274,7 @@ curl http://localhost:11434/v1/embeddings \
Before using a model, pull it locally with `ollama pull`:

```shell
ollama pull llama3.2
ollama pull llama3.1
```

### Default model names
@ -282,7 +282,7 @@ ollama pull llama3.2
For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:

```
ollama cp llama3.2 gpt-3.5-turbo
ollama cp llama3.1 gpt-3.5-turbo
```

Afterwards, this new model name can be specified in the `model` field:
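For example, a minimal sketch of such a request (assuming the `ollama cp` above has been run; the prompt is illustrative):

```shell
# Sketch: existing gpt-3.5-turbo tooling can now hit Ollama's
# OpenAI-compatible endpoint using the copied model name.
curl http://localhost:11434/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "gpt-3.5-turbo",
        "messages": [
            {
                "role": "user",
                "content": "Say this is a test."
            }
        ]
    }'
```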
@ -33,7 +33,7 @@ Omitting a template in these models puts the responsibility of correctly templat
To add templates in your model, you'll need to add a `TEMPLATE` command to the Modelfile. Here's an example using Meta's Llama 3.

```dockerfile
FROM llama3.2
FROM llama3.1

TEMPLATE """{{- if .System }}<|start_header_id|>system<|end_header_id|>

@ -95,9 +95,7 @@ If none of those resolve the problem, gather additional information and file an

On Linux, AMD GPU access typically requires `video` and/or `render` group membership to access the `/dev/kfd` device. If permissions are not set up correctly, Ollama will detect this and report an error in the server log.

When running in a container, in some Linux distributions and container runtimes, the ollama process may be unable to access the GPU. Use `ls -lnd /dev/kfd /dev/dri /dev/dri/*` on the host system to determine the **numeric** group IDs on your system, and pass additional `--group-add ...` arguments to the container so it can access the required devices. For example, in the following output `crw-rw---- 1 0 44 226, 0 Sep 16 16:55 /dev/dri/card0` the group ID column is `44`.

If Ollama initially works on the GPU in a docker container, but then switches to running on CPU after some period of time with errors in the server log reporting GPU discovery failures, this can be resolved by disabling systemd cgroup management in Docker. Edit `/etc/docker/daemon.json` on the host and add `"exec-opts": ["native.cgroupdriver=cgroupfs"]` to the docker configuration.
When running in a container, in some Linux distributions and container runtimes, the ollama process may be unable to access the GPU. Use `ls -ld /dev/kfd /dev/dri /dev/dri/*` on the host system to determine the group assignments on your system, and pass additional `--group-add ...` arguments to the container so it can access the required devices.
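As a minimal sketch of the container invocation (the group IDs `44` and `110` and the `ollama/ollama:rocm` image tag are assumptions for illustration; substitute the numeric IDs from your own `ls -lnd` output):

```bash
# Sketch: pass the AMD device nodes into the container and add the numeric
# group IDs that own /dev/kfd and /dev/dri on the host (44 and 110 here
# are examples, not universal values).
docker run -d \
  --device /dev/kfd --device /dev/dri \
  --group-add 44 --group-add 110 \
  -v ollama:/root/.ollama -p 11434:11434 \
  --name ollama ollama/ollama:rocm
```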

If you are experiencing problems getting Ollama to correctly discover or use your GPU for inference, the following may help isolate the failure.
- `AMD_LOG_LEVEL=3` Enable info log levels in the AMD HIP/ROCm libraries. This can help show more detailed error codes that can help troubleshoot problems
@ -15,7 +15,7 @@ import { Ollama } from "@langchain/community/llms/ollama";

const ollama = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama3.2",
  model: "llama3.1",
});

const answer = await ollama.invoke(`why is the sky blue?`);
@ -23,7 +23,7 @@ const answer = await ollama.invoke(`why is the sky blue?`);
console.log(answer);
```

That will get us the same thing as if we ran `ollama run llama3.2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
That will get us the same thing as if we ran `ollama run llama3.1 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.

```bash
npm install cheerio
@ -10,7 +10,7 @@ This sounds like a typical censored response, but even llama2-uncensored gives a

So let's figure out how we can use **LangChain** with Ollama to ask our question to the actual document, the Odyssey by Homer, using Python.

Let's start by asking a simple question that we can get an answer to from the **Llama3** model using **Ollama**. First, we need to install the **LangChain** package:
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:

`pip install langchain_community`

@ -1,15 +1,22 @@
# Ollama Windows
# Ollama Windows Preview

Welcome to Ollama for Windows.
Welcome to the Ollama Windows preview.

No more WSL required!

Ollama now runs as a native Windows application, including NVIDIA and AMD Radeon GPU support.
After installing Ollama for Windows, Ollama will run in the background and
After installing Ollama Windows Preview, Ollama will run in the background and
the `ollama` command line is available in `cmd`, `powershell` or your favorite
terminal application. As usual the Ollama [api](./api.md) will be served on
`http://localhost:11434`.

As this is a preview release, you should expect a few bugs here and there. If
you run into a problem you can reach out on
[Discord](https://discord.gg/ollama), or file an
[issue](https://github.com/ollama/ollama/issues).
Logs will often be helpful in diagnosing the problem (see
[Troubleshooting](#troubleshooting) below)

## System Requirements

* Windows 10 22H2 or newer, Home or Pro
@ -18,41 +25,19 @@ terminal application. As usual the Ollama [api](./api.md) will be served on

Ollama uses unicode characters for progress indication, which may render as unknown squares in some older terminal fonts in Windows 10. If you see this, try changing your terminal font settings.

## Filesystem Requirements

The Ollama install does not require Administrator, and installs in your home directory by default. You'll need at least 4GB of space for the binary install. Once you've installed Ollama, you'll need additional space for storing the Large Language models, which can be tens to hundreds of GB in size. If your home directory doesn't have enough space, you can change where the binaries are installed, and where the models are stored.

### Changing Install Location

To install the Ollama application in a location different than your home directory, start the installer with the following flag:

```powershell
OllamaSetup.exe /DIR="d:\some\location"
```

### Changing Model Location

To change where Ollama stores the downloaded models instead of using your home directory, set the environment variable `OLLAMA_MODELS` in your user account.

1. Start the Settings (Windows 11) or Control Panel (Windows 10) application and search for _environment variables_.

2. Click on _Edit environment variables for your account_.

3. Edit or create a new variable named `OLLAMA_MODELS` for your user account, pointing to where you want the models stored.

4. Click OK/Apply to save.

If Ollama is already running, quit the tray application and relaunch it from the Start menu, or from a new terminal started after you saved the environment variables.

## API Access

Here's a quick example showing API access from `powershell`:
```powershell
(Invoke-WebRequest -method POST -Body '{"model":"llama3.2", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
(Invoke-WebRequest -method POST -Body '{"model":"llama3.1", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
```

## Troubleshooting

While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
a "view logs" menu item to the app, and increases logging for the GUI app and
server.

Ollama on Windows stores files in a few different locations. You can view them in
the explorer window by hitting `<cmd>+R` and typing in:
- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
@ -67,10 +52,6 @@ the explorer window by hitting `<cmd>+R` and type in:

The Ollama Windows installer registers an Uninstaller application. Under `Add or remove programs` in Windows Settings, you can uninstall Ollama.

> [!NOTE]
> If you have [changed the OLLAMA_MODELS location](#changing-model-location), the installer will not remove your downloaded models


## Standalone CLI

The easiest way to install Ollama on Windows is to use the `OllamaSetup.exe`
@ -72,7 +72,6 @@ func Origins() (origins []string) {
	"app://*",
	"file://*",
	"tauri://*",
	"vscode-webview://*",
)

return origins
@ -161,8 +160,6 @@ var (
	SchedSpread = Bool("OLLAMA_SCHED_SPREAD")
	// IntelGPU enables experimental Intel GPU detection.
	IntelGPU = Bool("OLLAMA_INTEL_GPU")
	// MultiUserCache optimizes prompt caching for multi-user scenarios
	MultiUserCache = Bool("OLLAMA_MULTIUSER_CACHE")
)

func String(s string) func() string {
@ -248,7 +245,6 @@ func AsMap() map[string]EnvVar {
	"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", Origins(), "A comma separated list of allowed origins"},
	"OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread(), "Always schedule model across all GPUs"},
	"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir(), "Location for temporary files"},
	"OLLAMA_MULTIUSER_CACHE": {"OLLAMA_MULTIUSER_CACHE", MultiUserCache(), "Optimize prompt caching for multi-user scenarios"},

// Informational
"HTTP_PROXY": {"HTTP_PROXY", String("HTTP_PROXY")(), "HTTP proxy"},
@ -265,9 +261,9 @@ func AsMap() map[string]EnvVar {

if runtime.GOOS != "darwin" {
	ret["CUDA_VISIBLE_DEVICES"] = EnvVar{"CUDA_VISIBLE_DEVICES", CudaVisibleDevices(), "Set which NVIDIA devices are visible"}
	ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", HipVisibleDevices(), "Set which AMD devices are visible by numeric ID"}
	ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices(), "Set which AMD devices are visible by UUID or numeric ID"}
	ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal(), "Set which AMD devices are visible by numeric ID"}
	ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", HipVisibleDevices(), "Set which AMD devices are visible"}
	ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices(), "Set which AMD devices are visible"}
	ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal(), "Set which AMD devices are visible"}
	ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion(), "Override the gfx used for all detected AMD GPUs"}
	ret["OLLAMA_INTEL_GPU"] = EnvVar{"OLLAMA_INTEL_GPU", IntelGPU(), "Enable experimental Intel GPU detection"}
}
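As a usage sketch for the `HSA_OVERRIDE_GFX_VERSION` override above (the value `10.3.0` is an illustrative assumption; pick the closest gfx target actually supported for your card):

```bash
# Sketch: report the GPU to ROCm as gfx1030 for this server process only.
HSA_OVERRIDE_GFX_VERSION=10.3.0 ollama serve
```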
@ -68,7 +68,6 @@ func TestOrigins(t *testing.T) {
	"app://*",
	"file://*",
	"tauri://*",
	"vscode-webview://*",
}},
{"http://10.0.0.1", []string{
	"http://10.0.0.1",
@ -87,7 +86,6 @@ func TestOrigins(t *testing.T) {
	"app://*",
	"file://*",
	"tauri://*",
	"vscode-webview://*",
}},
{"http://172.16.0.1,https://192.168.0.1", []string{
	"http://172.16.0.1",
@ -107,7 +105,6 @@ func TestOrigins(t *testing.T) {
	"app://*",
	"file://*",
	"tauri://*",
	"vscode-webview://*",
}},
{"http://totally.safe,http://definitely.legit", []string{
	"http://totally.safe",
@ -127,7 +124,6 @@ func TestOrigins(t *testing.T) {
	"app://*",
	"file://*",
	"tauri://*",
	"vscode-webview://*",
}},
}
for _, tt := range cases {
@ -35,7 +35,7 @@ func main() {

ctx := context.Background()
req := &api.ChatRequest{
	Model: "llama3.2",
	Model: "llama3.1",
	Messages: messages,
}

@ -4,10 +4,10 @@ This example provides an interface for asking questions to a PDF document.

## Setup

1. Ensure you have the `llama3.2` model installed:
1. Ensure you have the `llama3.1` model installed:

```
ollama pull llama3.2
ollama pull llama3.1
```

2. Install the Python Requirements.
@ -51,7 +51,7 @@ while True:
	template=template,
)

llm = Ollama(model="llama3.2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
llm = Ollama(model="llama3.1", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
qa_chain = RetrievalQA.from_chain_type(
	llm,
	retriever=vectorstore.as_retriever(),
@ -4,10 +4,10 @@ This example summarizes the website, [https://ollama.com/blog/run-llama2-uncenso

## Running the Example

1. Ensure you have the `llama3.2` model installed:
1. Ensure you have the `llama3.1` model installed:

```bash
ollama pull llama3.2
ollama pull llama3.1
```

2. Install the Python Requirements.
@ -5,7 +5,7 @@ from langchain.chains.summarize import load_summarize_chain
loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
docs = loader.load()

llm = Ollama(model="llama3.2")
llm = Ollama(model="llama3.1")
chain = load_summarize_chain(llm, chain_type="stuff")

result = chain.invoke(docs)
@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama.

## Running the Example

1. Ensure you have the `llama3.2` model installed:
1. Ensure you have the `llama3.1` model installed:

```bash
ollama pull llama3.2
ollama pull llama3.1
```

2. Install the Python Requirements.
@ -1,6 +1,6 @@
from langchain.llms import Ollama

input = input("What is your question?")
llm = Ollama(model="llama3.2")
llm = Ollama(model="llama3.1")
res = llm.predict(input)
print(res)
@ -1,4 +1,4 @@
FROM llama3.2
FROM llama3.1
PARAMETER temperature 1
SYSTEM """
You are Mario from super mario bros, acting as an assistant.
@ -2,12 +2,12 @@

# Example character: Mario

This example shows how to create a basic character using Llama 3.2 as the base model.
This example shows how to create a basic character using Llama 3.1 as the base model.

To run this example:

1. Download the Modelfile
2. `ollama pull llama3.2` to get the base model used in the model file.
2. `ollama pull llama3.1` to get the base model used in the model file.
3. `ollama create NAME -f ./Modelfile`
4. `ollama run NAME`

@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
What the model file looks like:

```
FROM llama3.2
FROM llama3.1
PARAMETER temperature 1
SYSTEM """
You are Mario from Super Mario Bros, acting as an assistant.
@ -1,93 +0,0 @@
# RAG Hallucination Checker using Bespoke-Minicheck

This example allows the user to ask questions related to a document, which can be specified via an article URL. Relevant chunks are retrieved from the document and given to `llama3.2` as context to answer the question. Then each sentence in the answer is checked against the retrieved chunks using `bespoke-minicheck` to ensure that the answer does not contain hallucinations.

## Running the Example

1. Ensure the `all-minilm` (embedding), `llama3.2` (chat), and `bespoke-minicheck` (check) models are installed:

```bash
ollama pull all-minilm
ollama pull llama3.2
ollama pull bespoke-minicheck
```

2. Install the dependencies.

```bash
pip install -r requirements.txt
```

3. Run the example:

```bash
python main.py
```

## Expected Output

```text
Enter the URL of an article you want to chat with, or press Enter for default example:

Loaded, chunked, and embedded text from https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt.

Enter your question or type quit: Who is the CEO of openai?

Retrieved chunks:
OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can . It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence .
OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can . It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence . More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week .
More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens .
OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. ” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data .

LLM Answer:
The text does not mention the CEO of OpenAI. It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership.

LLM Claim: The text does not mention the CEO of OpenAI.
Is this claim supported by the context according to bespoke-minicheck? Yes

LLM Claim: It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership.
Is this claim supported by the context according to bespoke-minicheck? No
```

The second claim is unsupported since the text mentions the research lead.

Another tricky example:

```text

Enter your question or type quit: what sets o1 apart from gpt-4o?

Retrieved chunks:
OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. ” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data .
He says OpenAI also tested o1 against a qualifying exam for the International Mathematics Olympiad , and while GPT-4o only correctly solved only 13 percent of problems , o1 scored 83 percent . “ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. ” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world .
More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens .
“ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. ” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world . It also doesn ’ t have the ability to browse the web or process files and images . Still , the company believes it represents a brand-new class of capabilities . It was named o1 to indicate “ resetting the counter back to 1. ” “ I ’ m gon na be honest : I think we ’ re terrible at naming , traditionally , ” McGrew says .
LLM Answer: According to the text, several things set o1 apart from GPT-4o:

* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%.
* The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it.
* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance.
* However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world.

LLM Claim: According to the text, several things set o1 apart from GPT-4o:

* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%.
Is this claim supported by the context according to bespoke-minicheck? Yes

LLM Claim: * The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it.
Is this claim supported by the context according to bespoke-minicheck? Yes

LLM Claim: * o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance.
Is this claim supported by the context according to bespoke-minicheck? No

LLM Claim: * However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world.
Is this claim supported by the context according to bespoke-minicheck? Yes
```

We see that the third claim "* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance." is not supported by the context. This is because the context only mentions that o1 "is claimed to perform", which is different from "has been shown to perform".
@ -1,137 +0,0 @@
import ollama
import warnings
from mattsollamatools import chunker
from newspaper import Article
import numpy as np
from sklearn.neighbors import NearestNeighbors
import nltk

warnings.filterwarnings(
    "ignore", category=FutureWarning, module="transformers.tokenization_utils_base"
)
nltk.download("punkt_tab", quiet=True)


def getArticleText(url):
    """Gets the text of an article from a URL.

    Often there are a bunch of ads and menus on pages for a news article.
    This uses newspaper3k to get just the text of just the article.
    """
    article = Article(url)
    article.download()
    article.parse()
    return article.text


def knn_search(question_embedding, embeddings, k=5):
    """Performs K-nearest neighbors (KNN) search"""
    X = np.array(
        [item["embedding"] for article in embeddings for item in article["embeddings"]]
    )
    source_texts = [
        item["source"] for article in embeddings for item in article["embeddings"]
    ]

    # Fit a KNN model on the embeddings
    knn = NearestNeighbors(n_neighbors=k, metric="cosine")
    knn.fit(X)

    # Find the indices and distances of the k-nearest neighbors.
    _, indices = knn.kneighbors(question_embedding, n_neighbors=k)

    # Get the indices and source texts of the best matches
    best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)]

    return best_matches


def check(document, claim):
    """Checks if the claim is supported by the document by calling bespoke-minicheck.

    Returns Yes/yes if the claim is supported by the document, No/no otherwise.
    Support for logits will be added in the future.

    bespoke-minicheck's system prompt is defined as:
      'Determine whether the provided claim is consistent with the corresponding
      document. Consistency in this context implies that all information presented in the claim
      is substantiated by the document. If not, it should be considered inconsistent. Please
      assess the claim's consistency with the document by responding with either "Yes" or "No".'

    bespoke-minicheck's user prompt is defined as:
      "Document: {document}\nClaim: {claim}"
    """
    prompt = f"Document: {document}\nClaim: {claim}"
    response = ollama.generate(
        model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0}
    )
    return response["response"].strip()


if __name__ == "__main__":
    allEmbeddings = []
    default_url = "https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt"
    user_input = input(
        "Enter the URL of an article you want to chat with, or press Enter for default example: "
    )
    article_url = user_input.strip() if user_input.strip() else default_url
    article = {}
    article["embeddings"] = []
    article["url"] = article_url
    text = getArticleText(article_url)
    chunks = chunker(text)

    # Embed (batch) chunks using ollama
    embeddings = ollama.embed(model="all-minilm", input=chunks)["embeddings"]

    for chunk, embedding in zip(chunks, embeddings):
        item = {}
        item["source"] = chunk
        item["embedding"] = embedding
        item["sourcelength"] = len(chunk)
        article["embeddings"].append(item)

    allEmbeddings.append(article)

    print(f"\nLoaded, chunked, and embedded text from {article_url}.\n")

    while True:
        # Input a question from the user
        # For example, "Who is the chief research officer?"
        question = input("Enter your question or type quit: ")

        if question.lower() == "quit":
            break

        # Embed the user's question using ollama.embed
        question_embedding = ollama.embed(model="all-minilm", input=question)[
            "embeddings"
        ]

        # Perform KNN search to find the best matches (indices and source text)
        best_matches = knn_search(question_embedding, allEmbeddings, k=4)

        sourcetext = "\n\n".join([source_text for (_, source_text) in best_matches])

        print(f"\nRetrieved chunks: \n{sourcetext}\n")

        # Give the retrieved chunks and question to the chat model
        system_prompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}"

        ollama_response = ollama.generate(
            model="llama3.2",
            prompt=question,
            system=system_prompt,
            options={"stream": False},
        )

        answer = ollama_response["response"]
        print(f"LLM Answer:\n{answer}\n")

        # Check each sentence in the response for grounded factuality
        if answer:
            for claim in nltk.sent_tokenize(answer):
                print(f"LLM Claim: {claim}")
                print(
                    f"Is this claim supported by the context according to bespoke-minicheck? {check(sourcetext, claim)}\n"
                )
@ -1,8 +0,0 @@
ollama
lxml==5.3.0
lxml_html_clean==0.2.2
mattsollamatools==0.0.25
newspaper3k==0.2.8
nltk==3.9.1
numpy==1.26.4
scikit-learn==1.5.2
@ -1,53 +0,0 @@
"""Simple example to demonstrate how to use the bespoke-minicheck model."""

import ollama

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`


def check(document, claim):
    """Checks if the claim is supported by the document by calling bespoke-minicheck.

    Returns Yes/yes if the claim is supported by the document, No/no otherwise.
    Support for logits will be added in the future.

    bespoke-minicheck's system prompt is defined as:
      'Determine whether the provided claim is consistent with the corresponding
      document. Consistency in this context implies that all information presented in the claim
      is substantiated by the document. If not, it should be considered inconsistent. Please
      assess the claim's consistency with the document by responding with either "Yes" or "No".'

    bespoke-minicheck's user prompt is defined as:
      "Document: {document}\nClaim: {claim}"
    """
    prompt = f"Document: {document}\nClaim: {claim}"
    response = ollama.generate(
        model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0}
    )
    return response["response"].strip()


def get_user_input(prompt):
    user_input = input(prompt)
    if not user_input:
        exit()
    print()
    return user_input


def main():
    while True:
        # Get a document from the user (e.g. "Ryan likes running and biking.")
        document = get_user_input("Enter a document: ")
        # Get a claim from the user (e.g. "Ryan likes to run.")
        claim = get_user_input("Enter a claim: ")
        # Check if the claim is supported by the document
        grounded_factuality_check = check(document, claim)
        print(
            f"Is the claim supported by the document according to bespoke-minicheck? {grounded_factuality_check}"
        )
        print("\n\n")


if __name__ == "__main__":
    main()
@ -1,54 +0,0 @@
# Simple Bespoke-Minicheck Example

`bespoke-minicheck` is a model for checking if a claim is supported by a document. It is used through the **generate** endpoint, which is called in this example with a `prompt` that includes the expected formatting of the user input.
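Because it is a plain generate call, the same check can be issued directly against the REST API; a minimal sketch, assuming the server is running and the model has been pulled:

```bash
# Sketch: mirror the example's prompt format in a direct /api/generate call.
curl http://localhost:11434/api/generate -d '{
  "model": "bespoke-minicheck",
  "prompt": "Document: Roses are red.\nClaim: Roses are blue.",
  "stream": false,
  "options": {"num_predict": 2, "temperature": 0.0}
}'
```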

## Running the Example

1. Ensure you have the `bespoke-minicheck` model installed:

```bash
ollama pull bespoke-minicheck
```

2. Install the dependencies:

```bash
pip install -r requirements.txt
```

3. Run the program:

```bash
python main.py
```

4. Enter a document and a claim when prompted:

```bash
Enter a document: Roses are red.

Enter a claim: Roses are blue.
```

The claim and document are then given to `bespoke-minicheck` as inputs, which then generates a response (Yes or No) indicating whether the claim is supported by the document.

```bash
Is the claim supported by the document according to bespoke-minicheck? No
```

## More Examples

Document ([source](https://en.wikipedia.org/wiki/Apple_I)):
> The Apple Computer 1 (Apple-1[a]), later known predominantly as the Apple I(written with a Roman numeral),[b] is an 8-bit motherboard-only personal computer designed by Steve Wozniak[5][6] and released by the Apple Computer Company (now Apple Inc.) in 1976. The company was initially formed to sell the Apple I – its first product – and would later become the world's largest technology company.[7] The idea of starting a company and selling the computer came from Wozniak's friend and Apple co-founder Steve Jobs.[8][9] One of the main innovations of the Apple I was that it included video display terminal circuitry on its circuit board, allowing it to connect to a low-cost composite video monitor or television, instead of an expensive computer terminal, compared to most existing computers at the time.

Claim:
> The Apple I is a 16-bit computer.

Expected output:
> Is the claim supported by the document according to bespoke-minicheck? **No**

Claim:
> Apple was originally called the Apple Computer Company.

Expected output:
> Is the claim supported by the document according to bespoke-minicheck? **Yes**
@ -1 +0,0 @@
ollama
@ -2,7 +2,7 @@ import requests
import json
import random

model = "llama3.2"
model = "llama3.1"
template = {
    "firstName": "",
    "lastName": "",
@ -12,7 +12,7 @@ countries = [
    "France",
]
country = random.choice(countries)
model = "llama3.2"
model = "llama3.1"

prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran

## Running the Example

1. Ensure you have the `llama3.2` model installed:
1. Ensure you have the `llama3.1` model installed:

```bash
ollama pull llama3.2
ollama pull llama3.1
```

2. Install the Python Requirements.
@ -2,7 +2,7 @@ import json
import requests

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
model = "llama3.2"  # TODO: update this for whatever model you wish to use
model = "llama3.1"  # TODO: update this for whatever model you wish to use


def chat(messages):
@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam

## Running the Example

1. Ensure you have the `llama3.2` model installed:
1. Ensure you have the `llama3.1` model installed:

```bash
ollama pull llama3.2
ollama pull llama3.1
```

2. Install the Python Requirements.
@ -1,6 +1,6 @@
import * as readline from "readline";

const model = "llama3.2";
const model = "llama3.1";
type Message = {
    role: "assistant" | "user" | "system";
    content: string;
3
go.mod
@ -1,6 +1,6 @@
module github.com/ollama/ollama

go 1.22.8
go 1.22.5

require (
	github.com/containerd/console v1.0.3
@ -22,7 +22,6 @@ require (
	github.com/mattn/go-runewidth v0.0.14
	github.com/nlpodyssey/gopickle v0.3.0
	github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
	golang.org/x/image v0.14.0
)

require (
2
go.sum
@ -230,8 +230,6 @@ golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+o
golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -1,6 +1,6 @@
//go:build linux || windows

package discover
package gpu

import (
	"errors"
@ -37,6 +37,19 @@ func GetSupportedGFX(libDir string) ([]string, error) {
	return ret, nil
}

func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
	ids := []string{}
	for _, info := range gpuInfo {
		if info.Library != "rocm" {
			// TODO shouldn't happen if things are wired correctly...
			slog.Debug("rocmGetVisibleDevicesEnv skipping over non-rocm device", "library", info.Library)
			continue
		}
		ids = append(ids, info.ID)
	}
	return "HIP_VISIBLE_DEVICES", strings.Join(ids, ",")
}
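For reference, the same selection this helper computes can be applied by hand when starting the server; a minimal sketch (the device indices are illustrative):

```bash
# Sketch: pin the runtime to the first two ROCm devices, the same shape of
# value rocmGetVisibleDevicesEnv produces from the scheduler's GPU list.
HIP_VISIBLE_DEVICES=0,1 ollama serve
```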
|
||||
|
||||
func commonAMDValidateLibDir() (string, error) {
|
||||
// Favor our bundled version
|
||||
|
@ -1,4 +1,4 @@
|
||||
package discover
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@ -64,7 +64,7 @@ func NewHipLib() (*HipLib, error) {
|
||||
return hl, nil
|
||||
}
|
||||
|
||||
// The hip library only evaluates the ROCR_VISIBLE_DEVICES variable at startup
|
||||
// The hip library only evaluates the HIP_VISIBLE_DEVICES variable at startup
|
||||
// so we have to unload/reset the library after we do our initial discovery
|
||||
// to make sure our updates to that variable are processed by llama.cpp
|
||||
func (hl *HipLib) Release() {
|
@ -1,4 +1,4 @@
|
||||
package discover
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@ -47,11 +47,10 @@ var (
|
||||
)
|
||||
|
||||
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
||||
// Only called once during bootstrap
|
||||
func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
|
||||
func AMDGetGPUInfo() []RocmGPUInfo {
|
||||
resp := []RocmGPUInfo{}
|
||||
if !AMDDetected() {
|
||||
return resp, fmt.Errorf("AMD GPUs not detected")
|
||||
return resp
|
||||
}
|
||||
|
||||
// Opportunistic logging of driver version to aid in troubleshooting
|
||||
@ -64,13 +63,16 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
|
||||
// Determine if the user has already pre-selected which GPUs to look at, then ignore the others
|
||||
var visibleDevices []string
|
||||
hipVD := envconfig.HipVisibleDevices() // zero based index only
|
||||
rocrVD := envconfig.RocrVisibleDevices() // zero based index or UUID
|
||||
rocrVD := envconfig.RocrVisibleDevices() // zero based index or UUID, but consumer cards seem to not support UUID
|
||||
gpuDO := envconfig.GpuDeviceOrdinal() // zero based index
|
||||
switch {
|
||||
case rocrVD != "":
|
||||
visibleDevices = strings.Split(rocrVD, ",")
|
||||
// TODO is this priorty order right?
|
||||
case hipVD != "":
|
||||
visibleDevices = strings.Split(hipVD, ",")
|
||||
case rocrVD != "":
|
||||
visibleDevices = strings.Split(rocrVD, ",")
|
||||
// TODO - since we don't yet support UUIDs, consider detecting and reporting here
|
||||
// all our test systems show GPU-XX indicating UUID is not supported
|
||||
case gpuDO != "":
|
||||
visibleDevices = strings.Split(gpuDO, ",")
|
||||
}
|
||||
@ -96,7 +98,7 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
}
return a < b
})
gpuCount := 0
cpuCount := 0
for _, match := range matches {
slog.Debug("evaluating amdgpu node " + match)
fp, err := os.Open(match)
@ -105,6 +107,11 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
continue
}
defer fp.Close()
nodeID, err := strconv.Atoi(filepath.Base(filepath.Dir(match)))
if err != nil {
slog.Debug("failed to parse node ID", "error", err)
continue
}

scanner := bufio.NewScanner(fp)
isCPU := false
@ -178,18 +185,23 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
// do reliably report VRAM usage.

if isCPU {
cpuCount++
continue
}

// Skip over any GPUs that are masked
if major == 0 && minor == 0 && patch == 0 {
slog.Debug("skipping gpu with gfx000")
continue
// CPUs are always first in the list
gpuID := nodeID - cpuCount

// Shouldn't happen, but just in case...
if gpuID < 0 {
slog.Error("unexpected amdgpu sysfs data resulted in negative GPU ID, please set OLLAMA_DEBUG=1 and report an issue")
return nil
}

// Keep track of numeric IDs based on valid GPUs
gpuID := gpuCount
gpuCount += 1
if int(major) < RocmComputeMin {
slog.Warn(fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch), "gpu", gpuID)
continue
}

// Look up the memory for the current node
totalMemory := uint64(0)
@ -258,20 +270,19 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
break
}

// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
if totalMemory < IGPUMemLimit {
slog.Info("unsupported Radeon iGPU detected skipping", "id", gpuID, "total", format.HumanBytes2(totalMemory))
continue
}
var name string
// TODO - PCI ID lookup
if vendor > 0 && device > 0 {
name = fmt.Sprintf("%04x:%04x", vendor, device)
}

// Favor UUIDs if available to reduce possibility of getting the numeric IDs wrong
var ID string
if uniqueID != 0 {
ID = fmt.Sprintf("GPU-%016x", uniqueID)
} else {
ID = strconv.Itoa(gpuID)
}

slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))
gpuInfo := RocmGPUInfo{
GpuInfo: GpuInfo{
Library: "rocm",
@ -279,7 +290,7 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
TotalMemory: totalMemory,
FreeMemory: (totalMemory - usedMemory),
},
ID: ID,
ID: strconv.Itoa(gpuID),
Name: name,
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
MinimumMemory: rocmMinimumMemory,
@ -287,51 +298,19 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
DriverMinor: driverMinor,
},
usedFilepath: usedFile,
index: gpuID,
}

// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
if totalMemory < IGPUMemLimit {
reason := "unsupported Radeon iGPU detected skipping"
slog.Info(reason, "id", gpuID, "total", format.HumanBytes2(totalMemory))
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})
continue
}

if int(major) < RocmComputeMin {
reason := fmt.Sprintf("amdgpu too old gfx%d%x%x", major, minor, patch)
slog.Warn(reason, "gpu", gpuID)
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})

continue
}

slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))

// If the user wants to filter to a subset of devices, filter out if we aren't a match
if len(visibleDevices) > 0 {
include := false
for _, visible := range visibleDevices {
if visible == gpuInfo.ID || visible == strconv.Itoa(gpuInfo.index) {
if visible == gpuInfo.ID {
include = true
break
}
}
if !include {
reason := "filtering out device per user request"
slog.Info(reason, "id", gpuInfo.ID, "visible_devices", visibleDevices)
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})

slog.Info("filtering out device per user request", "id", gpuInfo.ID, "visible_devices", visibleDevices)
continue
}
}
@ -341,41 +320,25 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
if libDir == "" {
libDir, err = AMDValidateLibDir()
if err != nil {
err = fmt.Errorf("unable to verify rocm library: %w", err)
slog.Warn(err.Error())
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: err.Error(),
})
return nil, err
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
return nil
}
}
gpuInfo.DependencyPath = []string{libDir}
gpuInfo.DependencyPath = libDir

if gfxOverride == "" {
// Only load supported list once
if len(supported) == 0 {
supported, err = GetSupportedGFX(libDir)
if err != nil {
err = fmt.Errorf("failed to lookup supported GFX types: %w", err)
slog.Warn(err.Error())
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: err.Error(),
})
return nil, err
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
return nil
}
slog.Debug("rocm supported GPUs", "types", supported)
}
gfx := gpuInfo.Compute
if !slices.Contains[[]string, string](supported, gfx) {
reason := fmt.Sprintf("amdgpu is not supported (supported types:%s)", supported)
slog.Warn(reason, "gpu_type", gfx, "gpu", gpuInfo.ID, "library", libDir)
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})

slog.Warn("amdgpu is not supported", "gpu", gpuInfo.ID, "gpu_type", gfx, "library", libDir, "supported_types", supported)
// TODO - consider discrete markdown just for ROCM troubleshooting?
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/gpu.md#overrides for HSA_OVERRIDE_GFX_VERSION usage")
continue
@ -395,16 +358,13 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
resp = append(resp, gpuInfo)
}
if len(resp) == 0 {
err := fmt.Errorf("no compatible amdgpu devices detected")
slog.Info(err.Error())
return nil, err
slog.Info("no compatible amdgpu devices detected")
}
if err := verifyKFDDriverAccess(); err != nil {
err = fmt.Errorf("amdgpu devices detected but permission problems block access: %w", err)
slog.Error(err.Error())
return nil, err
slog.Error("amdgpu devices detected but permission problems block access", "error", err)
return nil
}
return resp, nil
return resp
}

// Quick check for AMD driver so we can skip amdgpu discovery if not present
@ -516,20 +476,3 @@ func verifyKFDDriverAccess() error {
fd.Close()
return nil
}

func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
ids := []string{}
for _, info := range gpuInfo {
if info.Library != "rocm" {
// TODO shouldn't happen if things are wired correctly...
slog.Debug("rocmGetVisibleDevicesEnv skipping over non-rocm device", "library", info.Library)
continue
}
ids = append(ids, info.ID)
}
// There are 3 potential env vars to use to select GPUs.
// ROCR_VISIBLE_DEVICES supports UUID or numeric so is our preferred on linux
// GPU_DEVICE_ORDINAL supports numeric IDs only
// HIP_VISIBLE_DEVICES supports numeric IDs only
return "ROCR_VISIBLE_DEVICES", strings.Join(ids, ",")
}
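For context on how the returned pair is consumed: the caller is expected to place `key=value` into the environment of the runner subprocess before that process does its own GPU discovery. A minimal sketch, assuming a hypothetical `launchRunner` helper and a placeholder `runner` binary name (neither is taken from this diff):

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

// launchRunner applies a GPU-selection variable such as
// ROCR_VISIBLE_DEVICES=0,1 (Linux) or HIP_VISIBLE_DEVICES=0,1 (Windows)
// to a child process. Both the function and the "runner" binary name
// are illustrative placeholders, not part of this change.
func launchRunner(key, value string, args ...string) error {
	cmd := exec.Command("runner", args...)
	// Inherit the parent environment, then append the selection variable
	// so the HIP runtime in the child only sees the chosen devices.
	cmd.Env = append(os.Environ(), key+"="+value)
	return cmd.Run()
}

func main() {
	// Hypothetical values; on Linux the discovery code above would hand
	// back ("ROCR_VISIBLE_DEVICES", "0,1") for the first two ROCm devices.
	if err := launchRunner("ROCR_VISIBLE_DEVICES", "0,1"); err != nil {
		log.Fatal(err)
	}
}
```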
@ -1,9 +1,8 @@
package discover
package gpu

import (
"bytes"
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
@ -27,13 +26,12 @@ var (
RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"} // TODO glob?
)

// Only called once during bootstrap
func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
func AMDGetGPUInfo() []RocmGPUInfo {
resp := []RocmGPUInfo{}
hl, err := NewHipLib()
if err != nil {
slog.Debug(err.Error())
return nil, err
return nil
}
defer hl.Release()

@ -43,18 +41,15 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
slog.Debug("error looking up amd driver version", "error", err)
}

// Note: the HIP library automatically handles subsetting to any *_VISIBLE_DEVICES the user specified
// Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified
count := hl.HipGetDeviceCount()
if count == 0 {
err := fmt.Errorf("no compatible amdgpu devices detected")
slog.Info(err.Error())
return nil, err
return nil
}
libDir, err := AMDValidateLibDir()
if err != nil {
err = fmt.Errorf("unable to verify rocm library: %w", err)
slog.Warn(err.Error())
return nil, err
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
return nil
}

var supported []string
@ -62,9 +57,8 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
if gfxOverride == "" {
supported, err = GetSupportedGFX(libDir)
if err != nil {
err = fmt.Errorf("failed to lookup supported GFX types: %w", err)
slog.Warn(err.Error())
return nil, err
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
return nil
}
} else {
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
@ -93,6 +87,21 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
slog.Debug("hip device", "id", i, "name", name, "gfx", gfx)
// slog.Info(fmt.Sprintf("[%d] Integrated: %d", i, props.iGPU)) // DOESN'T REPORT CORRECTLY! Always 0
// TODO Why isn't props.iGPU accurate!?
if strings.EqualFold(name, iGPUName) {
slog.Info("unsupported Radeon iGPU detected skipping", "id", i, "name", name, "gfx", gfx)
continue
}
if gfxOverride == "" {
// Strip off Target Features when comparing
if !slices.Contains[[]string, string](supported, strings.Split(gfx, ":")[0]) {
slog.Warn("amdgpu is not supported", "gpu", i, "gpu_type", gfx, "library", libDir, "supported_types", supported)
// TODO - consider discrete markdown just for ROCM troubleshooting?
slog.Warn("See https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for HSA_OVERRIDE_GFX_VERSION usage")
continue
} else {
slog.Debug("amdgpu is supported", "gpu", i, "gpu_type", gfx)
}
}

freeMemory, totalMemory, err := hl.HipMemGetInfo()
if err != nil {
@ -100,6 +109,14 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
continue
}

// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
if totalMemory < IGPUMemLimit {
slog.Info("amdgpu appears to be an iGPU, skipping", "gpu", i, "total", format.HumanBytes2(totalMemory))
continue
}

slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
gpuInfo := RocmGPUInfo{
GpuInfo: GpuInfo{
Library: "rocm",
@ -111,7 +128,7 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
UnreliableFreeMemory: true,

ID: strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices
DependencyPath: []string{libDir},
DependencyPath: libDir,
MinimumMemory: rocmMinimumMemory,
Name: name,
Compute: gfx,
@ -121,38 +138,10 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) {
index: i,
}

// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
if strings.EqualFold(name, iGPUName) || totalMemory < IGPUMemLimit {
reason := "unsupported Radeon iGPU detected skipping"
slog.Info(reason, "id", gpuInfo.ID, "total", format.HumanBytes2(totalMemory))
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})
continue
}

// Strip off Target Features when comparing
if !slices.Contains[[]string, string](supported, strings.Split(gfx, ":")[0]) {
reason := fmt.Sprintf("amdgpu is not supported (supported types:%s)", supported)
slog.Warn(reason, "gpu_type", gfx, "gpu", gpuInfo.ID, "library", libDir)
unsupportedGPUs = append(unsupportedGPUs, UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
Reason: reason,
})
// HSA_OVERRIDE_GFX_VERSION not supported on windows
continue
} else {
slog.Debug("amdgpu is supported", "gpu", i, "gpu_type", gfx)
}

slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))

resp = append(resp, gpuInfo)
}

return resp, nil
return resp
}

func AMDValidateLibDir() (string, error) {
@ -201,20 +190,3 @@ func (gpus RocmGPUInfoList) RefreshFreeMemory() error {
}
return nil
}

func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
ids := []string{}
for _, info := range gpuInfo {
if info.Library != "rocm" {
// TODO shouldn't happen if things are wired correctly...
slog.Debug("rocmGetVisibleDevicesEnv skipping over non-rocm device", "library", info.Library)
continue
}
ids = append(ids, info.ID)
}
// There are 3 potential env vars to use to select GPUs.
// ROCR_VISIBLE_DEVICES supports UUID or numeric but does not work on Windows
// HIP_VISIBLE_DEVICES supports numeric IDs only
// GPU_DEVICE_ORDINAL supports numeric IDs only
return "HIP_VISIBLE_DEVICES", strings.Join(ids, ",")
}
@ -1,4 +1,4 @@
package discover
package gpu

import (
"os"
@ -1,6 +1,6 @@
//go:build linux || windows

package discover
package gpu

import (
"log/slog"
@ -1,6 +1,6 @@
//go:build linux || windows

package discover
package gpu

/*
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
@ -54,13 +54,6 @@ var (
nvmlLibPath string
rocmGPUs []RocmGPUInfo
oneapiGPUs []OneapiGPUInfo

// If any discovered GPUs are incompatible, report why
unsupportedGPUs []UnsupportedGPUInfo

// Keep track of errors during bootstrapping so that if GPUs are missing
// that were expected to be present, this may explain why
bootstrapErrors []error
)

// With our current CUDA compile flags, older than 5.0 will not work properly
@ -77,17 +70,16 @@ func initCudaHandles() *cudaHandles {

cHandles := &cudaHandles{}
// Short Circuit if we already know which library to use
// ignore bootstrap errors in this case since we already recorded them
if nvmlLibPath != "" {
cHandles.nvml, _, _ = loadNVMLMgmt([]string{nvmlLibPath})
cHandles.nvml, _ = LoadNVMLMgmt([]string{nvmlLibPath})
return cHandles
}
if nvcudaLibPath != "" {
cHandles.deviceCount, cHandles.nvcuda, _, _ = loadNVCUDAMgmt([]string{nvcudaLibPath})
cHandles.deviceCount, cHandles.nvcuda, _ = LoadNVCUDAMgmt([]string{nvcudaLibPath})
return cHandles
}
if cudartLibPath != "" {
cHandles.deviceCount, cHandles.cudart, _, _ = loadCUDARTMgmt([]string{cudartLibPath})
cHandles.deviceCount, cHandles.cudart, _ = LoadCUDARTMgmt([]string{cudartLibPath})
return cHandles
}

@ -110,21 +102,18 @@ func initCudaHandles() *cudaHandles {
if len(NvmlGlobs) > 0 {
nvmlLibPaths := FindGPULibs(NvmlMgmtName, NvmlGlobs)
if len(nvmlLibPaths) > 0 {
nvml, libPath, err := loadNVMLMgmt(nvmlLibPaths)
nvml, libPath := LoadNVMLMgmt(nvmlLibPaths)
if nvml != nil {
slog.Debug("nvidia-ml loaded", "library", libPath)
cHandles.nvml = nvml
nvmlLibPath = libPath
}
if err != nil {
bootstrapErrors = append(bootstrapErrors, err)
}
}
}

nvcudaLibPaths := FindGPULibs(NvcudaMgmtName, nvcudaMgmtPatterns)
if len(nvcudaLibPaths) > 0 {
deviceCount, nvcuda, libPath, err := loadNVCUDAMgmt(nvcudaLibPaths)
deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
if nvcuda != nil {
slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
cHandles.nvcuda = nvcuda
@ -132,14 +121,11 @@ func initCudaHandles() *cudaHandles {
nvcudaLibPath = libPath
return cHandles
}
if err != nil {
bootstrapErrors = append(bootstrapErrors, err)
}
}

cudartLibPaths := FindGPULibs(CudartMgmtName, cudartMgmtPatterns)
if len(cudartLibPaths) > 0 {
deviceCount, cudart, libPath, err := loadCUDARTMgmt(cudartLibPaths)
deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
if cudart != nil {
slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
cHandles.cudart = cudart
@ -147,9 +133,6 @@ func initCudaHandles() *cudaHandles {
cudartLibPath = libPath
return cHandles
}
if err != nil {
bootstrapErrors = append(bootstrapErrors, err)
}
}

return cHandles
@ -160,19 +143,14 @@ func initOneAPIHandles() *oneapiHandles {
oHandles := &oneapiHandles{}

// Short Circuit if we already know which library to use
// ignore bootstrap errors in this case since we already recorded them
if oneapiLibPath != "" {
oHandles.deviceCount, oHandles.oneapi, _, _ = loadOneapiMgmt([]string{oneapiLibPath})
oHandles.deviceCount, oHandles.oneapi, _ = LoadOneapiMgmt([]string{oneapiLibPath})
return oHandles
}

oneapiLibPaths := FindGPULibs(OneapiMgmtName, OneapiGlobs)
if len(oneapiLibPaths) > 0 {
var err error
oHandles.deviceCount, oHandles.oneapi, oneapiLibPath, err = loadOneapiMgmt(oneapiLibPaths)
if err != nil {
bootstrapErrors = append(bootstrapErrors, err)
}
oHandles.deviceCount, oHandles.oneapi, oneapiLibPath = LoadOneapiMgmt(oneapiLibPaths)
}

return oHandles
@ -219,7 +197,6 @@ func GetGPUInfo() GpuInfoList {

if !bootstrapped {
slog.Info("looking for compatible GPUs")
bootstrapErrors = []error{}
needRefresh = false
cpuCapability = GetCPUCapability()
var memInfo C.mem_info_t
@ -228,34 +205,27 @@ func GetGPUInfo() GpuInfoList {
if err != nil {
slog.Warn("error looking up system memory", "error", err)
}
depPath := LibraryDir()
details, err := GetCPUDetails()
if err != nil {
slog.Warn("failed to lookup CPU details", "error", err)
}
cpus = []CPUInfo{
{
GpuInfo: GpuInfo{
memInfo: mem,
Library: "cpu",
Variant: cpuCapability.String(),
ID: "0",
DependencyPath: []string{depPath},
memInfo: mem,
Library: "cpu",
Variant: cpuCapability.String(),
ID: "0",
},
CPUs: details,
},
}

// Fallback to CPU mode if we're lacking required vector extensions on x86
if cpuCapability < GPURunnerCPUCapability && runtime.GOARCH == "amd64" {
err := fmt.Errorf("CPU does not have minimum vector extensions, GPU inference disabled. Required:%s Detected:%s", GPURunnerCPUCapability, cpuCapability)
slog.Warn(err.Error())
bootstrapErrors = append(bootstrapErrors, err)
slog.Warn("CPU does not have minimum vector extensions, GPU inference disabled", "required", GPURunnerCPUCapability, "detected", cpuCapability)
bootstrapped = true
// No need to do any GPU discovery, since we can't run on them
return GpuInfoList{cpus[0].GpuInfo}
}

depPath := LibraryDir()

// Load ALL libraries
cHandles = initCudaHandles()

@ -282,6 +252,10 @@ func GetGPUInfo() GpuInfoList {
C.free(unsafe.Pointer(memInfo.err))
continue
}
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
continue
}
gpuInfo.TotalMemory = uint64(memInfo.total)
gpuInfo.FreeMemory = uint64(memInfo.free)
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
@ -293,32 +267,21 @@ func GetGPUInfo() GpuInfoList {
gpuInfo.DriverMinor = driverMinor
variant := cudaVariant(gpuInfo)
if depPath != "" {
gpuInfo.DependencyPath = []string{depPath}
gpuInfo.DependencyPath = depPath
// Check for variant specific directory
if variant != "" {
if _, err := os.Stat(filepath.Join(depPath, "cuda_"+variant)); err == nil {
gpuInfo.DependencyPath = []string{filepath.Join(depPath, "cuda_"+variant), depPath}
gpuInfo.DependencyPath = filepath.Join(depPath, "cuda_"+variant)
}
}
}
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
gpuInfo.Variant = variant

if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
unsupportedGPUs = append(unsupportedGPUs,
UnsupportedGPUInfo{
GpuInfo: gpuInfo.GpuInfo,
})
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
continue
}

// query the management library as well so we can record any skew between the two
// which represents overhead on the GPU we must set aside on subsequent updates
if cHandles.nvml != nil {
uuid := C.CString(gpuInfo.ID)
defer C.free(unsafe.Pointer(uuid))
C.nvml_get_free(*cHandles.nvml, uuid, &memInfo.free, &memInfo.total, &memInfo.used)
C.nvml_get_free(*cHandles.nvml, C.int(gpuInfo.index), &memInfo.free, &memInfo.total, &memInfo.used)
if memInfo.err != nil {
slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
C.free(unsafe.Pointer(memInfo.err))
@ -370,17 +333,14 @@ func GetGPUInfo() GpuInfoList {
gpuInfo.FreeMemory = uint64(memInfo.free)
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
gpuInfo.DependencyPath = []string{depPath}
gpuInfo.DependencyPath = depPath
oneapiGPUs = append(oneapiGPUs, gpuInfo)
}
}
}
}

rocmGPUs, err = AMDGetGPUInfo()
if err != nil {
bootstrapErrors = append(bootstrapErrors, err)
}
rocmGPUs = AMDGetGPUInfo()
bootstrapped = true
if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 {
slog.Info("no compatible GPUs were discovered")
@ -419,9 +379,7 @@ func GetGPUInfo() GpuInfoList {
}
for i, gpu := range cudaGPUs {
if cHandles.nvml != nil {
uuid := C.CString(gpu.ID)
defer C.free(unsafe.Pointer(uuid))
C.nvml_get_free(*cHandles.nvml, uuid, &memInfo.free, &memInfo.total, &memInfo.used)
C.nvml_get_free(*cHandles.nvml, C.int(gpu.index), &memInfo.free, &memInfo.total, &memInfo.used)
} else if cHandles.cudart != nil {
C.cudart_bootstrap(*cHandles.cudart, C.int(gpu.index), &memInfo)
} else if cHandles.nvcuda != nil {
@ -567,114 +525,92 @@ func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
return gpuLibPaths
}

// Bootstrap the runtime library
// Returns: num devices, handle, libPath, error
func loadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string, error) {
func LoadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string) {
var resp C.cudart_init_resp_t
resp.ch.verbose = getVerboseState()
var err error
for _, libPath := range cudartLibPaths {
lib := C.CString(libPath)
defer C.free(unsafe.Pointer(lib))
C.cudart_init(lib, &resp)
if resp.err != nil {
err = fmt.Errorf("Unable to load cudart library %s: %s", libPath, C.GoString(resp.err))
slog.Debug(err.Error())
slog.Debug("Unable to load cudart", "library", libPath, "error", C.GoString(resp.err))
C.free(unsafe.Pointer(resp.err))
} else {
err = nil
return int(resp.num_devices), &resp.ch, libPath, err
return int(resp.num_devices), &resp.ch, libPath
}
}
return 0, nil, "", err
return 0, nil, ""
}

// Bootstrap the driver library
// Returns: num devices, handle, libPath, error
func loadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string, error) {
func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
var resp C.nvcuda_init_resp_t
resp.ch.verbose = getVerboseState()
var err error
for _, libPath := range nvcudaLibPaths {
lib := C.CString(libPath)
defer C.free(unsafe.Pointer(lib))
C.nvcuda_init(lib, &resp)
if resp.err != nil {
// Decide what log level based on the type of error message to help users understand why
msg := C.GoString(resp.err)
switch resp.cudaErr {
case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH:
err = fmt.Errorf("version mismatch between driver and cuda driver library - reboot or upgrade may be required: library %s", libPath)
slog.Warn(err.Error())
slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg)
case C.CUDA_ERROR_NO_DEVICE:
err = fmt.Errorf("no nvidia devices detected by library %s", libPath)
slog.Info(err.Error())
slog.Info("no nvidia devices detected", "library", libPath)
case C.CUDA_ERROR_UNKNOWN:
err = fmt.Errorf("unknown error initializing cuda driver library %s: %s. see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information", libPath, C.GoString(resp.err))
slog.Warn(err.Error())
slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg)
slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information")
default:
msg := C.GoString(resp.err)
if strings.Contains(msg, "wrong ELF class") {
slog.Debug("skipping 32bit library", "library", libPath)
} else {
err = fmt.Errorf("Unable to load cudart library %s: %s", libPath, C.GoString(resp.err))
slog.Info(err.Error())
slog.Info("unable to load cuda driver library", "library", libPath, "error", msg)
}
}
C.free(unsafe.Pointer(resp.err))
} else {
err = nil
return int(resp.num_devices), &resp.ch, libPath, err
return int(resp.num_devices), &resp.ch, libPath
}
}
return 0, nil, "", err
return 0, nil, ""
}

// Bootstrap the management library
// Returns: handle, libPath, error
func loadNVMLMgmt(nvmlLibPaths []string) (*C.nvml_handle_t, string, error) {
func LoadNVMLMgmt(nvmlLibPaths []string) (*C.nvml_handle_t, string) {
var resp C.nvml_init_resp_t
resp.ch.verbose = getVerboseState()
var err error
for _, libPath := range nvmlLibPaths {
lib := C.CString(libPath)
defer C.free(unsafe.Pointer(lib))
C.nvml_init(lib, &resp)
if resp.err != nil {
err = fmt.Errorf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err))
slog.Info(err.Error())
slog.Info(fmt.Sprintf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err)))
C.free(unsafe.Pointer(resp.err))
} else {
err = nil
return &resp.ch, libPath, err
return &resp.ch, libPath
}
}
return nil, "", err
return nil, ""
}

// bootstrap the Intel GPU library
// Returns: num devices, handle, libPath, error
func loadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string, error) {
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
var resp C.oneapi_init_resp_t
num_devices := 0
resp.oh.verbose = getVerboseState()
var err error
for _, libPath := range oneapiLibPaths {
lib := C.CString(libPath)
defer C.free(unsafe.Pointer(lib))
C.oneapi_init(lib, &resp)
if resp.err != nil {
err = fmt.Errorf("Unable to load oneAPI management library %s: %s", libPath, C.GoString(resp.err))
slog.Debug(err.Error())
slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
C.free(unsafe.Pointer(resp.err))
} else {
err = nil
for i := range resp.oh.num_drivers {
num_devices += int(C.oneapi_get_device_count(resp.oh, C.int(i)))
}
return num_devices, &resp.oh, libPath, err
return num_devices, &resp.oh, libPath
}
}
return 0, nil, "", err
return 0, nil, ""
}

func getVerboseState() C.uint16_t {
@ -732,23 +668,3 @@ func LibraryDir() string {
slog.Warn("unable to locate gpu dependency libraries")
return ""
}

func GetSystemInfo() SystemInfo {
gpus := GetGPUInfo()
gpuMutex.Lock()
defer gpuMutex.Unlock()
discoveryErrors := []string{}
for _, err := range bootstrapErrors {
discoveryErrors = append(discoveryErrors, err.Error())
}
if len(gpus) == 1 && gpus[0].Library == "cpu" {
gpus = []GpuInfo{}
}

return SystemInfo{
System: cpus[0],
GPUs: gpus,
UnsupportedGPUs: unsupportedGPUs,
DiscoveryErrors: discoveryErrors,
}
}
@ -1,6 +1,6 @@
//go:build darwin

package discover
package gpu

/*
#cgo CFLAGS: -x objective-c
@ -10,9 +10,7 @@ package discover
import "C"

import (
"log/slog"
"runtime"
"syscall"

"github.com/ollama/ollama/format"
)
@ -68,34 +66,3 @@ func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
// No-op on darwin
return "", ""
}

func GetSystemInfo() SystemInfo {
mem, _ := GetCPUMem()
query := "hw.perflevel0.physicalcpu"
perfCores, err := syscall.SysctlUint32(query)
if err != nil {
slog.Warn("failed to discover physical CPU details", "query", query, "error", err)
}
query = "hw.perflevel1.physicalcpu"
efficiencyCores, _ := syscall.SysctlUint32(query) // On x86 xeon this won't return data

// Determine thread count
query = "hw.logicalcpu"
logicalCores, _ := syscall.SysctlUint32(query)

return SystemInfo{
System: CPUInfo{
GpuInfo: GpuInfo{
memInfo: mem,
},
CPUs: []CPU{
{
CoreCount: int(perfCores + efficiencyCores),
EfficiencyCoreCount: int(efficiencyCores),
ThreadCount: int(logicalCores),
},
},
},
GPUs: GetGPUInfo(),
}
}
@ -4,7 +4,6 @@
#include "gpu_info_nvcuda.h"

void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
LOG(resp->ch.verbose, "initializing %s\n", nvcuda_lib_path);
CUresult ret;
resp->err = NULL;
resp->num_devices = 0;
@ -58,10 +57,8 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
resp->cudaErr = -1;
return;
}
LOG(resp->ch.verbose, "dlsym: %s - %p\n", l[i].s, *l[i].p);
}

LOG(resp->ch.verbose, "calling cuInit\n");
ret = (*resp->ch.cuInit)(0);
if (ret != CUDA_SUCCESS) {
LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
@ -78,18 +75,15 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
resp->ch.driver_minor = 0;

// Report driver version if we're in verbose mode, ignore errors
LOG(resp->ch.verbose, "calling cuDriverGetVersion\n");
ret = (*resp->ch.cuDriverGetVersion)(&version);
if (ret != CUDA_SUCCESS) {
LOG(resp->ch.verbose, "cuDriverGetVersion failed: %d\n", ret);
} else {
LOG(resp->ch.verbose, "raw version 0x%x\n", version);
resp->ch.driver_major = version / 1000;
resp->ch.driver_minor = (version - (resp->ch.driver_major * 1000)) / 10;
LOG(resp->ch.verbose, "CUDA driver version: %d.%d\n", resp->ch.driver_major, resp->ch.driver_minor);
}

LOG(resp->ch.verbose, "calling cuDeviceGetCount\n");
ret = (*resp->ch.cuDeviceGetCount)(&resp->num_devices);
if (ret != CUDA_SUCCESS) {
LOG(resp->ch.verbose, "cuDeviceGetCount err: %d\n", ret);
@ -100,7 +94,6 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
resp->cudaErr = ret;
return;
}
LOG(resp->ch.verbose, "device count %d\n", resp->num_devices);
}

const int buflen = 256;
@ -17,7 +17,7 @@ void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp) {
} l[] = {
{"nvmlInit_v2", (void *)&resp->ch.nvmlInit_v2},
{"nvmlShutdown", (void *)&resp->ch.nvmlShutdown},
{"nvmlDeviceGetHandleByUUID", (void *)&resp->ch.nvmlDeviceGetHandleByUUID},
{"nvmlDeviceGetHandleByIndex", (void *)&resp->ch.nvmlDeviceGetHandleByIndex},
{"nvmlDeviceGetMemoryInfo", (void *)&resp->ch.nvmlDeviceGetMemoryInfo},
{NULL, NULL},
};
@ -67,20 +67,20 @@ void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp) {
}


void nvml_get_free(nvml_handle_t h, char *uuid, uint64_t *free, uint64_t *total, uint64_t *used) {
void nvml_get_free(nvml_handle_t h, int device_id, uint64_t *free, uint64_t *total, uint64_t *used) {
nvmlDevice_t device;
nvmlMemory_t memInfo = {0};
nvmlReturn_t ret;
ret = (*h.nvmlDeviceGetHandleByUUID)((const char *)(uuid), &device);
ret = (*h.nvmlDeviceGetHandleByIndex)(device_id, &device);
if (ret != NVML_SUCCESS) {
LOG(1, "unable to get device handle %s: %d", uuid, ret);
LOG(1, "unable to get device handle %d: %d", device_id, ret);
*free = 0;
return;
}

ret = (*h.nvmlDeviceGetMemoryInfo)(device, &memInfo);
if (ret != NVML_SUCCESS) {
LOG(1, "device memory info lookup failure %s: %d", uuid, ret);
LOG(1, "device memory info lookup failure %d: %d", device_id, ret);
*free = 0;
return;
}
@ -25,7 +25,7 @@ typedef struct nvml_handle {
uint16_t verbose;
nvmlReturn_t (*nvmlInit_v2)(void);
nvmlReturn_t (*nvmlShutdown)(void);
nvmlReturn_t (*nvmlDeviceGetHandleByUUID)(const char *, nvmlDevice_t *);
nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(unsigned int, nvmlDevice_t *);
nvmlReturn_t (*nvmlDeviceGetMemoryInfo)(nvmlDevice_t, nvmlMemory_t *);
} nvml_handle_t;

@ -41,7 +41,7 @@ typedef struct nvml_compute_capability {
} nvml_compute_capability_t;

void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp);
void nvml_get_free(nvml_handle_t ch, char *uuid, uint64_t *free, uint64_t *total, uint64_t *used);
void nvml_get_free(nvml_handle_t ch, int device_id, uint64_t *free, uint64_t *total, uint64_t *used);
void nvml_release(nvml_handle_t ch);

#endif // __GPU_INFO_NVML_H__
92
gpu/gpu_linux.go
Normal file
@ -0,0 +1,92 @@
package gpu

import (
"bufio"
"fmt"
"os"
"strings"

"github.com/ollama/ollama/format"
)

var CudartGlobs = []string{
"/usr/local/cuda/lib64/libcudart.so*",
"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
"/usr/lib/x86_64-linux-gnu/libcudart.so*",
"/usr/lib/wsl/lib/libcudart.so*",
"/usr/lib/wsl/drivers/*/libcudart.so*",
"/opt/cuda/lib64/libcudart.so*",
"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
"/usr/lib/aarch64-linux-gnu/libcudart.so*",
"/usr/local/cuda/lib*/libcudart.so*",
"/usr/lib*/libcudart.so*",
"/usr/local/lib*/libcudart.so*",
}

var NvmlGlobs = []string{}

var NvcudaGlobs = []string{
"/usr/local/cuda*/targets/*/lib/libcuda.so*",
"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
"/usr/lib/*-linux-gnu/libcuda.so*",
"/usr/lib/wsl/lib/libcuda.so*",
"/usr/lib/wsl/drivers/*/libcuda.so*",
"/opt/cuda/lib*/libcuda.so*",
"/usr/local/cuda/lib*/libcuda.so*",
"/usr/lib*/libcuda.so*",
"/usr/local/lib*/libcuda.so*",
}

var OneapiGlobs = []string{
"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
"/usr/lib*/libze_intel_gpu.so*",
}

var (
CudartMgmtName = "libcudart.so*"
NvcudaMgmtName = "libcuda.so*"
NvmlMgmtName = "" // not currently wired on linux
OneapiMgmtName = "libze_intel_gpu.so*"
)

func GetCPUMem() (memInfo, error) {
var mem memInfo
var total, available, free, buffers, cached, freeSwap uint64
f, err := os.Open("/proc/meminfo")
if err != nil {
return mem, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
line := s.Text()
switch {
case strings.HasPrefix(line, "MemTotal:"):
_, err = fmt.Sscanf(line, "MemTotal:%d", &total)
case strings.HasPrefix(line, "MemAvailable:"):
_, err = fmt.Sscanf(line, "MemAvailable:%d", &available)
case strings.HasPrefix(line, "MemFree:"):
_, err = fmt.Sscanf(line, "MemFree:%d", &free)
case strings.HasPrefix(line, "Buffers:"):
_, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
case strings.HasPrefix(line, "Cached:"):
_, err = fmt.Sscanf(line, "Cached:%d", &cached)
case strings.HasPrefix(line, "SwapFree:"):
_, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap)
default:
continue
}
if err != nil {
return mem, err
}
}
mem.TotalMemory = total * format.KibiByte
mem.FreeSwap = freeSwap * format.KibiByte
if available > 0 {
mem.FreeMemory = available * format.KibiByte
} else {
mem.FreeMemory = (free + buffers + cached) * format.KibiByte
}
return mem, nil
}
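A note on the fallback above: on kernels that lack a `MemAvailable` row in `/proc/meminfo`, free memory is approximated as `MemFree + Buffers + Cached`. A hedged sketch of a caller, reusing this package's `GetCPUMem` and the `format` helpers that appear elsewhere in this diff (the helper itself is hypothetical):

```go
package gpu

import (
	"log/slog"

	"github.com/ollama/ollama/format"
)

// logSystemMemory is a hypothetical caller shown only to illustrate the
// memInfo fields populated by GetCPUMem above; it is not part of this diff.
func logSystemMemory() {
	mem, err := GetCPUMem()
	if err != nil {
		slog.Warn("meminfo lookup failed", "error", err)
		return
	}
	slog.Info("system memory",
		"total", format.HumanBytes2(mem.TotalMemory),
		"free", format.HumanBytes2(mem.FreeMemory),
		"free_swap", format.HumanBytes2(mem.FreeSwap))
}
```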
@ -1,6 +1,6 @@
//go:build linux || windows

package discover
package gpu

import (
"log/slog"
@ -1,4 +1,4 @@
package discover
package gpu

import (
"runtime"
57
gpu/gpu_windows.go
Normal file
@ -0,0 +1,57 @@
package gpu

import (
"fmt"
"syscall"
"unsafe"
)

type MEMORYSTATUSEX struct {
length uint32
MemoryLoad uint32
TotalPhys uint64
AvailPhys uint64
TotalPageFile uint64
AvailPageFile uint64
TotalVirtual uint64
AvailVirtual uint64
AvailExtendedVirtual uint64
}

var (
k32 = syscall.NewLazyDLL("kernel32.dll")
globalMemoryStatusExProc = k32.NewProc("GlobalMemoryStatusEx")
sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MEMORYSTATUSEX{}))
)

var CudartGlobs = []string{
"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
}

var NvmlGlobs = []string{
"c:\\Windows\\System32\\nvml.dll",
}

var NvcudaGlobs = []string{
"c:\\windows\\system*\\nvcuda.dll",
}

var OneapiGlobs = []string{
"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
}

var (
CudartMgmtName = "cudart64_*.dll"
NvcudaMgmtName = "nvcuda.dll"
NvmlMgmtName = "nvml.dll"
OneapiMgmtName = "ze_intel_gpu64.dll"
)

func GetCPUMem() (memInfo, error) {
memStatus := MEMORYSTATUSEX{length: sizeofMemoryStatusEx}
r1, _, err := globalMemoryStatusExProc.Call(uintptr(unsafe.Pointer(&memStatus)))
if r1 == 0 {
return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
}
return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil
}
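One Win32 detail worth calling out: `GlobalMemoryStatusEx` rejects the call (returning 0) unless the struct's length field is pre-populated with the size of `MEMORYSTATUSEX`, which is why the literal above sets `length`. A minimal sketch of a caller, mirroring the Linux example (the helper is hypothetical):

```go
package gpu

import "log/slog"

// logWindowsMemory is illustrative only. GetCPUMem above surfaces r1 == 0
// as an error, which is what GlobalMemoryStatusEx would return if the
// MEMORYSTATUSEX length field were left zero.
func logWindowsMemory() {
	mem, err := GetCPUMem()
	if err != nil {
		slog.Warn("windows memory lookup failed", "error", err)
		return
	}
	slog.Info("system memory", "total_bytes", mem.TotalMemory, "free_bytes", mem.FreeMemory)
}
```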
@ -1,4 +1,4 @@
package discover
package gpu

import (
"fmt"
@ -10,11 +10,11 @@ import (
type memInfo struct {
TotalMemory uint64 `json:"total_memory,omitempty"`
FreeMemory uint64 `json:"free_memory,omitempty"`
FreeSwap uint64 `json:"free_swap,omitempty"` // TODO split this out for system only
FreeSwap uint64 `json:"free_swap,omitempty"`
}

// Beginning of an `ollama info` command
type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?
type GpuInfo struct {
memInfo
Library string `json:"library,omitempty"`

@ -25,7 +25,7 @@ type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?
MinimumMemory uint64 `json:"-"`

// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
DependencyPath []string `json:"lib_path,omitempty"`
DependencyPath string `json:"lib_path,omitempty"`

// Extra environment variables specific to the GPU as list of [key,value]
EnvWorkarounds [][2]string `json:"envs,omitempty"`
@ -49,17 +49,6 @@ type GpuInfo struct { // TODO better name maybe "InferenceProcessor"?

type CPUInfo struct {
GpuInfo
CPUs []CPU
}

// CPU type represents a CPU Package occupying a socket
type CPU struct {
ID string `cpuinfo:"processor"`
VendorID string `cpuinfo:"vendor_id"`
ModelName string `cpuinfo:"model name"`
CoreCount int
EfficiencyCoreCount int // Performance = CoreCount - Efficiency
ThreadCount int
}

type CudaGPUInfo struct {
@ -87,11 +76,6 @@ type OneapiGPUInfoList []OneapiGPUInfo

type GpuInfoList []GpuInfo

type UnsupportedGPUInfo struct {
GpuInfo
Reason string `json:"reason"`
}

// Split up the set of GPU infos by Library and variant
func (l GpuInfoList) ByLibrary() []GpuInfoList {
resp := []GpuInfoList{}
@ -162,24 +146,3 @@ func (c CPUCapability) String() string {
return "no vector extensions"
}
}

type SystemInfo struct {
System CPUInfo `json:"system"`
GPUs []GpuInfo `json:"gpus"`
UnsupportedGPUs []UnsupportedGPUInfo `json:"unsupported_gpus"`
DiscoveryErrors []string `json:"discovery_errors"`
}

// Return the optimal number of threads to use for inference
func (si SystemInfo) GetOptimalThreadCount() int {
if len(si.System.CPUs) == 0 {
return 0
}

coreCount := 0
for _, c := range si.System.CPUs {
coreCount += c.CoreCount - c.EfficiencyCoreCount
}

return coreCount
}
@ -30,48 +30,6 @@ func TestOrcaMiniBlueSky(t *testing.T) {
GenerateTestHelper(ctx, t, req, []string{"rayleigh", "scattering"})
}

func TestUnicode(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancel()
// Set up the test data
req := api.GenerateRequest{
// DeepSeek has a Unicode tokenizer regex, making it a unicode torture test
Model: "deepseek-coder-v2:16b-lite-instruct-q2_K",
Prompt: "天空为什么是蓝色的?",
Stream: &stream,
Options: map[string]interface{}{
"temperature": 0,
"seed": 123,
// Workaround deepseek context shifting bug
"num_ctx": 8192,
"num_predict": 2048,
},
}
client, _, cleanup := InitServerConnection(ctx, t)
defer cleanup()
require.NoError(t, PullIfMissing(ctx, client, req.Model))
DoGenerate(ctx, t, client, req, []string{"散射", "频率"}, 120*time.Second, 120*time.Second)
}

func TestExtendedUnicodeOutput(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
// Set up the test data
req := api.GenerateRequest{
Model: "gemma2:2b",
Prompt: "Output some smily face emoji",
Stream: &stream,
Options: map[string]interface{}{
"temperature": 0,
"seed": 123,
},
}
client, _, cleanup := InitServerConnection(ctx, t)
defer cleanup()
require.NoError(t, PullIfMissing(ctx, client, req.Model))
DoGenerate(ctx, t, client, req, []string{"😀", "😊", "😁", "😂", "😄", "😃"}, 120*time.Second, 120*time.Second)
}

func TestUnicodeModelDir(t *testing.T) {
// This is only useful for Windows with utf-16 characters, so skip this test for other platforms
if runtime.GOOS != "windows" {
@ -42,7 +42,7 @@ func TestMultiModelConcurrency(t *testing.T) {
}
resp = [2][]string{
{"sunlight"},
{"england", "english", "massachusetts", "pilgrims", "british", "festival"},
{"england", "english", "massachusetts", "pilgrims", "british"},
}
)
var wg sync.WaitGroup
@ -60,8 +60,7 @@ func TestMultiModelConcurrency(t *testing.T) {
for i := 0; i < len(req); i++ {
go func(i int) {
defer wg.Done()
// Note: CPU based inference can crawl so don't give up too quickly
DoGenerate(ctx, t, client, req[i], resp[i], 90*time.Second, 30*time.Second)
DoGenerate(ctx, t, client, req[i], resp[i], 60*time.Second, 10*time.Second)
}(i)
}
wg.Wait()
File diff suppressed because one or more lines are too long
@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/require"
)

func TestIntegrationLlava(t *testing.T) {
func TestIntegrationMultimodal(t *testing.T) {
image, err := base64.StdEncoding.DecodeString(imageEncoding)
require.NoError(t, err)
req := api.GenerateRequest{
@ -39,33 +39,6 @@ func TestIntegrationLlava(t *testing.T) {
DoGenerate(ctx, t, client, req, []string{resp}, 120*time.Second, 30*time.Second)
}

func TestIntegrationMllama(t *testing.T) {
image, err := base64.StdEncoding.DecodeString(imageEncoding)
require.NoError(t, err)
req := api.GenerateRequest{
// TODO fix up once we publish the final image
Model: "x/llama3.2-vision",
Prompt: "what does the text in this image say?",
Stream: &stream,
Options: map[string]interface{}{
"seed": 42,
"temperature": 0.0,
},
Images: []api.ImageData{
image,
},
}

resp := "the ollamas"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
client, _, cleanup := InitServerConnection(ctx, t)
defer cleanup()
require.NoError(t, PullIfMissing(ctx, client, req.Model))
// mllama models on CPU can be quite slow to start,
DoGenerate(ctx, t, client, req, []string{resp}, 240*time.Second, 30*time.Second)
}

const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
AAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAANKgAwAEAAAAAQAA
AHgAAAAAXdsepgAAAAlwSFlzAAALEwAACxMBAJqcGAAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6
@ -275,7 +275,7 @@ func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq api.GenerateRequest,
break
}
}
require.True(t, atLeastOne, "%s: none of %v found in %s", genReq.Model, anyResp, response)
require.True(t, atLeastOne, "none of %v found in %s", anyResp, response)
slog.Info("test pass", "model", genReq.Model, "prompt", genReq.Prompt, "contains", anyResp, "response", response)
case <-ctx.Done():
t.Error("outer test context done while waiting for generate")
3
llama/.gitignore
vendored
@ -1,3 +0,0 @@
*.bin
*.gguf
build/
@ -1,57 +0,0 @@
# top level makefile for Go server
include make/common-defs.make

RUNNER_TARGETS := default

# Determine which if any GPU runners we should build
ifeq ($(OS),windows)
	CUDA_PATH?=$(shell cygpath -m -s "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\" 2>/dev/null)unknown
	CUDA_BASE_DIR := $(dir $(shell cygpath -m -s "$(CUDA_PATH)\\.." 2>/dev/null))
	CUDA_11:=$(shell ls -d $(CUDA_BASE_DIR)/v11.? 2>/dev/null)
	CUDA_12:=$(shell ls -d $(CUDA_BASE_DIR)/v12.? 2>/dev/null)
	HIP_LIB_DIR := $(shell ls -d $(HIP_PATH)/lib 2>/dev/null)
else ifeq ($(OS),linux)
	HIP_PATH?=/opt/rocm
	HIP_LIB_DIR := $(shell ls -d $(HIP_PATH)/lib 2>/dev/null)
	CUDA_PATH?=/usr/local/cuda
	CUDA_11:=$(shell ls -d $(CUDA_PATH)-11 2>/dev/null)
	CUDA_12:=$(shell ls -d $(CUDA_PATH)-12 2>/dev/null)
endif

ifeq ($(OLLAMA_SKIP_CUDA_GENERATE),)
ifneq ($(CUDA_11),)
	RUNNER_TARGETS += cuda_v11
endif
ifneq ($(CUDA_12),)
	RUNNER_TARGETS += cuda_v12
endif
endif
ifeq ($(OLLAMA_SKIP_ROCM_GENERATE),)
ifneq ($(HIP_LIB_DIR),)
	RUNNER_TARGETS += rocm
endif
endif


all: clean-payload .WAIT runners

runners: $(RUNNER_TARGETS)

$(RUNNER_TARGETS):
	$(MAKE) -f make/Makefile.$@

help-sync apply-patches create-patches sync:
	$(MAKE) -f make/Makefile.sync $@

clean:
	rm -rf $(BUILD_DIR) $(DIST_RUNNERS) $(PAYLOAD_RUNNERS)
	go clean -cache

clean-payload:
	rm -rf $(addprefix $(RUNNERS_PAYLOAD_DIR)/, $(RUNNER_TARGETS) metal cpu cpu_avx cpu_avx2)

.PHONY: all runners clean clean-payload $(RUNNER_TARGETS) .WAIT

# Handy debugging for make variables
print-%:
	@echo '$*=$($*)'
160
llama/README.md
@ -1,160 +0,0 @@
# `llama`
|
||||
|
||||
This package integrates the [llama.cpp](https://github.com/ggerganov/llama.cpp) library as a Go package and makes it easy to build it with tags for different CPU and GPU processors.
|
||||
|
||||
Supported:
|
||||
|
||||
- [x] CPU
|
||||
- [x] avx, avx2
|
||||
- [x] macOS Metal
|
||||
- [x] Windows CUDA
|
||||
- [x] Windows ROCm
|
||||
- [x] Linux CUDA
|
||||
- [x] Linux ROCm
|
||||
- [x] Llava
|
||||
|
||||
Extra build steps are required for CUDA and ROCm on Windows since `nvcc` and `hipcc` both require using msvc as the host compiler. For these shared libraries are created:
|
||||
|
||||
- `ggml_cuda.dll` on Windows or `ggml_cuda.so` on Linux
|
||||
- `ggml_hipblas.dll` on Windows or `ggml_hipblas.so` on Linux
|
||||
|
||||
> Note: it's important that memory is allocated and freed by the same compiler (e.g. entirely by code compiled with msvc or mingw). Issues from this should be rare, but there are some places where pointers are returned by the CUDA or HIP runtimes and freed elsewhere, causing a a crash. In a future change the same runtime should be used in both cases to avoid crashes.
|
||||
|
||||
## Building
|
||||
|
||||
```
|
||||
go build .
|
||||
```
|
||||
|
||||
### AVX
|
||||
|
||||
```shell
|
||||
go build -tags avx .
|
||||
```
|
||||
|
||||
### AVX2
|
||||
|
||||
```shell
|
||||
# go doesn't recognize `-mfma` as a valid compiler flag
|
||||
# see https://github.com/golang/go/issues/17895
|
||||
go env -w "CGO_CFLAGS_ALLOW=-mfma|-mf16c"
|
||||
go env -w "CGO_CXXFLAGS_ALLOW=-mfma|-mf16c"
|
||||
go build -tags=avx,avx2 .
|
||||
```
|
||||
|
||||
## Linux
|
||||
|
||||
### CUDA
|
||||
|
||||
Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive):
|
||||
|
||||
```shell
|
||||
make ggml_cuda.so
|
||||
go build -tags avx,cuda .
|
||||
```
|
||||
|
||||
### ROCm
|
||||
|
||||
Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive):
|
||||
|
||||
```shell
|
||||
make ggml_hipblas.so
|
||||
go build -tags avx,rocm .
|
||||
```
|
||||
|
||||
## Windows
|
||||
|
||||
Download [w64devkit](https://github.com/skeeto/w64devkit/releases/latest) for a simple MinGW development environment.
|
||||
|
||||
### CUDA
|
||||
|
||||
Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive) then build the cuda code:
|
||||
|
||||
```shell
|
||||
make ggml_cuda.dll
|
||||
go build -tags avx,cuda .
|
||||
```
|
||||
|
||||
### ROCm
|
||||
|
||||
Install [ROCm 5.7.1](https://rocm.docs.amd.com/en/docs-5.7.1/).
|
||||
|
||||
```shell
|
||||
make ggml_hipblas.dll
|
||||
go build -tags avx,rocm .
|
||||
```
|
||||
|
||||
## Building runners
|
||||
|
||||
```shell
|
||||
# build all runners for this platform
|
||||
make -j
|
||||
```
|
||||
|
||||
## Vendoring
|
||||
|
||||
Ollama currently vendors [llama.cpp](https://github.com/ggerganov/llama.cpp/) and [ggml](https://github.com/ggerganov/ggml) through a vendoring model. While we generally strive to contribute changes back upstream to avoid drift, we cary a small set of patches which are applied to the tracking commit. A set of make targets are available to aid developers in updating to a newer tracking commit, or to work on changes.
|
||||
|
||||
If you update the vendoring code, start by running the following command to establish the tracking llama.cpp repo in the `./vendor/` directory.
|
||||
|
||||
```
|
||||
make apply-patches
|
||||
```
|
||||
|
||||
### Updating Base Commit
|
||||
|
||||
**Pin to new base commit**
|
||||
|
||||
To update to a newer base commit, select the upstream git tag or commit and update `llama/vendoring.env`
|
||||
|
||||
#### Applying patches
|
||||
|
||||
When updating to a newer base commit, the existing patches may not apply cleanly and require manual merge resolution.
|
||||
|
||||
Start by applying the patches. If any of the patches have conflicts, the `git am` will stop at the first failure.
|
||||
|
||||
```
|
||||
make apply-patches
|
||||
```
|
||||
|
||||
If you see an error message about a conflict, go into the `./vendor/` directory, and perform merge resolution using your preferred tool to the patch commit which failed. Save the file(s) and continue the patch series with `git am --continue` . If any additional patches fail, follow the same pattern until the full patch series is applied. Once finished, run a final `create-patches` and `sync` target to ensure everything is updated.
|
||||
|
||||
```
|
||||
make create-patches sync
|
||||
```
|
||||
|
||||
Build and test Ollama, and make any necessary changes to the Go code based on the new base commit. Submit your PR to the Ollama repo.

### Generating Patches

When working on new fixes or features that impact vendored code, use the following workflow. First get a clean tracking repo with all current patches applied:

```shell
make apply-patches
```

Now edit the upstream native code in the `./vendor/` directory. You do not need to commit every change in order to build; a dirty working tree in the tracking repo is OK while developing. Simply save in your editor, and run the following to refresh the vendored code with your changes, build the backend(s), and build ollama:

```shell
make sync
make -j 8
go build .
```

> [!IMPORTANT]
> Do **NOT** run `apply-patches` while you're iterating, as that will reset the tracking repo. It will detect a dirty tree and abort, but if your tree is clean and you accidentally ran this target, use `git reflog` to recover your commit(s).

Iterate until you're ready to submit PRs. Once your code is ready, commit a change in the `./vendor/` directory, then generate the patches for ollama with:

```shell
make create-patches
```

> [!IMPORTANT]
> Once you have completed this step, it is safe to run `apply-patches` since your change is preserved in the patches.
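
Put together, that step might look like this (a sketch; the path and commit message are illustrative):

```shell
cd vendor
git add examples/server/server.cpp   # illustrative path
git commit -m "describe your change"
cd ..
make create-patches
```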

In your `./vendor/` directory, create a branch and cherry-pick the new commit onto that branch, then submit a PR upstream to llama.cpp.
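
A minimal sketch of that upstream submission flow (the branch name and commit SHA are placeholders):

```shell
cd vendor
git checkout -b my-upstream-fix
git cherry-pick <your-commit-sha>
# push the branch to your llama.cpp fork and open the PR upstream
```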

Commit the changes in the ollama repo and submit a PR to Ollama, which will include the vendored code update with your change, along with the patches.

After your PR upstream is merged, follow the **Updating Base Commit** instructions above, but remove your patch first, before running `apply-patches`, since the new base commit already contains your change.

392
llama/base64.hpp
@ -1,392 +0,0 @@
/*
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org>
*/

#ifndef PUBLIC_DOMAIN_BASE64_HPP_
#define PUBLIC_DOMAIN_BASE64_HPP_

#include <cstdint>
#include <iterator>
#include <stdexcept>
#include <string>

class base64_error : public std::runtime_error
{
public:
    using std::runtime_error::runtime_error;
};

class base64
{
public:
    enum class alphabet
    {
        /** the alphabet is detected automatically */
        auto_,
        /** the standard base64 alphabet is used */
        standard,
        /** like `standard` except that the characters `+` and `/` are replaced by `-` and `_` respectively*/
        url_filename_safe
    };

    enum class decoding_behavior
    {
        /** if the input is not padded, the remaining bits are ignored */
        moderate,
        /** if a padding character is encounter decoding is finished */
        loose
    };

    /**
     Encodes all the elements from `in_begin` to `in_end` to `out`.

     @warning The source and destination cannot overlap. The destination must be able to hold at least
     `required_encode_size(std::distance(in_begin, in_end))`, otherwise the behavior depends on the output iterator.

     @tparam Input_iterator the source; the returned elements are cast to `std::uint8_t` and should not be greater than
     8 bits
     @tparam Output_iterator the destination; the elements written to it are from the type `char`
     @param in_begin the beginning of the source
     @param in_end the ending of the source
     @param out the destination iterator
     @param alphabet which alphabet should be used
     @returns the iterator to the next element past the last element copied
     @throws see `Input_iterator` and `Output_iterator`
    */
    template<typename Input_iterator, typename Output_iterator>
    static Output_iterator encode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out,
                                  alphabet alphabet = alphabet::standard)
    {
        constexpr auto pad = '=';
        const char* alpha = alphabet == alphabet::url_filename_safe
                                ? "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
                                : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

        while (in_begin != in_end) {
            std::uint8_t i0 = 0, i1 = 0, i2 = 0;

            // first character
            i0 = static_cast<std::uint8_t>(*in_begin);
            ++in_begin;

            *out = alpha[i0 >> 2 & 0x3f];
            ++out;

            // part of first character and second
            if (in_begin != in_end) {
                i1 = static_cast<std::uint8_t>(*in_begin);
                ++in_begin;

                *out = alpha[((i0 & 0x3) << 4) | (i1 >> 4 & 0x0f)];
                ++out;
            } else {
                *out = alpha[(i0 & 0x3) << 4];
                ++out;

                // last padding
                *out = pad;
                ++out;

                // last padding
                *out = pad;
                ++out;

                break;
            }

            // part of second character and third
            if (in_begin != in_end) {
                i2 = static_cast<std::uint8_t>(*in_begin);
                ++in_begin;

                *out = alpha[((i1 & 0xf) << 2) | (i2 >> 6 & 0x03)];
                ++out;
            } else {
                *out = alpha[(i1 & 0xf) << 2];
                ++out;

                // last padding
                *out = pad;
                ++out;

                break;
            }

            // rest of third
            *out = alpha[i2 & 0x3f];
            ++out;
        }

        return out;
    }
    /**
     Encodes a string.

     @param str the string that should be encoded
     @param alphabet which alphabet should be used
     @returns the encoded base64 string
     @throws see base64::encode()
    */
    static std::string encode(const std::string& str, alphabet alphabet = alphabet::standard)
    {
        std::string result;

        result.reserve(required_encode_size(str.length()) + 1);

        encode(str.begin(), str.end(), std::back_inserter(result), alphabet);

        return result;
    }
    /**
     Encodes a char array.

     @param buffer the char array
     @param size the size of the array
     @param alphabet which alphabet should be used
     @returns the encoded string
    */
    static std::string encode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::standard)
    {
        std::string result;

        result.reserve(required_encode_size(size) + 1);

        encode(buffer, buffer + size, std::back_inserter(result), alphabet);

        return result;
    }
    /**
     Decodes all the elements from `in_begin` to `in_end` to `out`. `in_begin` may point to the same location as `out`,
     in other words: inplace decoding is possible.

     @warning The destination must be able to hold at least `required_decode_size(std::distance(in_begin, in_end))`,
     otherwise the behavior depends on the output iterator.

     @tparam Input_iterator the source; the returned elements are cast to `char`
     @tparam Output_iterator the destination; the elements written to it are from the type `std::uint8_t`
     @param in_begin the beginning of the source
     @param in_end the ending of the source
     @param out the destination iterator
     @param alphabet which alphabet should be used
     @param behavior the behavior when an error was detected
     @returns the iterator to the next element past the last element copied
     @throws base64_error depending on the set behavior
     @throws see `Input_iterator` and `Output_iterator`
    */
    template<typename Input_iterator, typename Output_iterator>
    static Output_iterator decode(Input_iterator in_begin, Input_iterator in_end, Output_iterator out,
                                  alphabet alphabet = alphabet::auto_,
                                  decoding_behavior behavior = decoding_behavior::moderate)
    {
        //constexpr auto pad = '=';
        std::uint8_t last = 0;
        auto bits = 0;

        while (in_begin != in_end) {
            auto c = *in_begin;
            ++in_begin;

            if (c == '=') {
                break;
            }

            auto part = _base64_value(alphabet, c);

            // enough bits for one byte
            if (bits + 6 >= 8) {
                *out = (last << (8 - bits)) | (part >> (bits - 2));
                ++out;

                bits -= 2;
            } else {
                bits += 6;
            }

            last = part;
        }

        // check padding
        if (behavior != decoding_behavior::loose) {
            while (in_begin != in_end) {
                auto c = *in_begin;
                ++in_begin;

                if (c != '=') {
                    throw base64_error("invalid base64 character.");
                }
            }
        }

        return out;
    }
    /**
     Decodes a string.

     @param str the base64 encoded string
     @param alphabet which alphabet should be used
     @param behavior the behavior when an error was detected
     @returns the decoded string
     @throws see base64::decode()
    */
    static std::string decode(const std::string& str, alphabet alphabet = alphabet::auto_,
                              decoding_behavior behavior = decoding_behavior::moderate)
    {
        std::string result;

        result.reserve(max_decode_size(str.length()));

        decode(str.begin(), str.end(), std::back_inserter(result), alphabet, behavior);

        return result;
    }
    /**
     Decodes a string.

     @param buffer the base64 encoded buffer
     @param size the size of the buffer
     @param alphabet which alphabet should be used
     @param behavior the behavior when an error was detected
     @returns the decoded string
     @throws see base64::decode()
    */
    static std::string decode(const char* buffer, std::size_t size, alphabet alphabet = alphabet::auto_,
                              decoding_behavior behavior = decoding_behavior::moderate)
    {
        std::string result;

        result.reserve(max_decode_size(size));

        decode(buffer, buffer + size, std::back_inserter(result), alphabet, behavior);

        return result;
    }
    /**
     Decodes a string inplace.

     @param[in,out] str the base64 encoded string
     @param alphabet which alphabet should be used
     @param behavior the behavior when an error was detected
     @throws base64::decode_inplace()
    */
    static void decode_inplace(std::string& str, alphabet alphabet = alphabet::auto_,
                               decoding_behavior behavior = decoding_behavior::moderate)
    {
        str.resize(decode(str.begin(), str.end(), str.begin(), alphabet, behavior) - str.begin());
    }
    /**
     Decodes a char array inplace.

     @param[in,out] str the string array
     @param size the length of the array
     @param alphabet which alphabet should be used
     @param behavior the behavior when an error was detected
     @returns the pointer to the next element past the last element decoded
     @throws base64::decode_inplace()
    */
    static char* decode_inplace(char* str, std::size_t size, alphabet alphabet = alphabet::auto_,
                                decoding_behavior behavior = decoding_behavior::moderate)
    {
        return decode(str, str + size, str, alphabet, behavior);
    }
    /**
     Returns the required decoding size for a given size. The value is calculated with the following formula:

     $$
     \lceil \frac{size}{4} \rceil \cdot 3
     $$

     @param size the size of the encoded input
     @returns the size of the resulting decoded buffer; this the absolute maximum
    */
    static std::size_t max_decode_size(std::size_t size) noexcept
    {
        return (size / 4 + (size % 4 ? 1 : 0)) * 3;
    }
    /**
     Returns the required encoding size for a given size. The value is calculated with the following formula:

     $$
     \lceil \frac{size}{3} \rceil \cdot 4
     $$

     @param size the size of the decoded input
     @returns the size of the resulting encoded buffer
    */
    static std::size_t required_encode_size(std::size_t size) noexcept
    {
        return (size / 3 + (size % 3 ? 1 : 0)) * 4;
    }

private:
    static std::uint8_t _base64_value(alphabet& alphabet, char c)
    {
        if (c >= 'A' && c <= 'Z') {
            return c - 'A';
        } else if (c >= 'a' && c <= 'z') {
            return c - 'a' + 26;
        } else if (c >= '0' && c <= '9') {
            return c - '0' + 52;
        }

        // comes down to alphabet
        if (alphabet == alphabet::standard) {
            if (c == '+') {
                return 62;
            } else if (c == '/') {
                return 63;
            }
        } else if (alphabet == alphabet::url_filename_safe) {
            if (c == '-') {
                return 62;
            } else if (c == '_') {
                return 63;
            }
        } // auto detect
        else {
            if (c == '+') {
                alphabet = alphabet::standard;

                return 62;
            } else if (c == '/') {
                alphabet = alphabet::standard;

                return 63;
            } else if (c == '-') {
                alphabet = alphabet::url_filename_safe;

                return 62;
            } else if (c == '_') {
                alphabet = alphabet::url_filename_safe;

                return 63;
            }
        }

        throw base64_error("invalid base64 character.");
    }
};

#endif // !PUBLIC_DOMAIN_BASE64_HPP_

@ -1,4 +0,0 @@
int LLAMA_BUILD_NUMBER = 0;
char const *LLAMA_COMMIT = "3f1ae2e32cde00c39b96be6d01c2997c29bae555";
char const *LLAMA_COMPILER = "";
char const *LLAMA_BUILD_TARGET = "";

2686
llama/clip.cpp
File diff suppressed because it is too large

120
llama/clip.h
@ -1,120 +0,0 @@
/**
 * llama.cpp - commit 3f1ae2e32cde00c39b96be6d01c2997c29bae555 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef CLIP_H
#define CLIP_H

#include <stddef.h>
#include <stdint.h>

#ifdef LLAMA_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef LLAMA_BUILD
#            define CLIP_API __declspec(dllexport)
#        else
#            define CLIP_API __declspec(dllimport)
#        endif
#    else
#        define CLIP_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define CLIP_API
#endif

#ifdef __cplusplus
extern "C" {
#endif

struct clip_ctx;

struct clip_image_size {
    int width;
    int height;
};

struct clip_image_u8_batch {
    struct clip_image_u8 * data;
    size_t size;
};

struct clip_image_f32_batch {
    struct clip_image_f32 * data;
    size_t size;
};

CLIP_API struct clip_ctx * clip_model_load    (const char * fname, int verbosity);
CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity);

CLIP_API void clip_free(struct clip_ctx * ctx);

CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);

CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx);
CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx);
CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx);

// TODO: should be enum, not string
CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);

CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);

CLIP_API int clip_n_patches    (const struct clip_ctx * ctx);
CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);

CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip);
CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size);

CLIP_API struct clip_image_size * clip_image_size_init();
CLIP_API struct clip_image_u8  * clip_image_u8_init ();
CLIP_API struct clip_image_f32 * clip_image_f32_init();

CLIP_API void clip_image_u8_free (struct clip_image_u8  * img);
CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);

CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);

/** interpret bytes as an image file with length bytes_length, and use the result to populate img */
CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);

/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );

CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);

CLIP_API bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);

CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);

CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx);

#ifdef __cplusplus
}
#endif

#endif // CLIP_H
Some files were not shown because too many files have changed in this diff