diff --git a/.dockerignore b/.dockerignore index 43f2e07d..fada7a9b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,3 +7,5 @@ llm/llama.cpp .env .cache test_data +llm/build +llama/build diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9c1e3e13..ac4c19b0 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -102,7 +102,8 @@ jobs: with: name: generate-windows-cpu path: | - llm/build/**/bin/* + build/**/* + build/**/*.a llm/build/**/*.a dist/windows-amd64/** @@ -176,7 +177,7 @@ jobs: with: name: generate-windows-rocm path: | - llm/build/**/bin/* + build/**/* dist/windows-amd64/** - uses: actions/upload-artifact@v4 with: @@ -265,7 +266,7 @@ jobs: with: name: generate-windows-cuda-${{ matrix.cuda.version }} path: | - llm/build/**/bin/* + build/**/* dist/windows-amd64/** - uses: actions/upload-artifact@v4 with: @@ -273,7 +274,134 @@ jobs: path: dist/deps/* - # Import the prior generation steps and build the final windows assets + # windows arm64 generate, go build, and zip file (no installer) + # Output of this build is aggregated into the final x86 build + # for a unified windows installer + windows-arm64: + runs-on: windows-arm64 + environment: release + env: + KEY_CONTAINER: ${{ vars.KEY_CONTAINER }} + steps: + # The current Windows arm64 beta image has effectively zero dev tools installed... + - name: Install git and gzip + run: | + Set-ExecutionPolicy Bypass -Scope Process -Force + [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 + iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + choco install -y --no-progress git gzip + echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + - name: Install Visual Studio 2022 + run: | + $components = @( + "Microsoft.VisualStudio.Component.CoreEditor", + "Microsoft.VisualStudio.Workload.CoreEditor", + "Microsoft.VisualStudio.Component.Roslyn.Compiler", + "Microsoft.Component.MSBuild", + "Microsoft.VisualStudio.Component.TextTemplating", + "Microsoft.VisualStudio.Component.Debugger.JustInTime", + "Microsoft.VisualStudio.Component.VC.CoreIde", + "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "Microsoft.VisualStudio.Component.Windows11SDK.22621", + "Microsoft.VisualStudio.Component.VC.Tools.ARM64EC", + "Microsoft.VisualStudio.Component.VC.Tools.ARM64", + "Microsoft.VisualStudio.Component.VC.ATL", + "Microsoft.VisualStudio.Component.VC.ATL.ARM64", + "Microsoft.VisualStudio.Component.Graphics", + "Microsoft.VisualStudio.Component.VC.Redist.14.Latest", + "Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", + "Microsoft.VisualStudio.Component.Windows11Sdk.WindowsPerformanceToolkit", + "Microsoft.VisualStudio.Component.CppBuildInsights", + "Microsoft.VisualStudio.Component.VC.DiagnosticTools", + "Microsoft.VisualStudio.ComponentGroup.WebToolsExtensions.CMake", + "Microsoft.VisualStudio.Component.VC.CMake.Project", + "Microsoft.VisualStudio.Component.VC.ASAN", + "Microsoft.VisualStudio.Component.Vcpkg", + "Microsoft.VisualStudio.Workload.NativeDesktop" + ) + $config = @{ + "version" = "1.0" + "components" = $components + "extensions" = @() + } + $configPath = "${env:RUNNER_TEMP}\vsconfig" + $config | ConvertTo-Json | Out-File -FilePath $configPath + $bootstrapperFilePath = "${env:RUNNER_TEMP}\vs_community.exe" + write-host 
"Downloading Visual Studio 2022" + Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_community.exe" -outfile $bootstrapperFilePath + $bootstrapperArgumentList = ('/c', $bootstrapperFilePath, '--config', $configPath, '--quiet', '--wait' ) + write-host "Installing Visual Studio 2022" + $process = Start-Process -FilePath cmd.exe -ArgumentList $bootstrapperArgumentList -Wait -PassThru + $exitCode = $process.ExitCode + write-host $exitCode + # pacman in mingw/msys2 is ~broken on windows arm right now - hangs consistently during attempts to install + # so we'll use this alternative GCC binary + - name: Install llvm-mingw GCC + run: | + $gcc_url="https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip" + write-host "Downloading llvm-mingw" + Invoke-WebRequest -Uri "${gcc_url}" -OutFile "${env:RUNNER_TEMP}\gcc.zip" + write-host "Unpacking llvm-mingw" + expand-archive -path "${env:RUNNER_TEMP}\gcc.zip" -destinationpath "c:\" + mv c:\llvm-mingw-* c:\llvm-mingw + echo "c:\llvm-mingw\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + - name: Verify GCC + run: | + echo $env:PATH + gcc --version + - uses: actions/checkout@v4 + - name: Set Version + run: | + $ver=${env:GITHUB_REF_NAME}.trim("v") + echo VERSION=$ver | Out-File -FilePath ${env:GITHUB_ENV} -Encoding utf8 -Append + - uses: 'google-github-actions/auth@v2' + with: + project_id: 'ollama' + credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}' + - run: echo "${{ vars.OLLAMA_CERT }}" | Out-File -FilePath ollama_inc.crt -Encoding utf8 + - name: install Windows SDK 8.1 to get signtool + run: | + $ErrorActionPreference = "Stop" + write-host "downloading SDK" + Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe" + Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait + write-host "Win SDK 8.1 installed" + gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe' + - name: install signing plugin + run: | + $ErrorActionPreference = "Stop" + write-host "downloading plugin" + Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip" + Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\ + write-host "Installing plugin" + & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet + write-host "plugin installed" + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + - run: go get ./... 
+ - run: | + $gopath=(get-command go).source | split-path -parent + $gccpath=(get-command gcc).source | split-path -parent + & "C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\Tools\Launch-VsDevShell.ps1" + cd $env:GITHUB_WORKSPACE + $env:CMAKE_SYSTEM_VERSION="10.0.22621.0" + $env:PATH="$gopath;$gccpath;$env:PATH;C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin" + echo $env:PATH + $env:ARCH="arm64" + .\scripts\build_windows.ps1 buildOllama buildApp gatherDependencies distZip + name: 'Windows Build' + - uses: actions/upload-artifact@v4 + with: + name: windows-arm64 + path: | + dist/windows-arm64/** + dist/windows-arm64-app.exe + dist/ollama-windows-arm64.zip + + # Import the prior generation steps plus the full arm64 build, and build the final windows assets build-windows: environment: release runs-on: windows @@ -281,6 +409,7 @@ jobs: - generate-windows-cuda - generate-windows-rocm - generate-windows-cpu + - windows-arm64 env: KEY_CONTAINER: ${{ vars.KEY_CONTAINER }} steps: @@ -338,7 +467,11 @@ jobs: - uses: actions/download-artifact@v4 with: name: generate-windows-rocm - - run: dir llm/build + - uses: actions/download-artifact@v4 + with: + name: windows-arm64 + path: dist + - run: dir build - run: | $gopath=(get-command go).source | split-path -parent & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1" @@ -359,9 +492,7 @@ jobs: environment: release runs-on: linux env: - OLLAMA_SKIP_MANIFEST_CREATE: '1' - BUILD_ARCH: amd64 - PUSH: '1' + PLATFORM: linux/amd64 steps: - uses: actions/checkout@v4 with: @@ -369,14 +500,8 @@ jobs: - name: Set Version shell: bash run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - run: | ./scripts/build_linux.sh - ./scripts/build_docker.sh - uses: actions/upload-artifact@v4 with: name: dist-linux-amd64 @@ -390,9 +515,7 @@ jobs: environment: release runs-on: linux-arm64 env: - OLLAMA_SKIP_MANIFEST_CREATE: '1' - BUILD_ARCH: arm64 - PUSH: '1' + PLATFORM: linux/arm64 steps: - uses: actions/checkout@v4 with: @@ -421,14 +544,8 @@ jobs: sudo usermod -aG docker $USER sudo apt-get install acl sudo setfacl --modify user:$USER:rw /var/run/docker.sock - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - run: | ./scripts/build_linux.sh - ./scripts/build_docker.sh - uses: actions/upload-artifact@v4 with: name: dist-linux-arm64 @@ -436,6 +553,178 @@ jobs: dist/*linux* !dist/*-cov + # Container image build + build-container-image: + environment: release + strategy: + matrix: + runner: + - linux + - linux-arm64 + runs-on: ${{ matrix.runner }} + env: + FINAL_IMAGE_REPO: ollama/ollama + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: 'Install Docker' + if: ${{ startsWith(matrix.runner, 'linux-arm64') }} + run: | + sudo apt-get update + sudo apt-get install -y ca-certificates curl + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install -y docker-ce docker-ce-cli containerd.io + sudo usermod -aG docker $USER + sudo apt-get install acl + sudo setfacl --modify user:$USER:rw /var/run/docker.sock + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FINAL_IMAGE_REPO }} + flavor: | + latest=false + tags: | + type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr + type=semver,pattern={{version}} + - name: Set Version + shell: bash + run: | + machine=$(uname -m) + case ${machine} in + x86_64) echo ARCH=amd64; echo PLATFORM_PAIR=linux-amd64 ;; + aarch64) echo ARCH=arm64; echo PLATFORM_PAIR=linux-arm64 ;; + esac >>$GITHUB_ENV + echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKER_USER }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + context: "." + platforms: linux/${{ env.ARCH }} + build-args: | + GOFLAGS + outputs: type=image,name=${{ env.FINAL_IMAGE_REPO }},push-by-digest=true,name-canonical=true,push=true + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + merge: + environment: release + runs-on: linux + needs: + - build-container-image + env: + FINAL_IMAGE_REPO: ollama/ollama + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FINAL_IMAGE_REPO }} + flavor: | + latest=false + tags: | + type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr + type=semver,pattern={{version}} + - name: Set Version + shell: bash + run: | + machine=$(uname -m) + case ${machine} in + x86_64) echo ARCH=amd64; echo PLATFORM_PAIR=linux-amd64 ;; + aarch64) echo ARCH=arm64; echo PLATFORM_PAIR=linux-arm64 ;; + esac >>$GITHUB_ENV + echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKER_USER }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.FINAL_IMAGE_REPO }}@sha256:%s ' *) + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.FINAL_IMAGE_REPO }}:${{ steps.meta.outputs.version }} + build-container-image-rocm: + environment: release + runs-on: linux + env: + FINAL_IMAGE_REPO: ollama/ollama + ARCH: amd64 + PLATFORM_PAIR: linux-amd64 + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.FINAL_IMAGE_REPO }} + flavor: | + latest=false + tags: | + type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr + type=semver,pattern={{version}} + - name: Set Version + shell: bash + run: | + echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKER_USER }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + context: "." + target: runtime-rocm + build-args: | + GOFLAGS + tags: ${{ env.FINAL_IMAGE_REPO }}:${{ env.DOCKER_METADATA_OUTPUT_VERSION}}-rocm + push: true + # Aggregate all the assets and ship a release release: needs: @@ -448,8 +737,6 @@ jobs: permissions: contents: write env: - OLLAMA_SKIP_IMAGE_BUILD: '1' - PUSH: '1' GH_TOKEN: ${{ github.token }} steps: - uses: actions/checkout@v4 @@ -458,12 +745,6 @@ jobs: run: | echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV echo "RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" >> $GITHUB_ENV - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - - run: ./scripts/build_docker.sh - name: Retrieve built artifact uses: actions/download-artifact@v4 with: @@ -474,8 +755,6 @@ jobs: ls -lh dist/ (cd dist; find . -type f | xargs sha256sum > ../sha256sum.txt) mv sha256sum.txt dist/ - mv dist/linux-???64 . - mv dist/linux-amd64-rocm . cat dist/sha256sum.txt - name: Create or update Release run: | diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 3d58fa3e..26dc732a 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -81,12 +81,6 @@ jobs: if: ${{ ! startsWith(matrix.os, 'windows-') }} name: 'Unix Go Generate' - run: go build . - - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.os }}-${{ matrix.arch }}-libraries - path: | - llm/build/**/bin/* - llm/build/**/*.a generate-cuda: needs: [changes] if: ${{ needs.changes.outputs.GENERATE_CUDA == 'True' }} @@ -114,12 +108,6 @@ jobs: go generate -x ./... env: OLLAMA_SKIP_CPU_GENERATE: '1' - - uses: actions/upload-artifact@v4 - with: - name: cuda-${{ matrix.cuda-version }}-libraries - path: | - llm/build/**/bin/* - dist/windows-amd64/** generate-rocm: needs: [changes] if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }} @@ -147,12 +135,6 @@ jobs: go generate -x ./... 
env: OLLAMA_SKIP_CPU_GENERATE: '1' - - uses: actions/upload-artifact@v4 - with: - name: rocm-${{ matrix.rocm-version }}-libraries - path: | - llm/build/**/bin/* - dist/windows-amd64/** # ROCm generation step generate-windows-rocm: @@ -189,7 +171,6 @@ jobs: name: go generate env: OLLAMA_SKIP_CPU_GENERATE: '1' - # TODO - do we need any artifacts? # CUDA generation step generate-windows-cuda: @@ -231,7 +212,6 @@ jobs: go generate -x ./... env: OLLAMA_SKIP_CPU_GENERATE: '1' - # TODO - do we need any artifacts? lint: strategy: @@ -263,14 +243,6 @@ jobs: arm64) echo ARCH=arm64 ;; esac >>$GITHUB_ENV shell: bash - - run: | - mkdir -p llm/build/linux/$ARCH/stub/bin - touch llm/build/linux/$ARCH/stub/bin/ollama_llama_server - if: ${{ startsWith(matrix.os, 'ubuntu-') }} - - run: | - mkdir -p llm/build/darwin/$ARCH/stub/bin - touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server - if: ${{ startsWith(matrix.os, 'macos-') }} - uses: golangci/golangci-lint-action@v6 with: args: --timeout 8m0s -v @@ -301,23 +273,10 @@ jobs: cache: true - run: | case ${{ matrix.arch }} in - amd64) echo ARCH=x86_64 ;; + amd64) echo ARCH=amd64 ;; arm64) echo ARCH=arm64 ;; esac >>$GITHUB_ENV shell: bash - - run: | - mkdir -p llm/build/linux/$ARCH/stub/bin - touch llm/build/linux/$ARCH/stub/bin/ollama_llama_server - if: ${{ startsWith(matrix.os, 'ubuntu-') }} - - run: | - mkdir -p llm/build/darwin/$ARCH/stub/bin - touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server - if: ${{ startsWith(matrix.os, 'macos-') }} - shell: bash - run: go generate ./... - run: go build - run: go test -v ./... - - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.os }}-binaries - path: ollama diff --git a/.gitignore b/.gitignore index 0d826ab6..87f8b007 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,7 @@ ggml-metal.metal test_data *.crt llm/build +build/*/*/* +!build/**/placeholder +llama/build __debug_bin* \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 6743866a..0f43e618 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,12 +16,12 @@ FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION_11-devel-centos7 AS cuda-1 ARG CMAKE_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ WORKDIR /go/src/github.com/ollama/ollama/llm/generate ARG CGO_CFLAGS ARG CUDA_V11_ARCHITECTURES -ENV GOARCH amd64 +ENV GOARCH=amd64 RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_SKIP_STATIC_GENERATE=1 \ OLLAMA_SKIP_CPU_GENERATE=1 \ @@ -33,12 +33,12 @@ FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION_12-devel-centos7 AS cuda-1 ARG CMAKE_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ WORKDIR /go/src/github.com/ollama/ollama/llm/generate ARG CGO_CFLAGS ARG CUDA_V12_ARCHITECTURES -ENV GOARCH amd64 +ENV GOARCH=amd64 RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_SKIP_STATIC_GENERATE=1 \ OLLAMA_SKIP_CPU_GENERATE=1 \ @@ -47,32 +47,32 @@ RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_CUSTOM_CUDA_DEFS="-DGGML_CUDA_USE_GRAPHS=on" \ bash gen_linux.sh -FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION_11-devel-rockylinux8 AS cuda-11-build-server-arm64 +FROM --platform=linux/arm64 
nvidia/cuda:$CUDA_VERSION_11-devel-rockylinux8 AS cuda-11-build-runner-arm64 ARG CMAKE_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ WORKDIR /go/src/github.com/ollama/ollama/llm/generate ARG CGO_CFLAGS ARG CUDA_V11_ARCHITECTURES -ENV GOARCH arm64 +ENV GOARCH=arm64 RUN OLLAMA_SKIP_STATIC_GENERATE=1 \ OLLAMA_SKIP_CPU_GENERATE=1 \ CMAKE_CUDA_ARCHITECTURES="${CUDA_V11_ARCHITECTURES}" \ CUDA_VARIANT="_v11" \ bash gen_linux.sh -FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION_12-devel-rockylinux8 AS cuda-12-build-server-arm64 +FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION_12-devel-rockylinux8 AS cuda-12-build-runner-arm64 ARG CMAKE_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ WORKDIR /go/src/github.com/ollama/ollama/llm/generate ARG CGO_CFLAGS ARG CUDA_V12_ARCHITECTURES -ENV GOARCH arm64 +ENV GOARCH=arm64 RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_SKIP_STATIC_GENERATE=1 \ OLLAMA_SKIP_CPU_GENERATE=1 \ @@ -86,13 +86,13 @@ FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete AS rocm-b ARG CMAKE_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH -ENV LIBRARY_PATH /opt/amdgpu/lib64 +ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH +ENV LIBRARY_PATH=/opt/amdgpu/lib64 COPY --from=llm-code / /go/src/github.com/ollama/ollama/ WORKDIR /go/src/github.com/ollama/ollama/llm/generate ARG CGO_CFLAGS ARG AMDGPU_TARGETS -ENV GOARCH amd64 +ENV GOARCH=amd64 RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_SKIP_CPU_GENERATE=1 bash gen_linux.sh RUN mkdir -p ../../dist/linux-amd64-rocm/lib/ollama && \ @@ -103,11 +103,11 @@ ARG CMAKE_VERSION ARG GOLANG_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ ARG OLLAMA_CUSTOM_CPU_DEFS ARG CGO_CFLAGS -ENV GOARCH amd64 +ENV GOARCH=amd64 WORKDIR /go/src/github.com/ollama/ollama/llm/generate FROM --platform=linux/amd64 cpu-builder-amd64 AS static-build-amd64 @@ -128,11 +128,11 @@ ARG CMAKE_VERSION ARG GOLANG_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH +ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ ARG OLLAMA_CUSTOM_CPU_DEFS ARG CGO_CFLAGS -ENV GOARCH arm64 +ENV GOARCH=arm64 WORKDIR /go/src/github.com/ollama/ollama/llm/generate FROM --platform=linux/arm64 cpu-builder-arm64 AS static-build-arm64 @@ -143,73 +143,112 @@ RUN --mount=type=cache,target=/root/.ccache \ OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu" bash gen_linux.sh -# Intermediate stage used for ./scripts/build_linux.sh +# Intermediate stages used for ./scripts/build_linux.sh FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64 -ENV CGO_ENABLED 1 +ENV CGO_ENABLED=1 WORKDIR 
/go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=static-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
-COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
-COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
+COPY --from=static-build-amd64 /go/src/github.com/ollama/ollama/llm/build/ llm/build/
+COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
+COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
 COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
-COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
+COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
 COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
-COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
+COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
 COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/ dist/
-COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/build/ build/
+ARG GOFLAGS
+ARG CGO_CFLAGS
+RUN --mount=type=cache,target=/root/.ccache \
+    go build -trimpath -o dist/linux-amd64/bin/ollama .
+RUN cd dist/linux-$GOARCH && \
+    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz
+RUN cd dist/linux-$GOARCH-rocm && \
+    tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-rocm.tgz
+
+FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
+ENV CGO_ENABLED=1
+ARG GOLANG_VERSION
+WORKDIR /go/src/github.com/ollama/ollama
+COPY . .
+COPY --from=static-build-arm64 /go/src/github.com/ollama/ollama/llm/build/ llm/build/
+COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
+COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/build/ build/
+COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
+COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/build/ build/
+ARG GOFLAGS
+ARG CGO_CFLAGS
+RUN --mount=type=cache,target=/root/.ccache \
+    go build -trimpath -o dist/linux-arm64/bin/ollama .
+RUN cd dist/linux-$GOARCH && \
+    tar --exclude runners -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz
+
+FROM --platform=linux/amd64 scratch AS dist-amd64
+COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz /
+FROM --platform=linux/arm64 scratch AS dist-arm64
+COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz /
+FROM dist-$TARGETARCH as dist
+
+
+# Optimized container images do not carry nested payloads
+FROM --platform=linux/amd64 static-build-amd64 AS container-build-amd64
+WORKDIR /go/src/github.com/ollama/ollama
+COPY . .
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN --mount=type=cache,target=/root/.ccache \
     go build -trimpath -o dist/linux-amd64/bin/ollama .
-# Intermediate stage used for ./scripts/build_linux.sh
-FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
-ENV CGO_ENABLED 1
-ARG GOLANG_VERSION
 WORKDIR /go/src/github.com/ollama/ollama
 COPY . .
-COPY --from=static-build-arm64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
-COPY --from=cuda-11-build-server-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
-COPY --from=cuda-11-build-server-arm64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
-COPY --from=cuda-12-build-server-arm64 /go/src/github.com/ollama/ollama/dist/ dist/
-COPY --from=cuda-12-build-server-arm64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
 ARG GOFLAGS
 ARG CGO_CFLAGS
 RUN --mount=type=cache,target=/root/.ccache \
     go build -trimpath -o dist/linux-arm64/bin/ollama .
-# Strip out ROCm dependencies to keep the primary image lean
-FROM --platform=linux/amd64 ubuntu:22.04 as amd64-libs-without-rocm
-COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /scratch/
-RUN cd /scratch/ollama/ && rm -rf rocblas libamd* libdrm* libroc* libhip* libhsa*
-
-# Runtime stages
-FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
-COPY --from=amd64-libs-without-rocm /scratch/ /lib/
-RUN apt-get update && apt-get install -y ca-certificates && \
+FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-amd64
+RUN apt-get update && \
+    apt-get install -y ca-certificates && \
     apt-get clean && rm -rf /var/lib/apt/lists/*
-COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/
+COPY --from=container-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/
+COPY --from=cpu-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
+COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
+COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
+COPY --from=cuda-11-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
+COPY --from=cuda-12-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/
-FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
-COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
-RUN apt-get update && apt-get install -y ca-certificates && \
+FROM --platform=linux/arm64 ubuntu:22.04 AS runtime-arm64
+RUN apt-get update && \
+    apt-get install -y ca-certificates && \
     apt-get clean && rm -rf /var/lib/apt/lists/*
-COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/bin/ /bin/
+COPY --from=container-build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/bin/ /bin/
+COPY --from=cpu-build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
+COPY --from=cuda-11-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
+COPY --from=cuda-12-build-runner-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/
-# Radeon images are much larger so we keep it distinct from the CPU/CUDA image
-FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete as runtime-rocm
-RUN update-pciids
-COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/
-RUN ln -s /opt/rocm/lib /lib/ollama
+# ROCm libraries are larger, so we keep this image distinct from the CPU/CUDA image
+FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-rocm
+# Frontload the ROCm libraries, which are large and rarely change, to increase the chance of a common layer
+# across releases
+COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64-rocm/lib/ /lib/
+RUN apt-get update && \
+    apt-get install -y ca-certificates && \
+    apt-get clean && rm -rf /var/lib/apt/lists/*
+COPY --from=container-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/ +COPY --from=cpu-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ +COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ +COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ +COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ EXPOSE 11434 -ENV OLLAMA_HOST 0.0.0.0 +ENV OLLAMA_HOST=0.0.0.0 ENTRYPOINT ["/bin/ollama"] CMD ["serve"] FROM runtime-$TARGETARCH EXPOSE 11434 -ENV OLLAMA_HOST 0.0.0.0 +ENV OLLAMA_HOST=0.0.0.0 ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility diff --git a/README.md b/README.md index d17b9723..cb57f7e9 100644 --- a/README.md +++ b/README.md @@ -35,10 +35,10 @@ The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `olla ## Quickstart -To run and chat with [Llama 3.1](https://ollama.com/library/llama3.1): +To run and chat with [Llama 3.2](https://ollama.com/library/llama3.2): ``` -ollama run llama3.1 +ollama run llama3.2 ``` ## Model library @@ -49,6 +49,8 @@ Here are some example models that can be downloaded: | Model | Parameters | Size | Download | | ------------------ | ---------- | ----- | ------------------------------ | +| Llama 3.2 | 3B | 2.0GB | `ollama run llama3.2` | +| Llama 3.2 | 1B | 1.3GB | `ollama run llama3.2:1b` | | Llama 3.1 | 8B | 4.7GB | `ollama run llama3.1` | | Llama 3.1 | 70B | 40GB | `ollama run llama3.1:70b` | | Llama 3.1 | 405B | 231GB | `ollama run llama3.1:405b` | @@ -99,16 +101,16 @@ See the [guide](docs/import.md) on importing models for more information. ### Customize a prompt -Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.1` model: +Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.2` model: ``` -ollama pull llama3.1 +ollama pull llama3.2 ``` Create a `Modelfile`: ``` -FROM llama3.1 +FROM llama3.2 # set the temperature to 1 [higher is more creative, lower is more coherent] PARAMETER temperature 1 @@ -143,7 +145,7 @@ ollama create mymodel -f ./Modelfile ### Pull a model ``` -ollama pull llama3.1 +ollama pull llama3.2 ``` > This command can also be used to update a local model. Only the diff will be pulled. @@ -151,13 +153,13 @@ ollama pull llama3.1 ### Remove a model ``` -ollama rm llama3.1 +ollama rm llama3.2 ``` ### Copy a model ``` -ollama cp llama3.1 my-model +ollama cp llama3.2 my-model ``` ### Multiline input @@ -181,14 +183,14 @@ The image features a yellow smiley face, which is likely the central focus of th ### Pass the prompt as an argument ``` -$ ollama run llama3.1 "Summarize this file: $(cat README.md)" +$ ollama run llama3.2 "Summarize this file: $(cat README.md)" Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications. 
``` ### Show model information ``` -ollama show llama3.1 +ollama show llama3.2 ``` ### List models on your computer @@ -197,6 +199,18 @@ ollama show llama3.1 ollama list ``` +### List which models are currently loaded + +``` +ollama ps +``` + +### Stop a model which is currently running + +``` +ollama stop llama3.2 +``` + ### Start Ollama `ollama serve` is used when you want to start ollama without running the desktop application. @@ -216,7 +230,7 @@ Next, start the server: Finally, in a separate shell, run a model: ``` -./ollama run llama3.1 +./ollama run llama3.2 ``` ## REST API @@ -227,7 +241,7 @@ Ollama has a REST API for running and managing models. ``` curl http://localhost:11434/api/generate -d '{ - "model": "llama3.1", + "model": "llama3.2", "prompt":"Why is the sky blue?" }' ``` @@ -236,7 +250,7 @@ curl http://localhost:11434/api/generate -d '{ ``` curl http://localhost:11434/api/chat -d '{ - "model": "llama3.1", + "model": "llama3.2", "messages": [ { "role": "user", "content": "why is the sky blue?" } ] @@ -312,6 +326,10 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Cherry Studio](https://github.com/kangfenmao/cherry-studio) (Desktop client with Ollama support) - [ConfiChat](https://github.com/1runeberg/confichat) (Lightweight, standalone, multi-platform, and privacy focused LLM chat interface with optional encryption) - [Archyve](https://github.com/nickthecook/archyve) (RAG-enabling document library) +- [crewAI with Mesop](https://github.com/rapidarchitect/ollama-crew-mesop) (Mesop Web Interface to run crewAI with Ollama) +- [LLMChat](https://github.com/trendy-design/llmchat) (Privacy focused, 100% local, intuitive all-in-one chat interface) +- [ARGO](https://github.com/xark-argo/argo) (Locally download and run Ollama and Huggingface models with RAG on Mac/Windows/Linux) +- [G1](https://github.com/bklieger-groq/g1) (Prototype of using prompting strategies to improve the LLM's reasoning through o1-like reasoning chains.) ### Terminal @@ -336,6 +354,8 @@ See the [API documentation](./docs/api.md) for all endpoints. - [podman-ollama](https://github.com/ericcurtin/podman-ollama) - [gollama](https://github.com/sammcj/gollama) - [Ollama eBook Summary](https://github.com/cognitivetech/ollama-ebook-summary/) +- [Ollama Mixture of Experts (MOE) in 50 lines of code](https://github.com/rapidarchitect/ollama_moe) +- [vim-intelligence-bridge](https://github.com/pepo-ec/vim-intelligence-bridge) Simple interaction of "Ollama" with the Vim editor ### Apple Vision Pro - [Enchanted](https://github.com/AugustDev/enchanted) @@ -356,12 +376,13 @@ See the [API documentation](./docs/api.md) for all endpoints. 
### Libraries -- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa) +- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/integrations/chat/ollama/) with [example](https://js.langchain.com/docs/tutorials/local_rag/) - [Firebase Genkit](https://firebase.google.com/docs/genkit/plugins/ollama) +- [crewAI](https://github.com/crewAIInc/crewAI) - [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example) - [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java) - [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs) -- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html) +- [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) and [LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama) - [LiteLLM](https://github.com/BerriAI/litellm) - [OllamaFarm for Go](https://github.com/presbrey/ollamafarm) - [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp) @@ -389,6 +410,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Ollamaclient for Golang](https://github.com/xyproto/ollamaclient) - [High-level function abstraction in Go](https://gitlab.com/tozd/go/fun) - [Ollama PHP](https://github.com/ArdaGnsrn/ollama-php) +- [Agents-Flex for Java](https://github.com/agents-flex/agents-flex) with [example](https://github.com/agents-flex/agents-flex/tree/main/agents-flex-llm/agents-flex-llm-ollama/src/test/java/com/agentsflex/llm/ollama) ### Mobile @@ -427,6 +449,8 @@ See the [API documentation](./docs/api.md) for all endpoints. 
- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install ollama client & models on any OS for apps that depends on ollama server) - [vnc-lm](https://github.com/jk011ru/vnc-lm) (A containerized Discord bot with support for attachments and web links) - [LSP-AI](https://github.com/SilasMarvin/lsp-ai) (Open-source language server for AI-powered functionality) +- [QodeAssist](https://github.com/Palm1r/QodeAssist) (AI-powered coding assistant plugin for Qt Creator) +- [Obsidian Quiz Generator plugin](https://github.com/ECuiDev/obsidian-quiz-generator) ### Supported backends diff --git a/app/ollama.iss b/app/ollama.iss index 34cc5c4c..4038815a 100644 --- a/app/ollama.iss +++ b/app/ollama.iss @@ -28,8 +28,8 @@ AppPublisher={#MyAppPublisher} AppPublisherURL={#MyAppURL} AppSupportURL={#MyAppURL} AppUpdatesURL={#MyAppURL} -ArchitecturesAllowed=x64 arm64 -ArchitecturesInstallIn64BitMode=x64 arm64 +ArchitecturesAllowed=x64compatible arm64 +ArchitecturesInstallIn64BitMode=x64compatible arm64 DefaultDirName={localappdata}\Programs\{#MyAppName} DefaultGroupName={#MyAppName} DisableProgramGroupPage=yes @@ -48,6 +48,7 @@ OutputDir=..\dist\ SetupLogging=yes CloseApplications=yes RestartApplications=no +RestartIfNeededByRun=no ; https://jrsoftware.org/ishelp/index.php?topic=setup_wizardimagefile WizardSmallImageFile=.\assets\setup.bmp @@ -86,12 +87,21 @@ Name: "english"; MessagesFile: "compiler:Default.isl" DialogFontSize=12 [Files] -Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit -Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit -Source: "..\dist\windows-{#ARCH}\lib\ollama\runners\*"; DestDir: "{app}\lib\ollama\runners"; Flags: ignoreversion 64bit recursesubdirs +#if DirExists("..\dist\windows-amd64") +Source: "..\dist\windows-amd64-app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ;Check: not IsArm64(); Flags: ignoreversion 64bit +Source: "..\dist\windows-amd64\ollama.exe"; DestDir: "{app}"; Check: not IsArm64(); Flags: ignoreversion 64bit +Source: "..\dist\windows-amd64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Check: not IsArm64(); Flags: ignoreversion 64bit recursesubdirs +#endif + +#if DirExists("..\dist\windows-arm64") +Source: "..\dist\windows-arm64\vc_redist.arm64.exe"; DestDir: "{tmp}"; Check: IsArm64() and vc_redist_needed(); Flags: deleteafterinstall +Source: "..\dist\windows-arm64-app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ;Check: IsArm64(); Flags: ignoreversion 64bit +Source: "..\dist\windows-arm64\ollama.exe"; DestDir: "{app}"; Check: IsArm64(); Flags: ignoreversion 64bit +Source: "..\dist\windows-arm64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Check: IsArm64(); Flags: ignoreversion 64bit recursesubdirs +#endif + Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion -Source: "..\dist\windows-amd64\lib\ollama\*"; DestDir: "{app}\lib\ollama\"; Flags: ignoreversion recursesubdirs [Icons] Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico" @@ -99,6 +109,9 @@ Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilen Name: "{userprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico" [Run] +#if DirExists("..\dist\windows-arm64") +Filename: "{tmp}\vc_redist.arm64.exe"; Parameters: "/install /passive /norestart"; Check: IsArm64() and vc_redist_needed(); StatusMsg: "Installing VC++ 
Redistributables..."; Flags: waituntilterminated +#endif Filename: "{cmd}"; Parameters: "/C set PATH={app};%PATH% & ""{app}\{#MyAppExeName}"""; Flags: postinstall nowait runhidden [UninstallRun] @@ -129,7 +142,7 @@ SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or fi ;FinishedHeadingLabel=Run your first model -;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3.1 +;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3.2 ;ClickFinish=%n [Registry] @@ -154,3 +167,39 @@ begin { Pos() returns 0 if not found } Result := Pos(';' + ExpandConstant(Param) + ';', ';' + OrigPath + ';') = 0; end; + +{ --- VC Runtime libraries discovery code - Only install vc_redist if it isn't already installed ----- } +const VCRTL_MIN_V1 = 14; +const VCRTL_MIN_V2 = 40; +const VCRTL_MIN_V3 = 33807; +const VCRTL_MIN_V4 = 0; + + // check if the minimum required vc redist is installed (by looking the registry) +function vc_redist_needed (): Boolean; +var + sRegKey: string; + v1: Cardinal; + v2: Cardinal; + v3: Cardinal; + v4: Cardinal; +begin + sRegKey := 'SOFTWARE\WOW6432Node\Microsoft\VisualStudio\14.0\VC\Runtimes\arm64'; + if (RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Major', v1) and + RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Minor', v2) and + RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'Bld', v3) and + RegQueryDWordValue (HKEY_LOCAL_MACHINE, sRegKey, 'RBld', v4)) then + begin + Log ('VC Redist version: ' + IntToStr (v1) + + '.' + IntToStr (v2) + '.' + IntToStr (v3) + + '.' + IntToStr (v4)); + { Version info was found. Return true if later or equal to our + minimal required version RTL_MIN_Vx } + Result := not ( + (v1 > VCRTL_MIN_V1) or ((v1 = VCRTL_MIN_V1) and + ((v2 > VCRTL_MIN_V2) or ((v2 = VCRTL_MIN_V2) and + ((v3 > VCRTL_MIN_V3) or ((v3 = VCRTL_MIN_V3) and + (v4 >= VCRTL_MIN_V4))))))); + end + else + Result := TRUE; +end; diff --git a/app/ollama_welcome.ps1 b/app/ollama_welcome.ps1 index 46777a3a..e9695748 100644 --- a/app/ollama_welcome.ps1 +++ b/app/ollama_welcome.ps1 @@ -4,5 +4,5 @@ write-host "Welcome to Ollama!" 
write-host "" write-host "Run your first model:" write-host "" -write-host "`tollama run llama3.1" +write-host "`tollama run llama3.2" write-host "" \ No newline at end of file diff --git a/build/darwin/amd64/placeholder b/build/darwin/amd64/placeholder new file mode 100644 index 00000000..87dc2738 --- /dev/null +++ b/build/darwin/amd64/placeholder @@ -0,0 +1 @@ +This is here to make sure the build/ directory exists for the go:embed command diff --git a/build/darwin/arm64/placeholder b/build/darwin/arm64/placeholder new file mode 100644 index 00000000..87dc2738 --- /dev/null +++ b/build/darwin/arm64/placeholder @@ -0,0 +1 @@ +This is here to make sure the build/ directory exists for the go:embed command diff --git a/build/embed_darwin_amd64.go b/build/embed_darwin_amd64.go new file mode 100644 index 00000000..af1458ea --- /dev/null +++ b/build/embed_darwin_amd64.go @@ -0,0 +1,8 @@ +package build + +import "embed" + +// Darwin payloads separated by architecture to avoid duplicate payloads when cross compiling + +//go:embed darwin/amd64/* +var EmbedFS embed.FS diff --git a/build/embed_darwin_arm64.go b/build/embed_darwin_arm64.go new file mode 100644 index 00000000..d885365d --- /dev/null +++ b/build/embed_darwin_arm64.go @@ -0,0 +1,8 @@ +package build + +import "embed" + +// Darwin payloads separated by architecture to avoid duplicate payloads when cross compiling + +//go:embed darwin/arm64/* +var EmbedFS embed.FS diff --git a/build/embed_linux.go b/build/embed_linux.go new file mode 100644 index 00000000..4cf7be4c --- /dev/null +++ b/build/embed_linux.go @@ -0,0 +1,6 @@ +package build + +import "embed" + +//go:embed linux/* +var EmbedFS embed.FS diff --git a/build/embed_unused.go b/build/embed_unused.go new file mode 100644 index 00000000..00fbe02e --- /dev/null +++ b/build/embed_unused.go @@ -0,0 +1,8 @@ +//go:build !linux && !darwin + +package build + +import "embed" + +// unused on windows +var EmbedFS embed.FS diff --git a/build/linux/amd64/placeholder b/build/linux/amd64/placeholder new file mode 100644 index 00000000..87dc2738 --- /dev/null +++ b/build/linux/amd64/placeholder @@ -0,0 +1 @@ +This is here to make sure the build/ directory exists for the go:embed command diff --git a/build/linux/arm64/placeholder b/build/linux/arm64/placeholder new file mode 100644 index 00000000..87dc2738 --- /dev/null +++ b/build/linux/arm64/placeholder @@ -0,0 +1 @@ +This is here to make sure the build/ directory exists for the go:embed command diff --git a/cmd/cmd.go b/cmd/cmd.go index 5de1ed1b..dc288e43 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -2,6 +2,7 @@ package cmd import ( "archive/zip" + "bufio" "bytes" "context" "crypto/ed25519" @@ -21,6 +22,7 @@ import ( "regexp" "runtime" "slices" + "strconv" "strings" "sync/atomic" "syscall" @@ -344,6 +346,39 @@ func (w *progressWriter) Write(p []byte) (n int, err error) { return len(p), nil } +func loadOrUnloadModel(cmd *cobra.Command, opts *runOptions) error { + p := progress.NewProgress(os.Stderr) + defer p.StopAndClear() + + spinner := progress.NewSpinner("") + p.Add("", spinner) + + client, err := api.ClientFromEnvironment() + if err != nil { + return err + } + + req := &api.GenerateRequest{ + Model: opts.Model, + KeepAlive: opts.KeepAlive, + } + + return client.Generate(cmd.Context(), req, func(api.GenerateResponse) error { return nil }) +} + +func StopHandler(cmd *cobra.Command, args []string) error { + opts := &runOptions{ + Model: args[0], + KeepAlive: &api.Duration{Duration: 0}, + } + if err := loadOrUnloadModel(cmd, opts); err != nil { + if 
strings.Contains(err.Error(), "not found") { + return fmt.Errorf("couldn't find model \"%s\" to stop", args[0]) + } + } + return nil +} + func RunHandler(cmd *cobra.Command, args []string) error { interactive := true @@ -422,7 +457,7 @@ func RunHandler(cmd *cobra.Command, args []string) error { opts.ParentModel = info.Details.ParentModel if interactive { - if err := loadModel(cmd, &opts); err != nil { + if err := loadOrUnloadModel(cmd, &opts); err != nil { return err } @@ -578,7 +613,7 @@ func ListHandler(cmd *cobra.Command, args []string) error { table.SetHeaderLine(false) table.SetBorder(false) table.SetNoWhiteSpace(true) - table.SetTablePadding("\t") + table.SetTablePadding(" ") table.AppendBulk(data) table.Render() @@ -613,7 +648,15 @@ func ListRunningHandler(cmd *cobra.Command, args []string) error { cpuPercent := math.Round(float64(sizeCPU) / float64(m.Size) * 100) procStr = fmt.Sprintf("%d%%/%d%% CPU/GPU", int(cpuPercent), int(100-cpuPercent)) } - data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, format.HumanTime(m.ExpiresAt, "Never")}) + + var until string + delta := time.Since(m.ExpiresAt) + if delta > 0 { + until = "Stopping..." + } else { + until = format.HumanTime(m.ExpiresAt, "Never") + } + data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, until}) } } @@ -624,7 +667,7 @@ func ListRunningHandler(cmd *cobra.Command, args []string) error { table.SetHeaderLine(false) table.SetBorder(false) table.SetNoWhiteSpace(true) - table.SetTablePadding("\t") + table.SetTablePadding(" ") table.AppendBulk(data) table.Render() @@ -637,6 +680,17 @@ func DeleteHandler(cmd *cobra.Command, args []string) error { return err } + // Unload the model if it's running before deletion + opts := &runOptions{ + Model: args[0], + KeepAlive: &api.Duration{Duration: 0}, + } + if err := loadOrUnloadModel(cmd, opts); err != nil { + if !strings.Contains(err.Error(), "not found") { + return fmt.Errorf("unable to stop existing running model \"%s\": %s", args[0], err) + } + } + for _, name := range args { req := api.DeleteRequest{Name: name} if err := client.Delete(cmd.Context(), &req); err != nil { @@ -720,125 +774,89 @@ func ShowHandler(cmd *cobra.Command, args []string) error { return nil } - showInfo(resp) - - return nil + return showInfo(resp, os.Stdout) } -func showInfo(resp *api.ShowResponse) { - modelData := [][]string{ - {"parameters", resp.Details.ParameterSize}, - {"quantization", resp.Details.QuantizationLevel}, - } - if resp.ModelInfo != nil { - arch := resp.ModelInfo["general.architecture"].(string) - modelData = append(modelData, - []string{"arch", arch}, - []string{"context length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.context_length", arch)].(float64))}, - []string{"embedding length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.embedding_length", arch)].(float64))}, - ) +func showInfo(resp *api.ShowResponse, w io.Writer) error { + tableRender := func(header string, rows func() [][]string) { + fmt.Fprintln(w, " ", header) + table := tablewriter.NewWriter(w) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetBorder(false) + table.SetNoWhiteSpace(true) + table.SetTablePadding(" ") + + switch header { + case "Template", "System", "License": + table.SetColWidth(100) + } + + table.AppendBulk(rows()) + table.Render() + fmt.Fprintln(w) } - mainTableData := [][]string{ - {"Model"}, - {renderSubTable(modelData, false)}, - } + tableRender("Model", func() (rows [][]string) { + if resp.ModelInfo != nil { + arch := 
resp.ModelInfo["general.architecture"].(string) + rows = append(rows, []string{"", "architecture", arch}) + rows = append(rows, []string{"", "parameters", format.HumanNumber(uint64(resp.ModelInfo["general.parameter_count"].(float64)))}) + rows = append(rows, []string{"", "context length", strconv.FormatFloat(resp.ModelInfo[fmt.Sprintf("%s.context_length", arch)].(float64), 'f', -1, 64)}) + rows = append(rows, []string{"", "embedding length", strconv.FormatFloat(resp.ModelInfo[fmt.Sprintf("%s.embedding_length", arch)].(float64), 'f', -1, 64)}) + } else { + rows = append(rows, []string{"", "architecture", resp.Details.Family}) + rows = append(rows, []string{"", "parameters", resp.Details.ParameterSize}) + } + rows = append(rows, []string{"", "quantization", resp.Details.QuantizationLevel}) + return + }) if resp.ProjectorInfo != nil { - projectorData := [][]string{ - {"arch", "clip"}, - {"parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))}, - } - - if projectorType, ok := resp.ProjectorInfo["clip.projector_type"]; ok { - projectorData = append(projectorData, []string{"projector type", projectorType.(string)}) - } - - projectorData = append(projectorData, - []string{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))}, - []string{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))}, - ) - - mainTableData = append(mainTableData, - []string{"Projector"}, - []string{renderSubTable(projectorData, false)}, - ) + tableRender("Projector", func() (rows [][]string) { + arch := resp.ProjectorInfo["general.architecture"].(string) + rows = append(rows, []string{"", "architecture", arch}) + rows = append(rows, []string{"", "parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))}) + rows = append(rows, []string{"", "embedding length", strconv.FormatFloat(resp.ProjectorInfo[fmt.Sprintf("%s.vision.embedding_length", arch)].(float64), 'f', -1, 64)}) + rows = append(rows, []string{"", "dimensions", strconv.FormatFloat(resp.ProjectorInfo[fmt.Sprintf("%s.vision.projection_dim", arch)].(float64), 'f', -1, 64)}) + return + }) } if resp.Parameters != "" { - mainTableData = append(mainTableData, []string{"Parameters"}, []string{formatParams(resp.Parameters)}) + tableRender("Parameters", func() (rows [][]string) { + scanner := bufio.NewScanner(strings.NewReader(resp.Parameters)) + for scanner.Scan() { + if text := scanner.Text(); text != "" { + rows = append(rows, append([]string{""}, strings.Fields(text)...)) + } + } + return + }) + } + + head := func(s string, n int) (rows [][]string) { + scanner := bufio.NewScanner(strings.NewReader(s)) + for scanner.Scan() && (len(rows) < n || n < 0) { + if text := scanner.Text(); text != "" { + rows = append(rows, []string{"", strings.TrimSpace(text)}) + } + } + return } if resp.System != "" { - mainTableData = append(mainTableData, []string{"System"}, []string{renderSubTable(twoLines(resp.System), true)}) + tableRender("System", func() [][]string { + return head(resp.System, 2) + }) } if resp.License != "" { - mainTableData = append(mainTableData, []string{"License"}, []string{renderSubTable(twoLines(resp.License), true)}) + tableRender("License", func() [][]string { + return head(resp.License, 2) + }) } - table := tablewriter.NewWriter(os.Stdout) - table.SetAutoWrapText(false) - table.SetBorder(false) - table.SetAlignment(tablewriter.ALIGN_LEFT) - - for _, v := range mainTableData { - 
table.Append(v) - } - - table.Render() -} - -func renderSubTable(data [][]string, file bool) string { - var buf bytes.Buffer - table := tablewriter.NewWriter(&buf) - table.SetAutoWrapText(!file) - table.SetBorder(false) - table.SetNoWhiteSpace(true) - table.SetTablePadding("\t") - table.SetAlignment(tablewriter.ALIGN_LEFT) - - for _, v := range data { - table.Append(v) - } - - table.Render() - - renderedTable := buf.String() - lines := strings.Split(renderedTable, "\n") - for i, line := range lines { - lines[i] = "\t" + line - } - - return strings.Join(lines, "\n") -} - -func twoLines(s string) [][]string { - lines := strings.Split(s, "\n") - res := [][]string{} - - count := 0 - for _, line := range lines { - line = strings.TrimSpace(line) - if line != "" { - count++ - res = append(res, []string{line}) - if count == 2 { - return res - } - } - } - return res -} - -func formatParams(s string) string { - lines := strings.Split(s, "\n") - table := [][]string{} - - for _, line := range lines { - table = append(table, strings.Fields(line)) - } - return renderSubTable(table, false) + return nil } func CopyHandler(cmd *cobra.Command, args []string) error { @@ -1328,6 +1346,15 @@ func NewCLI() *cobra.Command { runCmd.Flags().Bool("insecure", false, "Use an insecure registry") runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically") runCmd.Flags().String("format", "", "Response format (e.g. json)") + + stopCmd := &cobra.Command{ + Use: "stop MODEL", + Short: "Stop a running model", + Args: cobra.ExactArgs(1), + PreRunE: checkServerHeartbeat, + RunE: StopHandler, + } + serveCmd := &cobra.Command{ Use: "serve", Aliases: []string{"start"}, @@ -1395,6 +1422,7 @@ func NewCLI() *cobra.Command { createCmd, showCmd, runCmd, + stopCmd, pullCmd, pushCmd, listCmd, @@ -1434,6 +1462,7 @@ func NewCLI() *cobra.Command { createCmd, showCmd, runCmd, + stopCmd, pullCmd, pushCmd, listCmd, diff --git a/cmd/cmd_test.go b/cmd/cmd_test.go new file mode 100644 index 00000000..9d23f3e9 --- /dev/null +++ b/cmd/cmd_test.go @@ -0,0 +1,272 @@ +package cmd + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/spf13/cobra" + + "github.com/ollama/ollama/api" +) + +func TestShowInfo(t *testing.T) { + t.Run("bare details", func(t *testing.T) { + var b bytes.Buffer + if err := showInfo(&api.ShowResponse{ + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + quantization FP16 + +` + + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) + + t.Run("bare model info", func(t *testing.T) { + var b bytes.Buffer + if err := showInfo(&api.ShowResponse{ + ModelInfo: map[string]any{ + "general.architecture": "test", + "general.parameter_count": float64(7_000_000_000), + "test.context_length": float64(0), + "test.embedding_length": float64(0), + }, + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + context length 0 + embedding length 0 + quantization FP16 + +` + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) + + t.Run("parameters", func(t 
*testing.T) { + var b bytes.Buffer + if err := showInfo(&api.ShowResponse{ + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + Parameters: ` + stop never + stop gonna + stop give + stop you + stop up + temperature 99`, + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + quantization FP16 + + Parameters + stop never + stop gonna + stop give + stop you + stop up + temperature 99 + +` + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) + + t.Run("project info", func(t *testing.T) { + var b bytes.Buffer + if err := showInfo(&api.ShowResponse{ + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + ProjectorInfo: map[string]any{ + "general.architecture": "clip", + "general.parameter_count": float64(133_700_000), + "clip.vision.embedding_length": float64(0), + "clip.vision.projection_dim": float64(0), + }, + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + quantization FP16 + + Projector + architecture clip + parameters 133.70M + embedding length 0 + dimensions 0 + +` + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) + + t.Run("system", func(t *testing.T) { + var b bytes.Buffer + if err := showInfo(&api.ShowResponse{ + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + System: `You are a pirate! +Ahoy, matey! +Weigh anchor! + `, + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + quantization FP16 + + System + You are a pirate! + Ahoy, matey! 
+ +` + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) + + t.Run("license", func(t *testing.T) { + var b bytes.Buffer + license, err := os.ReadFile(filepath.Join("..", "LICENSE")) + if err != nil { + t.Fatal(err) + } + + if err := showInfo(&api.ShowResponse{ + Details: api.ModelDetails{ + Family: "test", + ParameterSize: "7B", + QuantizationLevel: "FP16", + }, + License: string(license), + }, &b); err != nil { + t.Fatal(err) + } + + expect := ` Model + architecture test + parameters 7B + quantization FP16 + + License + MIT License + Copyright (c) Ollama + +` + if diff := cmp.Diff(expect, b.String()); diff != "" { + t.Errorf("unexpected output (-want +got):\n%s", diff) + } + }) +} + +func TestDeleteHandler(t *testing.T) { + stopped := false + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/api/delete" && r.Method == http.MethodDelete { + var req api.DeleteRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if req.Name == "test-model" { + w.WriteHeader(http.StatusOK) + } else { + w.WriteHeader(http.StatusNotFound) + } + return + } + if r.URL.Path == "/api/generate" && r.Method == http.MethodPost { + var req api.GenerateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if req.Model == "test-model" { + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(api.GenerateResponse{ + Done: true, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + stopped = true + return + } else { + w.WriteHeader(http.StatusNotFound) + if err := json.NewEncoder(w).Encode(api.GenerateResponse{ + Done: false, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + } + } + })) + + t.Setenv("OLLAMA_HOST", mockServer.URL) + t.Cleanup(mockServer.Close) + + cmd := &cobra.Command{} + cmd.SetContext(context.TODO()) + if err := DeleteHandler(cmd, []string{"test-model"}); err != nil { + t.Fatalf("DeleteHandler failed: %v", err) + } + if !stopped { + t.Fatal("Model was not stopped before deletion") + } + + err := DeleteHandler(cmd, []string{"test-model-not-found"}) + if err == nil || !strings.Contains(err.Error(), "unable to stop existing running model \"test-model-not-found\"") { + t.Fatalf("DeleteHandler failed: expected error about stopping non-existent model, got %v", err) + } +} diff --git a/cmd/interactive.go b/cmd/interactive.go index 4462cf29..94578f11 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -18,7 +18,6 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/parser" - "github.com/ollama/ollama/progress" "github.com/ollama/ollama/readline" "github.com/ollama/ollama/types/errtypes" ) @@ -31,26 +30,6 @@ const ( MultilineSystem ) -func loadModel(cmd *cobra.Command, opts *runOptions) error { - p := progress.NewProgress(os.Stderr) - defer p.StopAndClear() - - spinner := progress.NewSpinner("") - p.Add("", spinner) - - client, err := api.ClientFromEnvironment() - if err != nil { - return err - } - - chatReq := &api.ChatRequest{ - Model: opts.Model, - KeepAlive: opts.KeepAlive, - } - - return client.Chat(cmd.Context(), chatReq, func(api.ChatResponse) error { return nil }) -} - func generateInteractive(cmd *cobra.Command, opts runOptions) error { usage := func() { 
fmt.Fprintln(os.Stderr, "Available Commands:") @@ -217,7 +196,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { opts.Model = args[1] opts.Messages = []api.Message{} fmt.Printf("Loading model '%s'\n", opts.Model) - if err := loadModel(cmd, &opts); err != nil { + if err := loadOrUnloadModel(cmd, &opts); err != nil { return err } continue @@ -371,7 +350,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { switch args[1] { case "info": - showInfo(resp) + _ = showInfo(resp, os.Stderr) case "license": if resp.License == "" { fmt.Println("No license was specified for this model.") diff --git a/convert/convert.go b/convert/convert.go index 8c7b0943..44783b6e 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -208,14 +208,18 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error { return err } - if vocabSize := int(p.VocabSize); vocabSize > len(t.Vocabulary.Tokens) { - slog.Warn("vocabulary is smaller than expected, padding with dummy tokens", "expect", p.VocabSize, "actual", len(t.Vocabulary.Tokens)) + vocabSize := int(p.VocabSize) + switch { + case vocabSize > len(t.Vocabulary.Tokens): + slog.Warn("vocabulary is smaller than expected, padding with dummy tokens", "expect", vocabSize, "actual", len(t.Vocabulary.Tokens)) for i := range vocabSize - len(t.Vocabulary.Tokens) { t.Vocabulary.Tokens = append(t.Vocabulary.Tokens, fmt.Sprintf("[PAD%d]", i)) t.Vocabulary.Scores = append(t.Vocabulary.Scores, -1) t.Vocabulary.Types = append(t.Vocabulary.Types, tokenTypeUserDefined) } - } else { + case vocabSize < len(t.Vocabulary.Tokens): + return fmt.Errorf("vocabulary is larger than expected '%d' instead of '%d'", len(t.Vocabulary.Tokens), vocabSize) + default: slog.Debug("vocabulary", "size", len(t.Vocabulary.Tokens)) } diff --git a/docs/api.md b/docs/api.md index aed2b69f..fe2eb82c 100644 --- a/docs/api.md +++ b/docs/api.md @@ -69,7 +69,7 @@ Enable JSON mode by setting the `format` parameter to `json`. This will structur ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "Why is the sky blue?" }' ``` @@ -80,7 +80,7 @@ A stream of JSON objects is returned: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T08:52:19.385406455-07:00", "response": "The", "done": false @@ -102,7 +102,7 @@ To calculate how fast the response is generated in tokens per second (token/s), ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T19:22:45.499127Z", "response": "", "done": true, @@ -124,7 +124,7 @@ A response can be received in one reply when streaming is off. ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "Why is the sky blue?", "stream": false }' @@ -136,7 +136,7 @@ If `stream` is set to `false`, the response will be a single JSON object: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T19:22:45.499127Z", "response": "The sky is blue because it is the color of the sky.", "done": true, @@ -194,7 +194,7 @@ curl http://localhost:11434/api/generate -d '{ ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "What color is the sky at different times of the day? 
Respond using JSON", "format": "json", "stream": false @@ -205,7 +205,7 @@ curl http://localhost:11434/api/generate -d '{ ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-11-09T21:07:55.186497Z", "response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n", "done": true, @@ -327,7 +327,7 @@ If you want to set custom options for the model at runtime rather than in the Mo ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "Why is the sky blue?", "stream": false, "options": { @@ -368,7 +368,7 @@ curl http://localhost:11434/api/generate -d '{ ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T19:22:45.499127Z", "response": "The sky is blue because it is the color of the sky.", "done": true, @@ -390,7 +390,7 @@ If an empty prompt is provided, the model will be loaded into memory. ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3" + "model": "llama3.2" }' ``` @@ -400,13 +400,40 @@ A single JSON object is returned: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-12-18T19:52:07.071755Z", "response": "", "done": true } ``` +#### Unload a model + +If an empty prompt is provided and the `keep_alive` parameter is set to `0`, a model will be unloaded from memory. + +##### Request + +```shell +curl http://localhost:11434/api/generate -d '{ + "model": "llama3.2", + "keep_alive": 0 +}' +``` + +##### Response + +A single JSON object is returned: + +```json +{ + "model": "llama3.2", + "created_at": "2024-09-12T03:54:03.516566Z", + "response": "", + "done": true, + "done_reason": "unload" +} +``` + ## Generate a chat completion ```shell @@ -445,7 +472,7 @@ Send a chat message with a streaming response. ```shell curl http://localhost:11434/api/chat -d '{ - "model": "llama3", + "model": "llama3.2", "messages": [ { "role": "user", @@ -461,7 +488,7 @@ A stream of JSON objects is returned: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T08:52:19.385406455-07:00", "message": { "role": "assistant", @@ -476,7 +503,7 @@ Final response: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T19:22:45.499127Z", "done": true, "total_duration": 4883583458, @@ -494,7 +521,7 @@ Final response: ```shell curl http://localhost:11434/api/chat -d '{ - "model": "llama3", + "model": "llama3.2", "messages": [ { "role": "user", @@ -509,7 +536,7 @@ curl http://localhost:11434/api/chat -d '{ ```json { - "model": "registry.ollama.ai/library/llama3:latest", + "model": "llama3.2", "created_at": "2023-12-12T14:13:43.416799Z", "message": { "role": "assistant", @@ -533,7 +560,7 @@ Send a chat message with a conversation history. 
You can use this same approach ```shell curl http://localhost:11434/api/chat -d '{ - "model": "llama3", + "model": "llama3.2", "messages": [ { "role": "user", @@ -557,7 +584,7 @@ A stream of JSON objects is returned: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T08:52:19.385406455-07:00", "message": { "role": "assistant", @@ -571,7 +598,7 @@ Final response: ```json { - "model": "llama3", + "model": "llama3.2", "created_at": "2023-08-04T19:22:45.499127Z", "done": true, "total_duration": 8113331500, @@ -629,7 +656,7 @@ curl http://localhost:11434/api/chat -d '{ ```shell curl http://localhost:11434/api/chat -d '{ - "model": "llama3", + "model": "llama3.2", "messages": [ { "role": "user", @@ -647,7 +674,7 @@ curl http://localhost:11434/api/chat -d '{ ```json { - "model": "registry.ollama.ai/library/llama3:latest", + "model": "llama3.2", "created_at": "2023-12-12T14:13:43.416799Z", "message": { "role": "assistant", @@ -669,7 +696,7 @@ curl http://localhost:11434/api/chat -d '{ ``` curl http://localhost:11434/api/chat -d '{ - "model": "llama3.1", + "model": "llama3.2", "messages": [ { "role": "user", @@ -708,7 +735,7 @@ curl http://localhost:11434/api/chat -d '{ ```json { - "model": "llama3.1", + "model": "llama3.2", "created_at": "2024-07-22T20:33:28.123648Z", "message": { "role": "assistant", @@ -736,6 +763,64 @@ curl http://localhost:11434/api/chat -d '{ } ``` +#### Load a model + +If the messages array is empty, the model will be loaded into memory. + +##### Request + +``` +curl http://localhost:11434/api/chat -d '{ + "model": "llama3.2", + "messages": [] +}' +``` + +##### Response +```json +{ + "model": "llama3.2", + "created_at":"2024-09-12T21:17:29.110811Z", + "message": { + "role": "assistant", + "content": "" + }, + "done_reason": "load", + "done": true +} +``` + +#### Unload a model + +If the messages array is empty and the `keep_alive` parameter is set to `0`, a model will be unloaded from memory. + +##### Request + +``` +curl http://localhost:11434/api/chat -d '{ + "model": "llama3.2", + "messages": [], + "keep_alive": 0 +}' +``` + +##### Response + +A single JSON object is returned: + +```json +{ + "model": "llama3.2", + "created_at":"2024-09-12T21:33:17.547535Z", + "message": { + "role": "assistant", + "content": "" + }, + "done_reason": "unload", + "done": true +} +``` + ## Create a Model ```shell @@ -904,7 +989,7 @@ Show information about a model including details, modelfile, template, parameter ```shell curl http://localhost:11434/api/show -d '{ - "name": "llama3" + "name": "llama3.2" }' ``` @@ -965,7 +1050,7 @@ Copy a model. Creates a model with another name from an existing model. ```shell curl http://localhost:11434/api/copy -d '{ - "source": "llama3", + "source": "llama3.2", "destination": "llama3-backup" }' ``` @@ -1020,7 +1105,7 @@ Download a model from the ollama library. Cancelled pulls are resumed from where ```shell curl http://localhost:11434/api/pull -d '{ - "name": "llama3" + "name": "llama3.2" }' ``` diff --git a/docs/development.md b/docs/development.md index cd6c41af..e67689ab 100644 --- a/docs/development.md +++ b/docs/development.md @@ -148,3 +148,22 @@ In addition to the common Windows development tools described above, install AMD - [Strawberry Perl](https://strawberryperl.com/) Lastly, add `ninja.exe` included with MSVC to the system path (e.g. `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja`). 
+ +#### Windows arm64 + +The default `Developer PowerShell for VS 2022` may default to x86, which is not what you want. To ensure you get an arm64 development environment, start a plain PowerShell terminal and run: + +```powershell +import-module 'C:\\Program Files\\Microsoft Visual Studio\\2022\\Community\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll' +Enter-VsDevShell -Arch arm64 -vsinstallpath 'C:\\Program Files\\Microsoft Visual Studio\\2022\\Community' -skipautomaticlocation +``` + +You can confirm the target architecture with `write-host $env:VSCMD_ARG_TGT_ARCH`. + +Follow the instructions at https://www.msys2.org/wiki/arm64/ to set up an arm64 msys2 environment. Ollama requires gcc and mingw32-make to compile, which are not currently available on Windows arm64, but a gcc compatibility adapter is available via `mingw-w64-clang-aarch64-gcc-compat`. At a minimum you will need to install the following: + +``` +pacman -S mingw-w64-clang-aarch64-clang mingw-w64-clang-aarch64-gcc-compat mingw-w64-clang-aarch64-make make +``` + +You will need to ensure your PATH includes go, cmake, gcc, clang, and mingw32-make to build ollama from source (typically `C:\msys64\clangarm64\bin\`). \ No newline at end of file diff --git a/docs/docker.md b/docs/docker.md index 314666b2..9c758c38 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -63,7 +63,7 @@ docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 114 Now you can run a model: ``` -docker exec -it ollama ollama run llama3.1 +docker exec -it ollama ollama run llama3.2 ``` ### Try different models diff --git a/docs/faq.md b/docs/faq.md index 356d5105..0dbbb3ff 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -32,7 +32,7 @@ When using the API, specify the `num_ctx` parameter: ```shell curl http://localhost:11434/api/generate -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "Why is the sky blue?", "options": { "num_ctx": 4096 @@ -232,14 +232,18 @@ curl http://localhost:11434/api/chat -d '{"model": "mistral"}' To preload a model using the CLI, use the command: ```shell -ollama run llama3.1 "" +ollama run llama3.2 "" ``` ## How do I keep a model loaded in memory or make it unload immediately? -By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` and `/api/chat` API endpoints to control how long the model is left in memory. +By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you're making numerous requests to the LLM. If you want to immediately unload a model from memory, use the `ollama stop` command: -The `keep_alive` parameter can be set to: +```shell +ollama stop llama3.2 +``` + +If you're using the API, use the `keep_alive` parameter with the `/api/generate` and `/api/chat` endpoints to set the amount of time that a model stays in memory. The `keep_alive` parameter can be set to: * a duration string (such as "10m" or "24h") * a number in seconds (such as 3600) * any negative number which will keep the model loaded in memory (e.g.
-1 or "-1m") @@ -247,17 +251,17 @@ The `keep_alive` parameter can be set to: For example, to preload a model and leave it in memory use: ```shell -curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": -1}' +curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": -1}' ``` To unload the model and free up memory use: ```shell -curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": 0}' +curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": 0}' ``` -Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable uses the same parameter types as the `keep_alive` parameter types mentioned above. Refer to section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable. +Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable uses the same parameter types as the `keep_alive` parameter types mentioned above. Refer to the section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable. -If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints. +The `keep_alive` API parameter with the `/api/generate` and `/api/chat` API endpoints will override the `OLLAMA_KEEP_ALIVE` setting. ## How do I manage the maximum number of requests the Ollama server can queue? diff --git a/docs/import.md b/docs/import.md index 1a90bc48..2346886f 100644 --- a/docs/import.md +++ b/docs/import.md @@ -38,7 +38,7 @@ Ollama supports importing adapters based on several different model architecture You can create the adapter using a fine tuning framework or tool which can output adapters in the Safetensors format, such as: - * Hugging Face [fine tuning framework] (https://huggingface.co/docs/transformers/en/training) + * Hugging Face [fine tuning framework](https://huggingface.co/docs/transformers/en/training) * [Unsloth](https://github.com/unslothai/unsloth) * [MLX](https://github.com/ml-explore/mlx) diff --git a/docs/modelfile.md b/docs/modelfile.md index 92df22ef..aa2849e7 100644 --- a/docs/modelfile.md +++ b/docs/modelfile.md @@ -11,7 +11,7 @@ A model file is the blueprint to create and share models with Ollama. - [Examples](#examples) - [Instructions](#instructions) - [FROM (Required)](#from-required) - - [Build from llama3.1](#build-from-llama31) + - [Build from existing model](#build-from-existing-model) - [Build from a Safetensors model](#build-from-a-safetensors-model) - [Build from a GGUF file](#build-from-a-gguf-file) - [PARAMETER](#parameter) @@ -50,7 +50,7 @@ INSTRUCTION arguments An example of a `Modelfile` creating a mario blueprint: ```modelfile -FROM llama3 +FROM llama3.2 # sets the temperature to 1 [higher is more creative, lower is more coherent] PARAMETER temperature 1 # sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token @@ -72,10 +72,10 @@ More examples are available in the [examples directory](../examples). To view the Modelfile of a given model, use the `ollama show --modelfile` command. 
```bash - > ollama show --modelfile llama3 + > ollama show --modelfile llama3.2 # Modelfile generated by "ollama show" # To build a new Modelfile based on this one, replace the FROM line with: - # FROM llama3:latest + # FROM llama3.2:latest FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29 TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|> @@ -100,10 +100,10 @@ The `FROM` instruction defines the base model to use when creating a model. FROM : ``` -#### Build from llama3.1 +#### Build from existing model ```modelfile -FROM llama3.1 +FROM llama3.2 ``` A list of available base models: diff --git a/docs/openai.md b/docs/openai.md index 0cbea6cc..e13842c0 100644 --- a/docs/openai.md +++ b/docs/openai.md @@ -25,7 +25,7 @@ chat_completion = client.chat.completions.create( 'content': 'Say this is a test', } ], - model='llama3', + model='llama3.2', ) response = client.chat.completions.create( @@ -46,13 +46,13 @@ response = client.chat.completions.create( ) completion = client.completions.create( - model="llama3", + model="llama3.2", prompt="Say this is a test", ) list_completion = client.models.list() -model = client.models.retrieve("llama3") +model = client.models.retrieve("llama3.2") embeddings = client.embeddings.create( model="all-minilm", @@ -74,7 +74,7 @@ const openai = new OpenAI({ const chatCompletion = await openai.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'llama3', + model: 'llama3.2', }) const response = await openai.chat.completions.create({ @@ -94,13 +94,13 @@ const response = await openai.chat.completions.create({ }) const completion = await openai.completions.create({ - model: "llama3", + model: "llama3.2", prompt: "Say this is a test.", }) const listCompletion = await openai.models.list() -const model = await openai.models.retrieve("llama3") +const model = await openai.models.retrieve("llama3.2") const embedding = await openai.embeddings.create({ model: "all-minilm", @@ -114,7 +114,7 @@ const embedding = await openai.embeddings.create({ curl http://localhost:11434/v1/chat/completions \ -H "Content-Type: application/json" \ -d '{ - "model": "llama3", + "model": "llama3.2", "messages": [ { "role": "system", @@ -154,13 +154,13 @@ curl http://localhost:11434/v1/chat/completions \ curl http://localhost:11434/v1/completions \ -H "Content-Type: application/json" \ -d '{ - "model": "llama3", + "model": "llama3.2", "prompt": "Say this is a test" }' curl http://localhost:11434/v1/models -curl http://localhost:11434/v1/models/llama3 +curl http://localhost:11434/v1/models/llama3.2 curl http://localhost:11434/v1/embeddings \ -H "Content-Type: application/json" \ @@ -274,7 +274,7 @@ curl http://localhost:11434/v1/embeddings \ Before using a model, pull it locally `ollama pull`: ```shell -ollama pull llama3 +ollama pull llama3.2 ``` ### Default model names @@ -282,7 +282,7 @@ ollama pull llama3 For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name: ``` -ollama cp llama3 gpt-3.5-turbo +ollama cp llama3.2 gpt-3.5-turbo ``` Afterwards, this new model name can be specified the `model` field: diff --git a/docs/template.md b/docs/template.md index 1d7104de..bd367e91 100644 --- a/docs/template.md +++ b/docs/template.md @@ -33,7 +33,7 @@ Omitting a template in these models puts the responsibility of correctly templat To add templates in your model, you'll need to add a `TEMPLATE` 
command to the Modelfile. Here's an example using Meta's Llama 3. ```dockerfile -FROM llama3 +FROM llama3.2 TEMPLATE """{{- if .System }}<|start_header_id|>system<|end_header_id|> diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 589061a8..0a89b87f 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -91,6 +91,17 @@ If none of those resolve the problem, gather additional information and file an - Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` +## AMD GPU Discovery + +On Linux, AMD GPU access typically requires `video` and/or `render` group membership to access the `/dev/kfd` device. If permissions are not set up correctly, Ollama will detect this and report an error in the server log. + +When running in a container, in some Linux distributions and container runtimes, the ollama process may be unable to access the GPU. Use `ls -ld /dev/kfd /dev/dri /dev/dri/*` on the host system to determine the group assignments on your system, and pass additional `--group-add ...` arguments to the container so it can access the required devices. + +If you are experiencing problems getting Ollama to correctly discover or use your GPU for inference, the following may help isolate the failure: +- `AMD_LOG_LEVEL=3` Enable info log levels in the AMD HIP/ROCm libraries. This can surface more detailed error codes that help troubleshoot problems. +- `OLLAMA_DEBUG=1` Report additional information during GPU discovery. +- Check dmesg for any errors from the amdgpu or kfd drivers: `sudo dmesg | grep -i amdgpu` and `sudo dmesg | grep -i kfd` + ## Windows Terminal Errors Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in a long string of strings like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect` To resolve this problem, please update to Win 10 22H1 or newer. diff --git a/docs/tutorials/langchainjs.md b/docs/tutorials/langchainjs.md index f925869b..86f895ae 100644 --- a/docs/tutorials/langchainjs.md +++ b/docs/tutorials/langchainjs.md @@ -15,7 +15,7 @@ import { Ollama } from "@langchain/community/llms/ollama"; const ollama = new Ollama({ baseUrl: "http://localhost:11434", - model: "llama3.1", + model: "llama3.2", }); const answer = await ollama.invoke(`why is the sky blue?`); @@ -23,7 +23,7 @@ const answer = await ollama.invoke(`why is the sky blue?`); console.log(answer); ``` -That will get us the same thing as if we ran `ollama run llama3.1 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app. +That will get us the same thing as if we ran `ollama run llama3.2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
```bash npm install cheerio diff --git a/docs/windows.md b/docs/windows.md index f681ffac..5f196756 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -29,7 +29,7 @@ Ollama uses unicode characters for progress indication, which may render as unkn Here's a quick example showing API access from `powershell` ```powershell -(Invoke-WebRequest -method POST -Body '{"model":"llama3", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json +(Invoke-WebRequest -method POST -Body '{"model":"llama3.2", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json ``` ## Troubleshooting diff --git a/envconfig/config.go b/envconfig/config.go index 14e3cb0c..9c1490a9 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -179,53 +179,6 @@ var ( HsaOverrideGfxVersion = String("HSA_OVERRIDE_GFX_VERSION") ) -func RunnersDir() (p string) { - if p := Var("OLLAMA_RUNNERS_DIR"); p != "" { - return p - } - - if runtime.GOOS != "windows" { - return - } - - defer func() { - if p == "" { - slog.Error("unable to locate llm runner directory. Set OLLAMA_RUNNERS_DIR to the location of 'ollama/runners'") - } - }() - - // On Windows we do not carry the payloads inside the main executable - exe, err := os.Executable() - if err != nil { - return - } - - cwd, err := os.Getwd() - if err != nil { - return - } - - var paths []string - for _, root := range []string{filepath.Dir(exe), filepath.Join(filepath.Dir(exe), LibRelativeToExe()), cwd} { - paths = append(paths, - root, - filepath.Join(root, runtime.GOOS+"-"+runtime.GOARCH), - filepath.Join(root, "dist", runtime.GOOS+"-"+runtime.GOARCH), - ) - } - - // Try a few variations to improve developer experience when building from source in the local tree - for _, path := range paths { - candidate := filepath.Join(path, "lib", "ollama", "runners") - if _, err := os.Stat(candidate); err == nil { - p = candidate - break - } - } - - return p -} - func Uint(key string, defaultValue uint) func() uint { return func() uint { if s := Var(key); s != "" { @@ -290,10 +243,22 @@ func AsMap() map[string]EnvVar { "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune(), "Do not prune model blobs on startup"}, "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel(), "Maximum number of parallel requests"}, "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", Origins(), "A comma separated list of allowed origins"}, - "OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir(), "Location for runners"}, "OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread(), "Always schedule model across all GPUs"}, "OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir(), "Location for temporary files"}, + + // Informational + "HTTP_PROXY": {"HTTP_PROXY", String("HTTP_PROXY")(), "HTTP proxy"}, + "HTTPS_PROXY": {"HTTPS_PROXY", String("HTTPS_PROXY")(), "HTTPS proxy"}, + "NO_PROXY": {"NO_PROXY", String("NO_PROXY")(), "No proxy"}, } + + if runtime.GOOS != "windows" { + // Windows environment variables are case-insensitive so there's no need to duplicate them + ret["http_proxy"] = EnvVar{"http_proxy", String("http_proxy")(), "HTTP proxy"} + ret["https_proxy"] = EnvVar{"https_proxy", String("https_proxy")(), "HTTPS proxy"} + ret["no_proxy"] = EnvVar{"no_proxy", String("no_proxy")(), "No proxy"} + } + if runtime.GOOS != "darwin" { ret["CUDA_VISIBLE_DEVICES"] = EnvVar{"CUDA_VISIBLE_DEVICES", CudaVisibleDevices(), "Set which NVIDIA devices are visible"} ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", 
HipVisibleDevices(), "Set which AMD devices are visible"} @@ -302,6 +267,7 @@ func AsMap() map[string]EnvVar { ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion(), "Override the gfx used for all detected AMD GPUs"} ret["OLLAMA_INTEL_GPU"] = EnvVar{"OLLAMA_INTEL_GPU", IntelGPU(), "Enable experimental Intel GPU detection"} } + return ret } diff --git a/examples/go-chat/main.go b/examples/go-chat/main.go index 7663fb8f..07430305 100644 --- a/examples/go-chat/main.go +++ b/examples/go-chat/main.go @@ -35,7 +35,7 @@ func main() { ctx := context.Background() req := &api.ChatRequest{ - Model: "llama3.1", + Model: "llama3.2", Messages: messages, } diff --git a/examples/langchain-python-rag-document/README.md b/examples/langchain-python-rag-document/README.md index e2f3bc02..d37afc9d 100644 --- a/examples/langchain-python-rag-document/README.md +++ b/examples/langchain-python-rag-document/README.md @@ -4,10 +4,10 @@ This example provides an interface for asking questions to a PDF document. ## Setup -1. Ensure you have the `llama3.1` model installed: +1. Ensure you have the `llama3.2` model installed: ``` -ollama pull llama3.1 +ollama pull llama3.2 ``` 2. Install the Python Requirements. diff --git a/examples/langchain-python-rag-document/main.py b/examples/langchain-python-rag-document/main.py index 6f7cec9b..4871a042 100644 --- a/examples/langchain-python-rag-document/main.py +++ b/examples/langchain-python-rag-document/main.py @@ -51,7 +51,7 @@ while True: template=template, ) - llm = Ollama(model="llama3.1", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])) + llm = Ollama(model="llama3.2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])) qa_chain = RetrievalQA.from_chain_type( llm, retriever=vectorstore.as_retriever(), diff --git a/examples/langchain-python-rag-privategpt/requirements.txt b/examples/langchain-python-rag-privategpt/requirements.txt index 0aad1fe5..4f2cee25 100644 --- a/examples/langchain-python-rag-privategpt/requirements.txt +++ b/examples/langchain-python-rag-privategpt/requirements.txt @@ -1,6 +1,6 @@ langchain==0.0.274 gpt4all==1.0.8 -chromadb==0.4.7 +chromadb==0.5.0 llama-cpp-python==0.1.81 urllib3==2.0.4 PyMuPDF==1.23.5 @@ -12,4 +12,4 @@ pandoc==2.3 pypandoc==1.11 tqdm==4.66.1 sentence_transformers==2.2.2 -numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability \ No newline at end of file +numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/examples/langchain-python-rag-websummary/README.md b/examples/langchain-python-rag-websummary/README.md index 29c706a3..746c47ab 100644 --- a/examples/langchain-python-rag-websummary/README.md +++ b/examples/langchain-python-rag-websummary/README.md @@ -4,10 +4,10 @@ This example summarizes the website, [https://ollama.com/blog/run-llama2-uncenso ## Running the Example -1. Ensure you have the `llama3.1` model installed: +1. Ensure you have the `llama3.2` model installed: ```bash - ollama pull llama3.1 + ollama pull llama3.2 ``` 2. Install the Python Requirements. 
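Editor's note: the docs/api.md and docs/faq.md changes above document loading a model with an empty prompt and unloading it with `keep_alive: 0`. For readers working from the Go side of this patch, here is a minimal sketch of that same flow using the in-repo `github.com/ollama/ollama/api` client. It is illustrative only and not part of the patch; it assumes `ClientFromEnvironment`, `Generate`, and `api.Duration` behave as they do in the other examples in this change.

```go
package main

import (
	"context"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// An empty prompt loads the model into memory, mirroring the
	// "Load a model" example added to docs/api.md above.
	load := &api.GenerateRequest{Model: "llama3.2"}
	if err := client.Generate(ctx, load, func(api.GenerateResponse) error { return nil }); err != nil {
		log.Fatal(err)
	}

	// An empty prompt with keep_alive set to 0 unloads it again, the API
	// equivalent of the new `ollama stop` command described in docs/faq.md.
	unload := &api.GenerateRequest{
		Model:     "llama3.2",
		KeepAlive: &api.Duration{Duration: 0},
	}
	if err := client.Generate(ctx, unload, func(api.GenerateResponse) error { return nil }); err != nil {
		log.Fatal(err)
	}
}
```

The final response from the unload call should report `done_reason: "unload"`, matching the JSON shown in the api.md additions.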
diff --git a/examples/langchain-python-rag-websummary/main.py b/examples/langchain-python-rag-websummary/main.py index 77b09fbb..56f8bd24 100644 --- a/examples/langchain-python-rag-websummary/main.py +++ b/examples/langchain-python-rag-websummary/main.py @@ -5,7 +5,7 @@ from langchain.chains.summarize import load_summarize_chain loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally") docs = loader.load() -llm = Ollama(model="llama3.1") +llm = Ollama(model="llama3.2") chain = load_summarize_chain(llm, chain_type="stuff") result = chain.invoke(docs) diff --git a/examples/langchain-python-simple/README.md b/examples/langchain-python-simple/README.md index 60db2c8c..680ab560 100644 --- a/examples/langchain-python-simple/README.md +++ b/examples/langchain-python-simple/README.md @@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama. ## Running the Example -1. Ensure you have the `llama3.1` model installed: +1. Ensure you have the `llama3.2` model installed: ```bash - ollama pull llama3.1 + ollama pull llama3.2 ``` 2. Install the Python Requirements. diff --git a/examples/langchain-python-simple/main.py b/examples/langchain-python-simple/main.py index a7ed81d6..8d6989c8 100644 --- a/examples/langchain-python-simple/main.py +++ b/examples/langchain-python-simple/main.py @@ -1,6 +1,6 @@ from langchain.llms import Ollama input = input("What is your question?") -llm = Ollama(model="llama3.1") +llm = Ollama(model="llama3.2") res = llm.predict(input) print (res) diff --git a/examples/modelfile-mario/Modelfile b/examples/modelfile-mario/Modelfile index a3747086..b8e49667 100644 --- a/examples/modelfile-mario/Modelfile +++ b/examples/modelfile-mario/Modelfile @@ -1,4 +1,4 @@ -FROM llama3.1 +FROM llama3.2 PARAMETER temperature 1 SYSTEM """ You are Mario from super mario bros, acting as an assistant. diff --git a/examples/modelfile-mario/readme.md b/examples/modelfile-mario/readme.md index c3f34197..882023ad 100644 --- a/examples/modelfile-mario/readme.md +++ b/examples/modelfile-mario/readme.md @@ -2,12 +2,12 @@ # Example character: Mario -This example shows how to create a basic character using Llama3.1 as the base model. +This example shows how to create a basic character using Llama 3.2 as the base model. To run this example: 1. Download the Modelfile -2. `ollama pull llama3.1` to get the base model used in the model file. +2. `ollama pull llama3.2` to get the base model used in the model file. 3. `ollama create NAME -f ./Modelfile` 4. `ollama run NAME` @@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?" What the model file looks like: ``` -FROM llama3.1 +FROM llama3.2 PARAMETER temperature 1 SYSTEM """ You are Mario from Super Mario Bros, acting as an assistant. diff --git a/examples/python-grounded-factuality-rag-check/README.md b/examples/python-grounded-factuality-rag-check/README.md new file mode 100644 index 00000000..cd72071c --- /dev/null +++ b/examples/python-grounded-factuality-rag-check/README.md @@ -0,0 +1,93 @@ +# RAG Hallucination Checker using Bespoke-Minicheck + +This example allows the user to ask questions related to a document, which can be specified via an article url. Relevant chunks are retreived from the document and given to `llama3.2` as context to answer the question. Then each sentence in the answer is checked against the retrieved chunks using `bespoke-minicheck` to ensure that the answer does not contain hallucinations. + +## Running the Example + +1. 
Ensure `all-minilm` (embedding) `llama3.2` (chat) and `bespoke-minicheck` (check) models installed: + + ```bash + ollama pull all-minilm + ollama pull llama3.2 + ollama pull bespoke-minicheck + ``` + +2. Install the dependencies. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python main.py + ``` + +## Expected Output + +```text +Enter the URL of an article you want to chat with, or press Enter for default example: + +Loaded, chunked, and embedded text from https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt. + +Enter your question or type quit: Who is the CEO of openai? + +Retrieved chunks: +OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can . It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence . + +OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can . It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence . More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . + +More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . + +OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. 
” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data . + +LLM Answer: +The text does not mention the CEO of OpenAI. It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership. + +LLM Claim: The text does not mention the CEO of OpenAI. +Is this claim supported by the context according to bespoke-minicheck? Yes + +LLM Claim: It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership. +Is this claim supported by the context according to bespoke-minicheck? No +``` + +The second claim is unsupported since the text mentions the research lead. + +Another tricky example: + +```text + +Enter your question or type quit: what sets o1 apart from gpt-4o? + +Retrieved chunks: +OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. ” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data . + +He says OpenAI also tested o1 against a qualifying exam for the International Mathematics Olympiad , and while GPT-4o only correctly solved only 13 percent of problems , o1 scored 83 percent . “ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. ” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world . + +More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . + +“ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. 
” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world . It also doesn ’ t have the ability to browse the web or process files and images . Still , the company believes it represents a brand-new class of capabilities . It was named o1 to indicate “ resetting the counter back to 1. ” “ I ’ m gon na be honest : I think we ’ re terrible at naming , traditionally , ” McGrew says . +LLM Answer: According to the text, several things set o1 apart from GPT-4o: + +* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%. +* The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it. +* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance. +* However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world. + +LLM Claim: According to the text, several things set o1 apart from GPT-4o: + +* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%. +Is this claim supported by the context according to bespoke-minicheck? Yes + +LLM Claim: * The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it. +Is this claim supported by the context according to bespoke-minicheck? Yes + +LLM Claim: * o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance. +Is this claim supported by the context according to bespoke-minicheck? No + +LLM Claim: * However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world. +Is this claim supported by the context according to bespoke-minicheck? Yes +``` + +We see that the third claim "* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance." is not supported by the context. This is because the context only mentions that o1 "is claimed to perform" which is different from "has been shown to perform". diff --git a/examples/python-grounded-factuality-rag-check/main.py b/examples/python-grounded-factuality-rag-check/main.py new file mode 100644 index 00000000..eab0b670 --- /dev/null +++ b/examples/python-grounded-factuality-rag-check/main.py @@ -0,0 +1,137 @@ +import ollama +import warnings +from mattsollamatools import chunker +from newspaper import Article +import numpy as np +from sklearn.neighbors import NearestNeighbors +import nltk + +warnings.filterwarnings( + "ignore", category=FutureWarning, module="transformers.tokenization_utils_base" +) +nltk.download("punkt_tab", quiet=True) + + +def getArticleText(url): + """Gets the text of an article from a URL. + + Often there are a bunch of ads and menus on pages for a news article. + This uses newspaper3k to get just the text of just the article. 
+ """ + article = Article(url) + article.download() + article.parse() + return article.text + + +def knn_search(question_embedding, embeddings, k=5): + """Performs K-nearest neighbors (KNN) search""" + X = np.array( + [item["embedding"] for article in embeddings for item in article["embeddings"]] + ) + source_texts = [ + item["source"] for article in embeddings for item in article["embeddings"] + ] + + # Fit a KNN model on the embeddings + knn = NearestNeighbors(n_neighbors=k, metric="cosine") + knn.fit(X) + + # Find the indices and distances of the k-nearest neighbors. + _, indices = knn.kneighbors(question_embedding, n_neighbors=k) + + # Get the indices and source texts of the best matches + best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)] + + return best_matches + + +def check(document, claim): + """Checks if the claim is supported by the document by calling bespoke-minicheck. + + Returns Yes/yes if the claim is supported by the document, No/no otherwise. + Support for logits will be added in the future. + + bespoke-minicheck's system prompt is defined as: + 'Determine whether the provided claim is consistent with the corresponding + document. Consistency in this context implies that all information presented in the claim + is substantiated by the document. If not, it should be considered inconsistent. Please + assess the claim's consistency with the document by responding with either "Yes" or "No".' + + bespoke-minicheck's user prompt is defined as: + "Document: {document}\nClaim: {claim}" + """ + prompt = f"Document: {document}\nClaim: {claim}" + response = ollama.generate( + model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0} + ) + return response["response"].strip() + + +if __name__ == "__main__": + allEmbeddings = [] + default_url = "https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt" + user_input = input( + "Enter the URL of an article you want to chat with, or press Enter for default example: " + ) + article_url = user_input.strip() if user_input.strip() else default_url + article = {} + article["embeddings"] = [] + article["url"] = article_url + text = getArticleText(article_url) + chunks = chunker(text) + + # Embed (batch) chunks using ollama + embeddings = ollama.embed(model="all-minilm", input=chunks)["embeddings"] + + for chunk, embedding in zip(chunks, embeddings): + item = {} + item["source"] = chunk + item["embedding"] = embedding + item["sourcelength"] = len(chunk) + article["embeddings"].append(item) + + allEmbeddings.append(article) + + print(f"\nLoaded, chunked, and embedded text from {article_url}.\n") + + while True: + # Input a question from the user + # For example, "Who is the chief research officer?" + question = input("Enter your question or type quit: ") + + if question.lower() == "quit": + break + + # Embed the user's question using ollama.embed + question_embedding = ollama.embed(model="all-minilm", input=question)[ + "embeddings" + ] + + # Perform KNN search to find the best matches (indices and source text) + best_matches = knn_search(question_embedding, allEmbeddings, k=4) + + sourcetext = "\n\n".join([source_text for (_, source_text) in best_matches]) + + print(f"\nRetrieved chunks: \n{sourcetext}\n") + + # Give the retreived chunks and question to the chat model + system_prompt = f"Only use the following information to answer the question. 
Do not use anything else: {sourcetext}" + + ollama_response = ollama.generate( + model="llama3.2", + prompt=question, + system=system_prompt, + options={"stream": False}, + ) + + answer = ollama_response["response"] + print(f"LLM Answer:\n{answer}\n") + + # Check each sentence in the response for grounded factuality + if answer: + for claim in nltk.sent_tokenize(answer): + print(f"LLM Claim: {claim}") + print( + f"Is this claim supported by the context according to bespoke-minicheck? {check(sourcetext, claim)}\n" + ) diff --git a/examples/python-grounded-factuality-rag-check/requirements.txt b/examples/python-grounded-factuality-rag-check/requirements.txt new file mode 100644 index 00000000..d4bd6df3 --- /dev/null +++ b/examples/python-grounded-factuality-rag-check/requirements.txt @@ -0,0 +1,8 @@ +ollama +lxml==5.3.0 +lxml_html_clean==0.2.2 +mattsollamatools==0.0.25 +newspaper3k==0.2.8 +nltk==3.9.1 +numpy==1.26.4 +scikit-learn==1.5.2 \ No newline at end of file diff --git a/examples/python-grounded-factuality-simple-check/main.py b/examples/python-grounded-factuality-simple-check/main.py new file mode 100644 index 00000000..0204f3b3 --- /dev/null +++ b/examples/python-grounded-factuality-simple-check/main.py @@ -0,0 +1,53 @@ +"""Simple example to demonstrate how to use the bespoke-minicheck model.""" + +import ollama + +# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve` + + +def check(document, claim): + """Checks if the claim is supported by the document by calling bespoke-minicheck. + + Returns Yes/yes if the claim is supported by the document, No/no otherwise. + Support for logits will be added in the future. + + bespoke-minicheck's system prompt is defined as: + 'Determine whether the provided claim is consistent with the corresponding + document. Consistency in this context implies that all information presented in the claim + is substantiated by the document. If not, it should be considered inconsistent. Please + assess the claim's consistency with the document by responding with either "Yes" or "No".' + + bespoke-minicheck's user prompt is defined as: + "Document: {document}\nClaim: {claim}" + """ + prompt = f"Document: {document}\nClaim: {claim}" + response = ollama.generate( + model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0} + ) + return response["response"].strip() + + +def get_user_input(prompt): + user_input = input(prompt) + if not user_input: + exit() + print() + return user_input + + +def main(): + while True: + # Get a document from the user (e.g. "Ryan likes running and biking.") + document = get_user_input("Enter a document: ") + # Get a claim from the user (e.g. "Ryan likes to run.") + claim = get_user_input("Enter a claim: ") + # Check if the claim is supported by the document + grounded_factuality_check = check(document, claim) + print( + f"Is the claim supported by the document according to bespoke-minicheck? {grounded_factuality_check}" + ) + print("\n\n") + + +if __name__ == "__main__": + main() diff --git a/examples/python-grounded-factuality-simple-check/readme.md b/examples/python-grounded-factuality-simple-check/readme.md new file mode 100644 index 00000000..b164b5eb --- /dev/null +++ b/examples/python-grounded-factuality-simple-check/readme.md @@ -0,0 +1,54 @@ +# Simple Bespoke-Minicheck Example + +`bespoke-minicheck` is a model for checking if a claim is supported by a document. 
It is used through the **generate** endpoint, which is called in this example with a `prompt` that includes the expected formatting of the user input. + +## Running the Example + +1. Ensure you have the `bespoke-minicheck` model installed: + + ```bash + ollama pull bespoke-minicheck + ``` + +2. Install the dependencies: + + ```bash + pip install -r requirements.txt + ``` + +3. Run the program: + + ```bash + python main.py + ``` + +4. Enter a document and a claim when prompted: + + ```bash + Enter a document: Roses are red. + + Enter a claim: Roses are blue. + ``` + + The claim and document are then given to the `bespoke-minicheck` as inputs, which then generates a response (Yes or No) on whether the claim is supported by the document. + + ```bash + Is the claim supported by the document according to bespoke-minicheck? No + ``` + +## More Examples + +Document ([source](https://en.wikipedia.org/wiki/Apple_I)): +> The Apple Computer 1 (Apple-1[a]), later known predominantly as the Apple I(written with a Roman numeral),[b] is an 8-bit motherboard-only personal computer designed by Steve Wozniak[5][6] and released by the Apple Computer Company (now Apple Inc.) in 1976. The company was initially formed to sell the Apple I – its first product – and would later become the world's largest technology company.[7] The idea of starting a company and selling the computer came from Wozniak's friend and Apple co-founder Steve Jobs.[8][9] One of the main innovations of the Apple I was that it included video display terminal circuitry on its circuit board, allowing it to connect to a low-cost composite video monitor or television, instead of an expensive computer terminal, compared to most existing computers at the time. + +Claim: +>The Apple I is a 16-bit computer. + +Expected output: +>Is the claim supported by the document according to bespoke-minicheck? **No** + +Claim: +>Apple was originally called the Apple Computer Company. + +Expected output: +>Is the claim supported by the document according to bespoke-minicheck? **Yes** diff --git a/examples/python-grounded-factuality-simple-check/requirements.txt b/examples/python-grounded-factuality-simple-check/requirements.txt new file mode 100644 index 00000000..403abba6 --- /dev/null +++ b/examples/python-grounded-factuality-simple-check/requirements.txt @@ -0,0 +1 @@ +ollama diff --git a/examples/python-json-datagenerator/predefinedschema.py b/examples/python-json-datagenerator/predefinedschema.py index 68090ad7..91463760 100644 --- a/examples/python-json-datagenerator/predefinedschema.py +++ b/examples/python-json-datagenerator/predefinedschema.py @@ -2,7 +2,7 @@ import requests import json import random -model = "llama3.1" +model = "llama3.2" template = { "firstName": "", "lastName": "", diff --git a/examples/python-json-datagenerator/randomaddresses.py b/examples/python-json-datagenerator/randomaddresses.py index 878c9803..3df59d32 100644 --- a/examples/python-json-datagenerator/randomaddresses.py +++ b/examples/python-json-datagenerator/randomaddresses.py @@ -12,7 +12,7 @@ countries = [ "France", ] country = random.choice(countries) -model = "llama3.1" +model = "llama3.2" prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters." 
diff --git a/examples/python-json-datagenerator/readme.md b/examples/python-json-datagenerator/readme.md index 5b444dff..a551e1dd 100644 --- a/examples/python-json-datagenerator/readme.md +++ b/examples/python-json-datagenerator/readme.md @@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran ## Running the Example -1. Ensure you have the `llama3.1` model installed: +1. Ensure you have the `llama3.2` model installed: ```bash - ollama pull llama3.1 + ollama pull llama3.2 ``` 2. Install the Python Requirements. diff --git a/examples/python-loganalysis/Modelfile b/examples/python-loganalysis/Modelfile index 5237cb6e..b28aa0c0 100644 --- a/examples/python-loganalysis/Modelfile +++ b/examples/python-loganalysis/Modelfile @@ -4,5 +4,5 @@ SYSTEM """ You are a log file analyzer. You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer. """ -PARAMETER TEMPERATURE 0.3 +PARAMETER temperature 0.3 diff --git a/examples/python-loganalysis/readme.md b/examples/python-loganalysis/readme.md index 4be0baaa..03bab672 100644 --- a/examples/python-loganalysis/readme.md +++ b/examples/python-loganalysis/readme.md @@ -21,6 +21,8 @@ You can try this with the `logtest.logfile` file included in this directory. 2. Install the Python Requirements. ```bash + python3 -m venv .venv + source .venv/bin/activate pip install -r requirements.txt ``` diff --git a/examples/python-loganalysis/requirements.txt b/examples/python-loganalysis/requirements.txt index 9688b8ec..e7cb17ef 100644 --- a/examples/python-loganalysis/requirements.txt +++ b/examples/python-loganalysis/requirements.txt @@ -1 +1 @@ -Requests==2.31.0 +Requests>=2.32.3 diff --git a/examples/python-simplechat/client.py b/examples/python-simplechat/client.py index 85043d5f..6ef14ffc 100644 --- a/examples/python-simplechat/client.py +++ b/examples/python-simplechat/client.py @@ -2,7 +2,7 @@ import json import requests # NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve` -model = "llama3.1" # TODO: update this for whatever model you wish to use +model = "llama3.2" # TODO: update this for whatever model you wish to use def chat(messages): diff --git a/examples/python-simplechat/readme.md b/examples/python-simplechat/readme.md index 4c2ded4d..a4a2dfc1 100644 --- a/examples/python-simplechat/readme.md +++ b/examples/python-simplechat/readme.md @@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam ## Running the Example -1. Ensure you have the `llama3.1` model installed: +1. Ensure you have the `llama3.2` model installed: ```bash - ollama pull llama3.1 + ollama pull llama3.2 ``` 2. Install the Python Requirements. 
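For reference alongside the `python-simplechat` readme above, which introduces the **chat** endpoint, here is a minimal non-streaming sketch (not part of this patch) of the kind of request that `client.py` makes; the default `localhost:11434` host and the response shape are assumptions based on a standard Ollama install.

```python
import json
import requests

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`

def chat_once(content, model="llama3.2", host="http://localhost:11434"):
    """Send a single non-streaming chat request and return the assistant's reply."""
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": content}],
        "stream": False,  # the full client.py streams tokens; this sketch keeps it simple
    }
    r = requests.post(f"{host}/api/chat", data=json.dumps(payload))
    r.raise_for_status()
    return r.json()["message"]["content"]

if __name__ == "__main__":
    print(chat_once("Why is the sky blue?"))
```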
diff --git a/examples/typescript-simplechat/client.ts b/examples/typescript-simplechat/client.ts index 8ad113b1..d8faaa1b 100644 --- a/examples/typescript-simplechat/client.ts +++ b/examples/typescript-simplechat/client.ts @@ -1,6 +1,6 @@ import * as readline from "readline"; -const model = "llama3.1"; +const model = "llama3.2"; type Message = { role: "assistant" | "user" | "system"; content: string; diff --git a/gpu/amd_linux.go b/gpu/amd_linux.go index aab67efe..d3f5b9fc 100644 --- a/gpu/amd_linux.go +++ b/gpu/amd_linux.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/fs" "log/slog" "os" "path/filepath" @@ -359,6 +360,10 @@ func AMDGetGPUInfo() []RocmGPUInfo { if len(resp) == 0 { slog.Info("no compatible amdgpu devices detected") } + if err := verifyKFDDriverAccess(); err != nil { + slog.Error("amdgpu devices detected but permission problems block access", "error", err) + return nil + } return resp } @@ -455,3 +460,19 @@ func getFreeMemory(usedFile string) (uint64, error) { } return usedMemory, nil } + +func verifyKFDDriverAccess() error { + // Verify we have permissions - either running as root, or we have group access to the driver + fd, err := os.OpenFile("/dev/kfd", os.O_RDWR, 0o666) + if err != nil { + if errors.Is(err, fs.ErrPermission) { + return fmt.Errorf("permissions not set up properly. Either run ollama as root, or add you user account to the render group. %w", err) + } else if errors.Is(err, fs.ErrNotExist) { + // Container runtime failure? + return fmt.Errorf("kfd driver not loaded. If running in a container, remember to include '--device /dev/kfd --device /dev/dri'") + } + return fmt.Errorf("failed to check permission on /dev/kfd: %w", err) + } + fd.Close() + return nil +} diff --git a/gpu/assets.go b/gpu/assets.go deleted file mode 100644 index 6d62d0dc..00000000 --- a/gpu/assets.go +++ /dev/null @@ -1,148 +0,0 @@ -package gpu - -import ( - "errors" - "fmt" - "log/slog" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/ollama/ollama/envconfig" -) - -var ( - lock sync.Mutex - payloadsDir = "" -) - -func PayloadsDir() (string, error) { - lock.Lock() - defer lock.Unlock() - var err error - if payloadsDir == "" { - runnersDir := envconfig.RunnersDir() - - if runnersDir != "" { - payloadsDir = runnersDir - return payloadsDir, nil - } - - // The remainder only applies on non-windows where we still carry payloads in the main executable - cleanupTmpDirs() - tmpDir := envconfig.TmpDir() - if tmpDir == "" { - tmpDir, err = os.MkdirTemp("", "ollama") - if err != nil { - return "", fmt.Errorf("failed to generate tmp dir: %w", err) - } - } else { - err = os.MkdirAll(tmpDir, 0o755) - if err != nil { - return "", fmt.Errorf("failed to generate tmp dir %s: %w", tmpDir, err) - } - } - - // Track our pid so we can clean up orphaned tmpdirs - n := filepath.Join(tmpDir, "ollama.pid") - if err := os.WriteFile(n, []byte(strconv.Itoa(os.Getpid())), 0o644); err != nil { - return "", fmt.Errorf("failed to write pid file %s: %w", n, err) - } - - // We create a distinct subdirectory for payloads within the tmpdir - // This will typically look like /tmp/ollama3208993108/runners on linux - payloadsDir = filepath.Join(tmpDir, "runners") - } - return payloadsDir, nil -} - -// Best effort to clean up prior tmpdirs -func cleanupTmpDirs() { - matches, err := filepath.Glob(filepath.Join(os.TempDir(), "ollama*", "ollama.pid")) - if err != nil { - return - } - - for _, match := range matches { - raw, err := os.ReadFile(match) - if errors.Is(err, 
os.ErrNotExist) { - slog.Debug("not a ollama runtime directory, skipping", "path", match) - continue - } else if err != nil { - slog.Warn("could not read ollama.pid, skipping", "path", match, "error", err) - continue - } - - pid, err := strconv.Atoi(string(raw)) - if err != nil { - slog.Warn("invalid pid, skipping", "path", match, "error", err) - continue - } - - p, err := os.FindProcess(pid) - if err == nil && !errors.Is(p.Signal(syscall.Signal(0)), os.ErrProcessDone) { - slog.Warn("process still running, skipping", "pid", pid, "path", match) - continue - } - - if err := os.Remove(match); err != nil { - slog.Warn("could not cleanup stale pidfile", "path", match, "error", err) - } - - runners := filepath.Join(filepath.Dir(match), "runners") - if err := os.RemoveAll(runners); err != nil { - slog.Warn("could not cleanup stale runners", "path", runners, "error", err) - } - - if err := os.Remove(filepath.Dir(match)); err != nil { - slog.Warn("could not cleanup stale tmpdir", "path", filepath.Dir(match), "error", err) - } - } -} - -func Cleanup() { - lock.Lock() - defer lock.Unlock() - runnersDir := envconfig.RunnersDir() - if payloadsDir != "" && runnersDir == "" && runtime.GOOS != "windows" { - // We want to fully clean up the tmpdir parent of the payloads dir - tmpDir := filepath.Clean(filepath.Join(payloadsDir, "..")) - slog.Debug("cleaning up", "dir", tmpDir) - err := os.RemoveAll(tmpDir) - if err != nil { - // On windows, if we remove too quickly the llama.dll may still be in-use and fail to remove - time.Sleep(1000 * time.Millisecond) - err = os.RemoveAll(tmpDir) - if err != nil { - slog.Warn("failed to clean up", "dir", tmpDir, "err", err) - } - } - } -} - -func UpdatePath(dir string) { - if runtime.GOOS == "windows" { - tmpDir := filepath.Dir(dir) - pathComponents := strings.Split(os.Getenv("PATH"), ";") - i := 0 - for _, comp := range pathComponents { - if strings.EqualFold(comp, dir) { - return - } - // Remove any other prior paths to our temp dir - if !strings.HasPrefix(strings.ToLower(comp), strings.ToLower(tmpDir)) { - pathComponents[i] = comp - i++ - } - } - newPath := strings.Join(append([]string{dir}, pathComponents...), ";") - slog.Info("updating", "PATH", newPath) - os.Setenv("PATH", newPath) - } - // linux and darwin rely on rpath -} diff --git a/gpu/gpu.go b/gpu/gpu.go index 3de93f7f..db0e247b 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -93,10 +93,9 @@ func initCudaHandles() *cudaHandles { localAppData := os.Getenv("LOCALAPPDATA") cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", CudartMgmtName)} } - tmpDir, _ := PayloadsDir() - if tmpDir != "" { - // TODO - add "payloads" for subprocess - cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", CudartMgmtName)} + libDir := LibraryDir() + if libDir != "" { + cudartMgmtPatterns = []string{filepath.Join(libDir, CudartMgmtName)} } cudartMgmtPatterns = append(cudartMgmtPatterns, CudartGlobs...) 
@@ -206,13 +205,16 @@ func GetGPUInfo() GpuInfoList { if err != nil { slog.Warn("error looking up system memory", "error", err) } + depPath := LibraryDir() + cpus = []CPUInfo{ { GpuInfo: GpuInfo{ - memInfo: mem, - Library: "cpu", - Variant: cpuCapability.String(), - ID: "0", + memInfo: mem, + Library: "cpu", + Variant: cpuCapability.String(), + ID: "0", + DependencyPath: depPath, }, }, } @@ -225,8 +227,6 @@ func GetGPUInfo() GpuInfoList { return GpuInfoList{cpus[0].GpuInfo} } - depPath := LibraryDir() - // Load ALL libraries cHandles = initCudaHandles() diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index 88c8b03d..51730245 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -10,5 +10,6 @@ target_compile_definitions(${TARGET} PRIVATE target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT} ${LLAMA_SERVER_LDFLAGS}) if (WIN32) TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) + target_link_options(${TARGET} PRIVATE -municode -Wl,/subsystem:console) endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index fc673c47..6ce457ae 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -913,7 +913,9 @@ struct llama_server_context slot.sampled = result.tok; // search stop word and delete it - slot.generated_text += token_str; + if (!llama_token_is_eog(model, result.tok)) + slot.generated_text += token_str; + slot.has_next_token = true; if (slot.ctx_sampling->params.use_penalty_prompt_tokens && result.tok != -1) @@ -954,30 +956,36 @@ struct llama_server_context if (!incomplete) { size_t pos = std::min(slot.n_sent_text, slot.generated_text.size()); - const std::string str_test = slot.generated_text.substr(pos); - bool is_stop_full = false; - size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot); - if (stop_pos != std::string::npos) - { - is_stop_full = true; - slot.generated_text.erase( - slot.generated_text.begin() + pos + stop_pos, - slot.generated_text.end()); - pos = std::min(slot.n_sent_text, slot.generated_text.size()); - } - else - { - is_stop_full = false; - stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot); - } - // check if there is any token to predict - if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0)) - { - // no send the stop word in the response - result.text_to_send = slot.generated_text.substr(pos, std::string::npos); - slot.n_sent_text += result.text_to_send.size(); - // add the token to slot queue and cache + if (!llama_token_is_eog(model, result.tok)) { + const std::string str_test = slot.generated_text.substr(pos); + bool is_stop_full = false; + size_t stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_FULL, slot); + if (stop_pos != std::string::npos) + { + is_stop_full = true; + slot.generated_text.erase( + slot.generated_text.begin() + pos + stop_pos, + slot.generated_text.end()); + pos = std::min(slot.n_sent_text, slot.generated_text.size()); + } + else + { + is_stop_full = false; + stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot); + } + + // check if there is any token to predict + if (stop_pos == std::string::npos || (!slot.has_next_token && !is_stop_full && stop_pos > 0)) + { + // no send the stop word in the response + result.text_to_send = slot.generated_text.substr(pos, std::string::npos); + slot.n_sent_text += 
result.text_to_send.size(); + // add the token to slot queue and cache + } + } else { + result.text_to_send = slot.generated_text.substr(pos, std::string::npos); + slot.n_sent_text += result.text_to_send.size(); } if (slot.params.stream) @@ -1117,9 +1125,7 @@ struct llama_server_context {"multimodal", multimodal} }; - if (!llama_token_is_eog(model, tkn.tok)) { - res.result_json["content"] = tkn.text_to_send; - } + res.result_json["content"] = tkn.text_to_send; if (slot.sparams.n_probs > 0) { diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh index cef68ea1..3825c155 100644 --- a/llm/generate/gen_common.sh +++ b/llm/generate/gen_common.sh @@ -31,6 +31,7 @@ init_vars() { NO_WHOLE_ARCHIVE="" GCC_ARCH="-arch ${ARCH}" DIST_BASE=../../dist/darwin-${GOARCH}/ + PAYLOAD_BASE=../../build/darwin/${GOARCH} ;; "Linux") LIB_EXT="so" @@ -40,6 +41,7 @@ init_vars() { # Cross compiling not supported on linux - Use docker GCC_ARCH="" DIST_BASE=../../dist/linux-${GOARCH}/ + PAYLOAD_BASE=../../build/linux/${GOARCH} ;; *) ;; @@ -47,7 +49,8 @@ init_vars() { if [ -z "${CMAKE_CUDA_ARCHITECTURES}" ] ; then CMAKE_CUDA_ARCHITECTURES="50;52;61;70;75;80" fi - GZIP=$(which pigz 2>/dev/null || echo "gzip") + GZIP=$(command -v pigz 2>/dev/null || echo "gzip") + RUNNER_BASE="${DIST_BASE}/lib/ollama/runners" } git_module_setup() { @@ -66,22 +69,10 @@ git_module_setup() { } apply_patches() { - # Wire up our CMakefile - if ! grep ollama ${LLAMACPP_DIR}/CMakeLists.txt; then - echo 'add_subdirectory(../ext_server ext_server) # ollama' >>${LLAMACPP_DIR}/CMakeLists.txt - fi - - if [ -n "$(ls -A ../patches/*.diff)" ]; then - # apply temporary patches until fix is upstream - for patch in ../patches/*.diff; do - for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/); do - (cd ${LLAMACPP_DIR}; git checkout ${file}) - done - done - for patch in ../patches/*.diff; do - (cd ${LLAMACPP_DIR} && git apply ${patch}) - done - fi + # apply temporary patches until fix is upstream + for patch in ../patches/*.patch; do + git -c 'user.name=nobody' -c 'user.email=<>' -C ${LLAMACPP_DIR} am ${patch} + done } build() { @@ -91,17 +82,34 @@ build() { rm -f ${BUILD_DIR}/bin/ggml-common.h ${BUILD_DIR}/bin/ggml-metal.metal } -compress() { - echo "Compressing payloads to reduce overall binary size..." - rm -rf ${BUILD_DIR}/bin/*.gz +dist() { + [ -z "${RUNNER}" ] && exit 1 + mkdir -p ${RUNNER_BASE}/${RUNNER}/ for f in ${BUILD_DIR}/bin/* ; do - ${GZIP} -n --best -f ${f} & + cp ${f} ${RUNNER_BASE}/${RUNNER}/ + done + # check for lib directory + if [ -d ${BUILD_DIR}/lib ]; then + for f in ${BUILD_DIR}/lib/* ; do + cp ${f} ${RUNNER_BASE}/${RUNNER}/ + done + fi +} + +# Compress from the build $BUILD_DIR into the $PAYLOAD_BASE/$RUNNER dir +compress() { + [ -z "${RUNNER}" ] && exit 1 + echo "Compressing payloads with ${GZIP} to reduce overall binary size..." + rm -rf "${PAYLOAD_BASE}/${RUNNER}/" + mkdir -p "${PAYLOAD_BASE}/${RUNNER}/" + for f in ${BUILD_DIR}/bin/* ; do + ${GZIP} -c --best ${f} > "${PAYLOAD_BASE}/${RUNNER}/$(basename ${f}).gz" & compress_pids+=" $!" done # check for lib directory if [ -d ${BUILD_DIR}/lib ]; then for f in ${BUILD_DIR}/lib/* ; do - ${GZIP} -n --best -f ${f} & + ${GZIP} -c --best ${f} > "${PAYLOAD_BASE}/${RUNNER}/$(basename ${f}).gz" & compress_pids+=" $!" 
done fi @@ -117,7 +125,7 @@ wait_for_compress() { install() { echo "Installing libraries to bin dir ${BUILD_DIR}/bin/" - for lib in $(find ${BUILD_DIR} -name \*.${LIB_EXT}); do + for lib in $(find ${BUILD_DIR} -name \*.${LIB_EXT} | grep -v "${BUILD_DIR}/bin/" ); do rm -f "${BUILD_DIR}/bin/$(basename ${lib})" cp -af "${lib}" "${BUILD_DIR}/bin/" done diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index acea9c8d..c37366f3 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -39,7 +39,8 @@ case "${GOARCH}" in # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" - BUILD_DIR="../build/darwin/${ARCH}/cpu" + RUNNER=cpu + BUILD_DIR="../build/darwin/${GOARCH}/${RUNNER}" echo "Building LCD CPU" build sign ${BUILD_DIR}/bin/ollama_llama_server @@ -51,7 +52,8 @@ case "${GOARCH}" in # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" - BUILD_DIR="../build/darwin/${ARCH}/cpu_avx" + RUNNER=cpu_avx + BUILD_DIR="../build/darwin/${GOARCH}/${RUNNER}" echo "Building AVX CPU" build sign ${BUILD_DIR}/bin/ollama_llama_server @@ -63,7 +65,8 @@ case "${GOARCH}" in # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" - BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2" + RUNNER=cpu_avx2 + BUILD_DIR="../build/darwin/${GOARCH}/${RUNNER}" echo "Building AVX2 CPU" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation" build @@ -84,7 +87,8 @@ case "${GOARCH}" in if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then init_vars CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" - BUILD_DIR="../build/darwin/${ARCH}/metal" + RUNNER="metal" + BUILD_DIR="../build/darwin/${GOARCH}/${RUNNER}" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders" build sign ${BUILD_DIR}/bin/ollama_llama_server diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 1f702ca2..48d08fd0 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -79,10 +79,12 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then init_vars echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\"" CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=on -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" - BUILD_DIR="../build/linux/${ARCH}/cpu" + RUNNER="cpu" + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" echo "Building custom CPU" build install + dist compress else # Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512 @@ -102,10 +104,12 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" - BUILD_DIR="../build/linux/${ARCH}/cpu" + RUNNER=cpu + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" echo "Building LCD CPU" build install + dist compress fi @@ -120,10 +124,12 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" - BUILD_DIR="../build/linux/${ARCH}/cpu_avx" + RUNNER=cpu_avx + 
BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" echo "Building AVX CPU" build install + dist compress fi @@ -134,10 +140,12 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # init_vars CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" - BUILD_DIR="../build/linux/${ARCH}/cpu_avx2" + RUNNER=cpu_avx2 + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" echo "Building AVX2 CPU" build install + dist compress fi fi @@ -187,11 +195,13 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then fi export CUDAFLAGS="-t8" CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS} -DGGML_STATIC=off" - BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" + RUNNER=cuda${CUDA_VARIANT} + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" export LLAMA_SERVER_LDFLAGS="-L${CUDA_LIB_DIR} -lcudart -lcublas -lcublasLt -lcuda" CUDA_DIST_DIR="${CUDA_DIST_DIR:-${DIST_BASE}/lib/ollama}" build install + dist echo "Installing CUDA dependencies in ${CUDA_DIST_DIR}" mkdir -p "${CUDA_DIST_DIR}" for lib in ${CUDA_LIB_DIR}/libcudart.so* ${CUDA_LIB_DIR}/libcublas.so* ${CUDA_LIB_DIR}/libcublasLt.so* ; do @@ -212,7 +222,8 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI CC=icx CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF" - BUILD_DIR="../build/linux/${ARCH}/oneapi" + RUNNER=oneapi + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" ONEAPI_DIST_DIR="${DIST_BASE}/lib/ollama" export LLAMA_SERVER_LDFLAGS="-fsycl -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb" DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it @@ -231,6 +242,7 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then cp "${ONEAPI_ROOT}/compiler/latest/lib/libsvml.so" "${ONEAPI_DIST_DIR}" cp "${ONEAPI_ROOT}/compiler/latest/lib/libur_loader.so.0" "${ONEAPI_DIST_DIR}" install + dist compress fi @@ -259,7 +271,8 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then CMAKE_DEFS="${CMAKE_DEFS} ${OLLAMA_CUSTOM_ROCM_DEFS}" echo "Building custom ROCM GPU" fi - BUILD_DIR="../build/linux/${ARCH}/rocm${ROCM_VARIANT}" + RUNNER=rocm${ROCM_VARIANT} + BUILD_DIR="../build/linux/${GOARCH}/${RUNNER}" # ROCm dependencies are too large to fit into a unified bundle ROCM_DIST_DIR="${DIST_BASE}/../linux-${GOARCH}-rocm/lib/ollama" # TODO figure out how to disable runpath (rpath) @@ -269,13 +282,17 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then # copy the ROCM dependencies mkdir -p "${ROCM_DIST_DIR}" - for dep in $(ldd "${BUILD_DIR}/bin/ollama_llama_server" | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -v "${ARCH}/rocm${ROCM_VARIANT}" | grep -e rocm -e amdgpu -e libtinfo ); do + for dep in $(ldd "${BUILD_DIR}/bin/ollama_llama_server" | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -v "${GOARCH}/rocm${ROCM_VARIANT}" | grep -e rocm -e amdgpu -e libtinfo -e libnuma -e libelf ); do cp -a "${dep}"* "${ROCM_DIST_DIR}" + if [ $(readlink -f "${dep}") != "${dep}" ] ; then + cp $(readlink -f "${dep}") "${ROCM_DIST_DIR}" + fi done install + dist compress fi cleanup wait_for_compress -echo "go generate completed. LLM runners: $(cd ${BUILD_DIR}/..; echo *)" +echo "go generate completed. 
LLM runners: $(cd ${PAYLOAD_BASE}; echo *)" diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 7179c1bc..29ff5ff6 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -19,6 +19,19 @@ function amdGPUs { function init_vars { + write-host "Checking for cmake..." + get-command cmake + write-host "Checking for ninja..." + $d=(get-command -ea 'silentlycontinue' ninja).path + if ($null -eq $d) { + $MSVC_INSTALL=(Get-CimInstance MSFT_VSInstance -Namespace root/cimv2/vs)[0].InstallLocation + $matches=(gci -path $MSVC_INSTALL -r -fi ninja.exe) + if ($matches.count -eq 0) { + throw "Unable to locate ninja" + } + $ninjaDir=($matches[0].FullName | split-path -parent) + $env:PATH="$env:PATH;$ninjaDir" + } if (!$script:SRC_DIR) { $script:SRC_DIR = $(resolve-path "..\..\") } @@ -83,29 +96,9 @@ function git_module_setup { } function apply_patches { - # Wire up our CMakefile - if (!(Select-String -Path "${script:llamacppDir}/CMakeLists.txt" -Pattern 'ollama')) { - Add-Content -Path "${script:llamacppDir}/CMakeLists.txt" -Value 'add_subdirectory(../ext_server ext_server) # ollama' - } - # Apply temporary patches until fix is upstream - $patches = Get-ChildItem "../patches/*.diff" - foreach ($patch in $patches) { - # Extract file paths from the patch file - $filePaths = Get-Content $patch.FullName | Where-Object { $_ -match '^\+\+\+ ' } | ForEach-Object { - $parts = $_ -split ' ' - ($parts[1] -split '/', 2)[1] - } - - # Checkout each file - foreach ($file in $filePaths) { - git -C "${script:llamacppDir}" checkout $file - } - } - - # Apply each patch - foreach ($patch in $patches) { - git -C "${script:llamacppDir}" apply $patch.FullName + foreach ($patch in $(Get-ChildItem "../patches/*.patch")) { + git -c 'user.name=nobody' -c 'user.email=<>' -C "${script:llamacppDir}" am $patch.FullName } } @@ -165,7 +158,7 @@ function cleanup { } # Checkout each file - foreach ($file in $filePaths) { + foreach ($file in $filePaths) { git -C "${script:llamacppDir}" checkout $file } git -C "${script:llamacppDir}" checkout CMakeLists.txt @@ -182,12 +175,12 @@ function build_static() { if ((-not "${env:OLLAMA_SKIP_STATIC_GENERATE}") -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "static"))) { # GCC build for direct linking into the Go binary init_vars - # cmake will silently fallback to msvc compilers if mingw isn't in the path, so detect and fail fast - # as we need this to be compiled by gcc for golang to be able to link with itx - write-host "Checking for MinGW..." - # error action ensures we exit on failure - get-command gcc - get-command mingw32-make + + # cmake will silently fallback to msvc compilers if gcc isn't in the path, so detect and fail fast + # as we need this to be compiled by gcc for golang to be able to link with it + write-host "Checking for gcc..." 
+ get-command gcc + get-command mingw32-make $oldTargets = $script:cmakeTargets $script:cmakeTargets = @("llama", "ggml") $script:cmakeDefs = @( @@ -211,11 +204,10 @@ function build_static() { } } -function build_cpu($gen_arch) { +function build_cpu_x64 { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) { - # remaining llama.cpp builds use MSVC init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu" $script:distDir="$script:DIST_BASE\cpu" write-host "Building LCD CPU" @@ -227,6 +219,32 @@ function build_cpu($gen_arch) { } } +function build_cpu_arm64 { + if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) { + init_vars + write-host "Checking for clang..." + get-command clang + $env:CFLAGS="-march=armv8.7-a -fvectorize -ffp-model=fast -fno-finite-math-only" + $env:CXXFLAGS="$env:CFLAGS" + $env:LDFLAGS="-static-libstdc++" + $script:cmakeDefs = $script:commonCpuDefs + @( + "-DCMAKE_VERBOSE_MAKEFILE=on", + "-DCMAKE_C_COMPILER=clang.exe", + "-DCMAKE_CXX_COMPILER=clang++.exe", + "-DMSVC_RUNTIME_LIBRARY=MultiThreaded" + ) + $script:cmakeDefs + $script:buildDir="../build/windows/${script:ARCH}/cpu" + $script:distDir="$script:DIST_BASE\cpu" + write-host "Building LCD CPU" + build + sign + install + } else { + write-host "Skipping CPU generation step as requested" + } +} + + function build_cpu_avx() { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) { init_vars @@ -351,7 +369,7 @@ function build_rocm() { $script:buildDir="../build/windows/${script:ARCH}/rocm$script:ROCM_VARIANT" $script:distDir="$script:DIST_BASE\rocm$script:ROCM_VARIANT" $script:cmakeDefs += @( - "-G", "Ninja", + "-G", "Ninja", "-DCMAKE_C_COMPILER=clang.exe", "-DCMAKE_CXX_COMPILER=clang++.exe", "-DGGML_HIPBLAS=on", @@ -400,9 +418,9 @@ if ($($args.count) -eq 0) { apply_patches build_static if ($script:ARCH -eq "arm64") { - build_cpu("ARM64") + build_cpu_arm64 } else { # amd64 - build_cpu("x64") + build_cpu_x64 build_cpu_avx build_cpu_avx2 build_cuda @@ -416,5 +434,5 @@ if ($($args.count) -eq 0) { for ( $i = 0; $i -lt $args.count; $i++ ) { write-host "performing $($args[$i])" & $($args[$i]) - } + } } \ No newline at end of file diff --git a/llm/llm.go b/llm/llm.go index 6bb6591d..6c695889 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -5,7 +5,7 @@ package llm // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src -// #cgo windows,arm64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static 
-L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src +// #cgo windows,arm64 LDFLAGS: -lllama -lggml -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src // #include diff --git a/llm/llm_darwin_arm64.go b/llm/llm_darwin.go similarity index 55% rename from llm/llm_darwin_arm64.go rename to llm/llm_darwin.go index 20ce8552..60837ed0 100644 --- a/llm/llm_darwin_arm64.go +++ b/llm/llm_darwin.go @@ -1,11 +1,7 @@ package llm import ( - "embed" "syscall" ) -//go:embed build/darwin/arm64/*/bin/* -var libEmbed embed.FS - var LlamaServerSysProcAttr = &syscall.SysProcAttr{} diff --git a/llm/llm_darwin_amd64.go b/llm/llm_darwin_amd64.go deleted file mode 100644 index 60eed719..00000000 --- a/llm/llm_darwin_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -package llm - -import ( - "embed" - "syscall" -) - -//go:embed build/darwin/x86_64/*/bin/* -var libEmbed embed.FS - -var LlamaServerSysProcAttr = &syscall.SysProcAttr{} diff --git a/llm/llm_linux.go b/llm/llm_linux.go index 928b4e79..60837ed0 100644 --- a/llm/llm_linux.go +++ b/llm/llm_linux.go @@ -1,11 +1,7 @@ package llm import ( - "embed" "syscall" ) -//go:embed build/linux/*/*/bin/* -var libEmbed embed.FS - var LlamaServerSysProcAttr = &syscall.SysProcAttr{} diff --git a/llm/llm_windows.go b/llm/llm_windows.go index 763cccf9..915355a2 100644 --- a/llm/llm_windows.go +++ b/llm/llm_windows.go @@ -1,14 +1,13 @@ package llm import ( - "embed" "syscall" ) -// unused on windows -var libEmbed embed.FS - -const CREATE_DEFAULT_ERROR_MODE = 0x04000000 +const ( + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 +) var LlamaServerSysProcAttr = &syscall.SysProcAttr{ // Wire up the default error handling logic If for some reason a DLL is @@ -16,5 +15,8 @@ var LlamaServerSysProcAttr = &syscall.SysProcAttr{ // the user can either fix their PATH, or report a bug. Without this // setting, the process exits immediately with a generic exit status but no // way to (easily) figure out what the actual missing DLL was. 
- CreationFlags: CREATE_DEFAULT_ERROR_MODE, + // + // Setting Above Normal priority class ensures when running as a "background service" + // with "programs" given best priority, we aren't starved of cpu cycles + CreationFlags: CREATE_DEFAULT_ERROR_MODE | ABOVE_NORMAL_PRIORITY_CLASS, } diff --git a/llm/patches/0000-cmakelist.patch b/llm/patches/0000-cmakelist.patch new file mode 100644 index 00000000..54e9b602 --- /dev/null +++ b/llm/patches/0000-cmakelist.patch @@ -0,0 +1,22 @@ +From 8b8d83ffca775840acc5dc700f3b3703e9f5cfe4 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Fri, 23 Aug 2024 11:27:48 -0700 +Subject: [PATCH] patch cmakelist + +--- + CMakeLists.txt | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index a3132063..6a2a9912 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -199,3 +199,5 @@ if (LLAMA_BUILD_EXAMPLES) + add_subdirectory(examples) + add_subdirectory(pocs) + endif() ++ ++add_subdirectory(../ext_server ext_server) # ollama +-- +2.45.2 + diff --git a/llm/patches/01-load-progress.diff b/llm/patches/0001-load-progress.patch similarity index 74% rename from llm/patches/01-load-progress.diff rename to llm/patches/0001-load-progress.patch index a053c1c2..0ddabc80 100644 --- a/llm/patches/01-load-progress.diff +++ b/llm/patches/0001-load-progress.patch @@ -1,8 +1,18 @@ +From 2cfaa0a04faa9c87ba8f1ac8527eb953e69c6cde Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:10 -0700 +Subject: [PATCH] 01-load-progress.diff + +--- + common/common.cpp | 2 ++ + common/common.h | 7 +++++++ + 2 files changed, 9 insertions(+) + diff --git a/common/common.cpp b/common/common.cpp -index 2c05a4d4..927f0e3d 100644 +index 9fa18472..48ff41e9 100644 --- a/common/common.cpp +++ b/common/common.cpp -@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & +@@ -2573,6 +2573,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; mparams.check_tensors = params.check_tensors; @@ -12,10 +22,10 @@ index 2c05a4d4..927f0e3d 100644 mparams.kv_overrides = NULL; } else { diff --git a/common/common.h b/common/common.h -index 65c0ef81..ebca2c77 100644 +index cb5e7f6d..d8f043f7 100644 --- a/common/common.h +++ b/common/common.h -@@ -184,6 +184,13 @@ struct gpt_params { +@@ -204,6 +204,13 @@ struct gpt_params { std::string mmproj = ""; // path to multimodal projector std::vector image; // path to image file(s) @@ -29,3 +39,6 @@ index 65c0ef81..ebca2c77 100644 // embedding bool embedding = false; // get only sentence embedding int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) +-- +2.46.0 + diff --git a/llm/patches/02-clip-log.diff b/llm/patches/0002-clip-log.patch similarity index 60% rename from llm/patches/02-clip-log.diff rename to llm/patches/0002-clip-log.patch index 34a018e8..8df0da17 100644 --- a/llm/patches/02-clip-log.diff +++ b/llm/patches/0002-clip-log.patch @@ -1,5 +1,14 @@ +From ba4bba80a744f76ac67b8234451c259a3c5da83b Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:11 -0700 +Subject: [PATCH] 02-clip-log.diff + +--- + examples/llava/clip.cpp | 1 + + 1 file changed, 1 insertion(+) + diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp -index e431c7f7..f077e688 100644 +index 9b890571..cb51793d 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -3,6 +3,7 @@ 
@@ -10,3 +19,6 @@ index e431c7f7..f077e688 100644 #include "log.h" #include "ggml.h" #include "ggml-alloc.h" +-- +2.46.0 + diff --git a/llm/patches/03-load_exception.diff b/llm/patches/0003-load_exception.patch similarity index 74% rename from llm/patches/03-load_exception.diff rename to llm/patches/0003-load_exception.patch index 02666196..3d858ebb 100644 --- a/llm/patches/03-load_exception.diff +++ b/llm/patches/0003-load_exception.patch @@ -1,8 +1,17 @@ +From e43bfd3f607a6dfcaba2d490d35f412a52e55e30 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:12 -0700 +Subject: [PATCH] 03-load_exception.diff + +--- + src/llama.cpp | 25 ++++++++++++++++--------- + 1 file changed, 16 insertions(+), 9 deletions(-) + diff --git a/src/llama.cpp b/src/llama.cpp -index 73f52435..58a00fb1 100644 +index 88355971..926bb71a 100644 --- a/src/llama.cpp +++ b/src/llama.cpp -@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam +@@ -8635,7 +8635,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam } } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); @@ -11,7 +20,7 @@ index 73f52435..58a00fb1 100644 } return 0; -@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file( +@@ -18022,16 +18022,23 @@ struct llama_model * llama_load_model_from_file( } model->rpc_servers.push_back(servers); } @@ -43,3 +52,6 @@ index 73f52435..58a00fb1 100644 } return model; +-- +2.46.0 + diff --git a/llm/patches/04-metal.diff b/llm/patches/0004-metal.patch similarity index 87% rename from llm/patches/04-metal.diff rename to llm/patches/0004-metal.patch index e63732e7..4cfa407e 100644 --- a/llm/patches/04-metal.diff +++ b/llm/patches/0004-metal.patch @@ -1,8 +1,17 @@ +From 29411d9a9d2b6a0af6425ffe88498f17f71f7d5d Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:12 -0700 +Subject: [PATCH] 04-metal.diff + +--- + ggml/src/ggml-metal.m | 30 +++++++++++++----------------- + 1 file changed, 13 insertions(+), 17 deletions(-) + diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m -index 0207b787..b5e9884b 100644 +index 91b5e61b..9cfa72ac 100644 --- a/ggml/src/ggml-metal.m +++ b/ggml/src/ggml-metal.m -@@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute( +@@ -1734,27 +1734,23 @@ static enum ggml_status ggml_metal_graph_compute( // to the matrix-vector kernel int ne11_mm_min = 1; @@ -43,3 +52,6 @@ index 0207b787..b5e9884b 100644 // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel +-- +2.46.0 + diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/0005-default-pretokenizer.patch similarity index 85% rename from llm/patches/05-default-pretokenizer.diff rename to llm/patches/0005-default-pretokenizer.patch index 351bcaef..6ad0ee97 100644 --- a/llm/patches/05-default-pretokenizer.diff +++ b/llm/patches/0005-default-pretokenizer.patch @@ -1,5 +1,14 @@ +From b298ac8614d1e38da28f760eb1d2ae8af0fbbe62 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:13 -0700 +Subject: [PATCH] 05-default-pretokenizer.diff + +--- + src/llama.cpp | 14 +++----------- + 1 file changed, 3 insertions(+), 11 deletions(-) + diff --git a/src/llama.cpp b/src/llama.cpp -index 88355971..dd7d41ed 100644 +index 926bb71a..d1e959fc 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -6083,16 +6083,7 @@ static void 
llm_load_vocab( @@ -30,3 +39,6 @@ index 88355971..dd7d41ed 100644 } } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; +-- +2.46.0 + diff --git a/llm/patches/06-embeddings.diff b/llm/patches/0006-embeddings.patch similarity index 79% rename from llm/patches/06-embeddings.diff rename to llm/patches/0006-embeddings.patch index f3c071cb..8f89ffeb 100644 --- a/llm/patches/06-embeddings.diff +++ b/llm/patches/0006-embeddings.patch @@ -1,8 +1,17 @@ +From c9a6ca9fc039233dee746a4da9705762cd9e515d Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:14 -0700 +Subject: [PATCH] 06-embeddings.diff + +--- + src/llama.cpp | 17 ++++++++++------- + 1 file changed, 10 insertions(+), 7 deletions(-) + diff --git a/src/llama.cpp b/src/llama.cpp -index 88355971..d7db689b 100644 +index d1e959fc..f79bd782 100644 --- a/src/llama.cpp +++ b/src/llama.cpp -@@ -15906,7 +15906,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) { +@@ -15898,7 +15898,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) { const auto n_embd = hparams.n_embd; // TODO: use a per-batch flag for logits presence instead @@ -11,7 +20,7 @@ index 88355971..d7db689b 100644 const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0; -@@ -16175,20 +16175,23 @@ static int llama_decode_internal( +@@ -16167,20 +16167,23 @@ static int llama_decode_internal( // no output res = nullptr; embd = nullptr; @@ -41,3 +50,6 @@ index 88355971..d7db689b 100644 // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); ggml_backend_sched_alloc_graph(lctx.sched, gf); +-- +2.46.0 + diff --git a/llm/patches/07-clip-unicode.diff b/llm/patches/0007-clip-unicode.patch similarity index 73% rename from llm/patches/07-clip-unicode.diff rename to llm/patches/0007-clip-unicode.patch index 53e5ee11..72c061cb 100644 --- a/llm/patches/07-clip-unicode.diff +++ b/llm/patches/0007-clip-unicode.patch @@ -1,8 +1,17 @@ +From ae2b188a679c83ce105aa1e823499441dfab3c57 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:15 -0700 +Subject: [PATCH] 07-clip-unicode.diff + +--- + examples/llava/clip.cpp | 23 +++++++++++++++++++++++ + 1 file changed, 23 insertions(+) + diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp -index 95fbe3d0..5a02a6ec 100644 +index cb51793d..8716472b 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp -@@ -32,6 +33,14 @@ +@@ -41,6 +41,14 @@ #include #include @@ -17,7 +26,7 @@ index 95fbe3d0..5a02a6ec 100644 //#define CLIP_DEBUG_FUNCTIONS // RGB uint8 image -@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { +@@ -1223,7 +1231,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { return nullptr; } @@ -40,3 +49,6 @@ index 95fbe3d0..5a02a6ec 100644 if (!fin) { LOG_TEE("cannot open model file for loading tensors\n"); clip_free(new_clip); +-- +2.46.0 + diff --git a/llm/patches/0008-solar-pro.patch b/llm/patches/0008-solar-pro.patch new file mode 100644 index 00000000..54f18457 --- /dev/null +++ b/llm/patches/0008-solar-pro.patch @@ -0,0 +1,402 @@ +From 8313ce5f43f11f3d84f352f97f3802792e90e18c Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Mon, 16 Sep 2024 15:53:16 -0700 +Subject: [PATCH] add solar-pro support + +solar-pro introduces block 
skip connections where blocks are connected +to other, non-sequential blocks with a scale multiple + +this change adds 4 new keys to store the skip connections and one new +tensor to store the scalar. the scalar is implemented a 1-dimensional +tensor with 2 elements dervied from the model's bskcn_tv configuration. +in general, the values are (bskcn_tv, 1 - bskcn_tv) +--- + src/llama.cpp | 267 +++++++++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 254 insertions(+), 13 deletions(-) + +diff --git a/src/llama.cpp b/src/llama.cpp +index f79bd782..b7771f53 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -213,6 +213,7 @@ enum llm_arch { + LLM_ARCH_NEMOTRON, + LLM_ARCH_EXAONE, + LLM_ARCH_RWKV6, ++ LLM_ARCH_SOLAR, + LLM_ARCH_UNKNOWN, + }; + +@@ -261,6 +262,7 @@ static const std::map LLM_ARCH_NAMES = { + { LLM_ARCH_NEMOTRON, "nemotron" }, + { LLM_ARCH_EXAONE, "exaone" }, + { LLM_ARCH_RWKV6, "rwkv6" }, ++ { LLM_ARCH_SOLAR, "solar" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, + }; + +@@ -314,6 +316,7 @@ enum llm_kv { + LLM_KV_ATTENTION_KV_LORA_RANK, + LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, + LLM_KV_ATTENTION_SLIDING_WINDOW, ++ LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION, + + LLM_KV_ROPE_DIMENSION_COUNT, + LLM_KV_ROPE_FREQ_BASE, +@@ -405,19 +408,20 @@ static const std::map LLM_KV_NAMES = { + { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" }, + { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" }, + +- { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, +- { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, +- { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, +- { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, +- { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, +- { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, +- { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, +- { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, +- { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, +- { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, +- { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, +- { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, +- { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, ++ { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, ++ { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, ++ { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, ++ { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, ++ { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, ++ { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, ++ { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, ++ { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, ++ { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, ++ { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, ++ { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, ++ { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, ++ { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, ++ { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION, "%s.attention.block_skip_connection.%d" }, + + { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, + { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, +@@ -589,6 +593,7 @@ enum llm_tensor { + LLM_TENSOR_ENC_FFN_DOWN, + LLM_TENSOR_ENC_FFN_UP, + LLM_TENSOR_ENC_OUTPUT_NORM, ++ 
LLM_TENSOR_BSKCN_TV, + }; + + static const std::map> LLM_TENSOR_NAMES = { +@@ -1408,6 +1413,24 @@ static const std::map> LLM_TENSOR_NA + { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" }, + }, + }, ++ { ++ LLM_ARCH_SOLAR, ++ { ++ { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, ++ { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, ++ { LLM_TENSOR_OUTPUT, "output" }, ++ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, ++ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, ++ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, ++ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, ++ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, ++ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, ++ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, ++ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, ++ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, ++ { LLM_TENSOR_BSKCN_TV, "bskcn_tv" }, ++ }, ++ }, + { + LLM_ARCH_UNKNOWN, + { +@@ -2237,6 +2260,7 @@ enum e_model { + MODEL_15B, + MODEL_16B, + MODEL_20B, ++ MODEL_22B, + MODEL_30B, + MODEL_34B, + MODEL_35B, +@@ -2284,6 +2308,8 @@ struct llama_hparams { + std::array n_head_kv_arr; + std::array n_ff_arr; + ++ std::array, 4> n_bskcn_arr; ++ + uint32_t n_layer_dense_lead = 0; + uint32_t n_lora_q = 0; + uint32_t n_lora_kv = 0; +@@ -2349,6 +2375,7 @@ struct llama_hparams { + if (this->n_head_arr != other.n_head_arr) return true; + if (this->n_head_kv_arr != other.n_head_kv_arr) return true; + if (this->n_ff_arr != other.n_ff_arr) return true; ++ if (this->n_bskcn_arr != other.n_bskcn_arr) return true; + + if (this->n_rel_attn_bkts != other.n_rel_attn_bkts) return true; + if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true; +@@ -2455,6 +2482,14 @@ struct llama_hparams { + return ssm_d_state * ssm_d_inner; + } + } ++ ++ bool n_bskcn(uint32_t n, uint32_t il = 0) const { ++ if (il < n_layer) { ++ return n_bskcn_arr[n][il] > 0; ++ } ++ ++ GGML_ABORT("fatal error"); ++ } + }; + + static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable"); +@@ -2635,6 +2670,8 @@ struct llama_layer { + struct ggml_tensor * ffn_gate_scale; + struct ggml_tensor * ffn_up_scale; + struct ggml_tensor * ffn_down_scale; ++ ++ struct ggml_tensor * bskcn_tv; + }; + + // very similar to llama_batch, +@@ -5937,6 +5974,21 @@ static void llm_load_hparams( + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; ++ case LLM_ARCH_SOLAR: ++ { ++ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ++ ++ for (int i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) { ++ auto & bskcn = hparams.n_bskcn_arr.at(i); ++ bskcn.fill(0); ++ ml.get_key_or_arr(::format(LLM_KV_NAMES.at(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION), LLM_ARCH_NAMES.at(ml.llm_kv.arch), i), bskcn, hparams.n_layer, false); ++ } ++ ++ switch (hparams.n_layer) { ++ case 64: model.type = e_model::MODEL_22B; break; ++ default: model.type = e_model::MODEL_UNKNOWN; ++ } ++ } + default: (void)0; + } + +@@ -8420,6 +8472,38 @@ static bool llm_load_tensors( + } + + } break; ++ case LLM_ARCH_SOLAR: ++ { ++ model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); ++ ++ // output ++ { ++ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); ++ model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED); ++ } ++ ++ for (int i = 0; i < n_layer; ++i) { ++ ggml_context * ctx_layer = ctx_for_layer(i); ++ ggml_context * ctx_split = ctx_for_layer_split(i); ++ ++ auto & layer = 
model.layers[i]; ++ ++ layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); ++ ++ layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}); ++ layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}); ++ layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}); ++ layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}); ++ ++ layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); ++ ++ layer.bskcn_tv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_BSKCN_TV, "weight"), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); ++ ++ layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); ++ layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); ++ layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); ++ } ++ } break; + default: + throw std::runtime_error("unknown architecture"); + } +@@ -15173,6 +15257,158 @@ struct llm_build_context { + + return gf; + } ++ ++ ggml_cgraph * build_solar() { ++ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); ++ ++ // mutable variable, needed during the last layer of the computation to skip unused tokens ++ int32_t n_tokens = this->n_tokens; ++ ++ const int64_t n_embd_head = hparams.n_embd_head_v; ++ GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); ++ GGML_ASSERT(n_embd_head == hparams.n_rot); ++ ++ struct ggml_tensor * cur; ++ struct ggml_tensor * inpL; ++ ++ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb); ++ ++ // inp_pos - contains the positions ++ struct ggml_tensor * inp_pos = build_inp_pos(); ++ ++ // KQ_mask (mask for 1 head, it will be broadcasted to all heads) ++ struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); ++ ++ struct ggml_tensor * bskcn_1; ++ struct ggml_tensor * bskcn_2; ++ ++ for (int il = 0; il < n_layer; ++il) { ++ struct ggml_tensor * inpSA = inpL; ++ ++ if (hparams.n_bskcn(0, il)) { ++ bskcn_1 = inpSA; ++ } ++ ++ if (hparams.n_bskcn(1, il)) { ++ bskcn_2 = inpSA; ++ } ++ ++ if (hparams.n_bskcn(2, il)) { ++ inpSA = ggml_add( ++ ctx0, ++ ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), ++ ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); ++ } ++ ++ if (hparams.n_bskcn(3, il)) { ++ inpSA = ggml_add( ++ ctx0, ++ ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), ++ ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); ++ } ++ ++ // norm ++ cur = llm_build_norm(ctx0, inpL, hparams, ++ model.layers[il].attn_norm, NULL, ++ LLM_NORM_RMS, cb, il); ++ cb(cur, "attn_norm", il); ++ ++ // self-attention ++ { ++ // rope freq factors for llama3; may return nullptr for llama2 and other models ++ struct ggml_tensor * rope_factors = build_rope_factors(il); ++ ++ // compute Q and K and RoPE them ++ struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); ++ cb(Qcur, "Qcur", il); ++ if (model.layers[il].bq) { ++ Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); ++ cb(Qcur, "Qcur", il); ++ } ++ ++ struct ggml_tensor * Kcur = 
llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); ++ cb(Kcur, "Kcur", il); ++ if (model.layers[il].bk) { ++ Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); ++ cb(Kcur, "Kcur", il); ++ } ++ ++ struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); ++ cb(Vcur, "Vcur", il); ++ if (model.layers[il].bv) { ++ Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); ++ cb(Vcur, "Vcur", il); ++ } ++ ++ Qcur = ggml_rope_ext( ++ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, ++ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ++ ext_factor, attn_factor, beta_fast, beta_slow ++ ); ++ cb(Qcur, "Qcur", il); ++ ++ Kcur = ggml_rope_ext( ++ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, ++ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ++ ext_factor, attn_factor, beta_fast, beta_slow ++ ); ++ cb(Kcur, "Kcur", il); ++ ++ cur = llm_build_kv(ctx0, lctx, kv_self, gf, ++ model.layers[il].wo, model.layers[il].bo, ++ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); ++ } ++ ++ if (il == n_layer - 1) { ++ // skip computing output for unused tokens ++ struct ggml_tensor * inp_out_ids = build_inp_out_ids(); ++ n_tokens = n_outputs; ++ cur = ggml_get_rows(ctx0, cur, inp_out_ids); ++ inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); ++ } ++ ++ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); ++ cb(ffn_inp, "ffn_inp", il); ++ ++ // feed-forward network ++ cur = llm_build_norm(ctx0, ffn_inp, hparams, ++ model.layers[il].ffn_norm, NULL, ++ LLM_NORM_RMS, cb, il); ++ cb(cur, "ffn_norm", il); ++ ++ cur = llm_build_ffn(ctx0, lctx, cur, ++ model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, ++ model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, ++ model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, ++ NULL, ++ LLM_FFN_SILU, LLM_FFN_PAR, cb, il); ++ cb(cur, "ffn_out", il); ++ ++ cur = ggml_add(ctx0, cur, ffn_inp); ++ cb(cur, "ffn_out", il); ++ ++ cur = lctx.cvec.apply_to(ctx0, cur, il); ++ cb(cur, "l_out", il); ++ ++ // input for next layer ++ inpL = cur; ++ } ++ ++ cur = inpL; ++ ++ cur = llm_build_norm(ctx0, cur, hparams, ++ model.output_norm, NULL, ++ LLM_NORM_RMS, cb, -1); ++ cb(cur, "result_norm", -1); ++ ++ // lm_head ++ cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); ++ cb(cur, "result_output", -1); ++ ++ ggml_build_forward_expand(gf, cur); ++ ++ return gf; ++ } + }; + + static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector & ids) { +@@ -15423,6 +15659,10 @@ static struct ggml_cgraph * llama_build_graph( + { + result = llm.build_rwkv6(); + } break; ++ case LLM_ARCH_SOLAR: ++ { ++ result = llm.build_solar(); ++ } break; + default: + GGML_ABORT("fatal error"); + } +@@ -18503,6 +18743,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { + case LLM_ARCH_ARCTIC: + case LLM_ARCH_DEEPSEEK2: + case LLM_ARCH_CHATGLM: ++ case LLM_ARCH_SOLAR: + return LLAMA_ROPE_TYPE_NORM; + + // the pairs of head values are offset by n_rot/2 +-- +2.46.0 + diff --git a/llm/payload.go b/llm/payload.go deleted file mode 100644 index 963b3295..00000000 --- a/llm/payload.go +++ /dev/null @@ -1,233 +0,0 @@ -package llm - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "io/fs" - "log/slog" - "os" - "path/filepath" - "runtime" - "slices" - "strings" - - "golang.org/x/sync/errgroup" - - "github.com/ollama/ollama/gpu" -) - -var errPayloadMissing = errors.New("expected 
payloads not included in this build of ollama") - -func Init() error { - payloadsDir, err := gpu.PayloadsDir() - if err != nil { - return err - } - - if runtime.GOOS != "windows" { - slog.Info("extracting embedded files", "dir", payloadsDir) - binGlob := "build/*/*/*/bin/*" - - // extract server libraries - err = extractFiles(payloadsDir, binGlob) - if err != nil { - return fmt.Errorf("extract binaries: %v", err) - } - } - - var variants []string - for v := range getAvailableServers() { - variants = append(variants, v) - } - slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants)) - slog.Debug("Override detection logic by setting OLLAMA_LLM_LIBRARY") - - return nil -} - -// binary names may contain an optional variant separated by '_' -// For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2" -// Any library without a variant is the lowest common denominator -func getAvailableServers() map[string]string { - payloadsDir, err := gpu.PayloadsDir() - if err != nil { - slog.Error("payload lookup error", "error", err) - return nil - } - - // glob payloadsDir for files that start with ollama_ - pattern := filepath.Join(payloadsDir, "*", "ollama_*") - - files, err := filepath.Glob(pattern) - if err != nil { - slog.Debug("could not glob", "pattern", pattern, "error", err) - return nil - } - - servers := make(map[string]string) - for _, file := range files { - slog.Debug("availableServers : found", "file", file) - servers[filepath.Base(filepath.Dir(file))] = filepath.Dir(file) - } - - return servers -} - -// serversForGpu returns a list of compatible servers give the provided GPU -// info, ordered by performance. assumes Init() has been called -// TODO - switch to metadata based mapping -func serversForGpu(info gpu.GpuInfo) []string { - // glob workDir for files that start with ollama_ - availableServers := getAvailableServers() - requested := info.Library - if info.Variant != gpu.CPUCapabilityNone.String() { - requested += "_" + info.Variant - } - - servers := []string{} - - // exact match first - for a := range availableServers { - if a == requested { - servers = []string{a} - - if a == "metal" { - return servers - } - - break - } - } - - alt := []string{} - - // Then for GPUs load alternates and sort the list for consistent load ordering - if info.Library != "cpu" { - for a := range availableServers { - if info.Library == strings.Split(a, "_")[0] && a != requested { - alt = append(alt, a) - } - } - - slices.Sort(alt) - servers = append(servers, alt...) 
- } - - if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") { - // Load up the best CPU variant if not primary requested - if info.Library != "cpu" { - variant := gpu.GetCPUCapability() - // If no variant, then we fall back to default - // If we have a variant, try that if we find an exact match - // Attempting to run the wrong CPU instructions will panic the - // process - if variant != gpu.CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+variant.String() { - servers = append(servers, cmp) - break - } - } - } else { - servers = append(servers, "cpu") - } - } - - if len(servers) == 0 { - servers = []string{"cpu"} - } - } - - return servers -} - -// Return the optimal server for this CPU architecture -func serverForCpu() string { - if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { - return "metal" - } - variant := gpu.GetCPUCapability() - availableServers := getAvailableServers() - if variant != gpu.CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+variant.String() { - return cmp - } - } - } - return "cpu" -} - -// extract extracts the embedded files to the target directory -func extractFiles(targetDir string, glob string) error { - files, err := fs.Glob(libEmbed, glob) - if err != nil || len(files) == 0 { - return errPayloadMissing - } - - if err := os.MkdirAll(targetDir, 0o755); err != nil { - return fmt.Errorf("extractFiles could not mkdir %s: %v", targetDir, err) - } - - g := new(errgroup.Group) - - // build/$OS/$GOARCH/$VARIANT/{bin,lib}/$FILE - for _, file := range files { - filename := file - - variant := filepath.Base(filepath.Dir(filepath.Dir(filename))) - - slog.Debug("extracting", "variant", variant, "file", filename) - - g.Go(func() error { - srcf, err := libEmbed.Open(filename) - if err != nil { - return err - } - defer srcf.Close() - - src := io.Reader(srcf) - if strings.HasSuffix(filename, ".gz") { - src, err = gzip.NewReader(src) - if err != nil { - return fmt.Errorf("decompress payload %s: %v", filename, err) - } - filename = strings.TrimSuffix(filename, ".gz") - } - - variantDir := filepath.Join(targetDir, variant) - if err := os.MkdirAll(variantDir, 0o755); err != nil { - return fmt.Errorf("extractFiles could not mkdir %s: %v", variantDir, err) - } - - base := filepath.Base(filename) - destFilename := filepath.Join(variantDir, base) - - _, err = os.Stat(destFilename) - switch { - case errors.Is(err, os.ErrNotExist): - destFile, err := os.OpenFile(destFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755) - if err != nil { - return fmt.Errorf("write payload %s: %v", filename, err) - } - defer destFile.Close() - if _, err := io.Copy(destFile, src); err != nil { - return fmt.Errorf("copy payload %s: %v", filename, err) - } - case err != nil: - return fmt.Errorf("stat payload %s: %v", filename, err) - } - return nil - }) - } - - err = g.Wait() - if err != nil { - // If we fail to extract, the payload dir is unusable, so cleanup whatever we extracted - gpu.Cleanup() - return err - } - return nil -} diff --git a/llm/server.go b/llm/server.go index 28eb8d6f..6c504f14 100644 --- a/llm/server.go +++ b/llm/server.go @@ -24,9 +24,11 @@ import ( "golang.org/x/sync/semaphore" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/build" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/gpu" + "github.com/ollama/ollama/runners" ) type LlamaServer interface { @@ -106,7 +108,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr gpus = 
gpu.GetCPUInfo() } if len(gpus) == 1 && gpus[0].Library == "cpu" { - cpuRunner = serverForCpu() + cpuRunner = runners.ServerForCpu() estimate = EstimateGPULayers(gpus, ggml, projectors, opts) } else { estimate = EstimateGPULayers(gpus, ggml, projectors, opts) @@ -118,7 +120,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr opts.NumGPU = 0 case gpus[0].Library != "metal" && estimate.Layers == 0: // Don't bother loading into the GPU if no layers can fit - cpuRunner = serverForCpu() + cpuRunner = runners.ServerForCpu() gpus = gpu.GetCPUInfo() case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu": opts.NumGPU = estimate.Layers @@ -145,25 +147,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr return nil, errors.New("ollama supports only one lora adapter, but multiple were provided") } - availableServers := getAvailableServers() + rDir, err := runners.Refresh(build.EmbedFS) + if err != nil { + return nil, err + } + + availableServers := runners.GetAvailableServers(rDir) if len(availableServers) == 0 { - if runtime.GOOS != "windows" { - slog.Warn("llama server binary disappeared, reinitializing payloads") - err = Init() - if err != nil { - slog.Warn("failed to reinitialize payloads", "error", err) - return nil, err - } - availableServers = getAvailableServers() - } else { - return nil, finalErr - } + return nil, finalErr } var servers []string if cpuRunner != "" { servers = []string{cpuRunner} } else { - servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant + servers = runners.ServersForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant } demandLib := envconfig.LLMLibrary() if demandLib != "" { @@ -274,7 +271,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--tensor-split", estimate.TensorSplit) } - for i := range len(servers) { + for i := range servers { dir := availableServers[servers[i]] if dir == "" { // Shouldn't happen @@ -330,7 +327,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr _, err := os.Stat(server) if errors.Is(err, os.ErrNotExist) { slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err) - err = Init() + _, err = runners.Refresh(build.EmbedFS) if err != nil { slog.Warn("failed to reinitialize payloads", "error", err) return nil, err diff --git a/macapp/src/app.tsx b/macapp/src/app.tsx index a627e63d..449fc851 100644 --- a/macapp/src/app.tsx +++ b/macapp/src/app.tsx @@ -19,7 +19,7 @@ export default function () { const [step, setStep] = useState(Step.WELCOME) const [commandCopied, setCommandCopied] = useState(false) - const command = 'ollama run llama3.1' + const command = 'ollama run llama3.2' return (
diff --git a/openai/openai.go b/openai/openai.go index ea540257..2bf9b9f9 100644 --- a/openai/openai.go +++ b/openai/openai.go @@ -452,7 +452,7 @@ func fromChatRequest(r ChatCompletionRequest) (*api.ChatRequest, error) { } if r.Temperature != nil { - options["temperature"] = *r.Temperature * 2.0 + options["temperature"] = *r.Temperature } else { options["temperature"] = 1.0 } @@ -462,11 +462,11 @@ func fromChatRequest(r ChatCompletionRequest) (*api.ChatRequest, error) { } if r.FrequencyPenalty != nil { - options["frequency_penalty"] = *r.FrequencyPenalty * 2.0 + options["frequency_penalty"] = *r.FrequencyPenalty } if r.PresencePenalty != nil { - options["presence_penalty"] = *r.PresencePenalty * 2.0 + options["presence_penalty"] = *r.PresencePenalty } if r.TopP != nil { diff --git a/openai/openai_test.go b/openai/openai_test.go index 25f570a1..eabf5b66 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -102,9 +102,9 @@ func TestChatMiddleware(t *testing.T) { "num_predict": 999.0, // float because JSON doesn't distinguish between float and int "seed": 123.0, "stop": []any{"\n", "stop"}, - "temperature": 6.0, - "frequency_penalty": 8.0, - "presence_penalty": 10.0, + "temperature": 3.0, + "frequency_penalty": 4.0, + "presence_penalty": 5.0, "top_p": 6.0, }, Format: "json", diff --git a/runners/common.go b/runners/common.go new file mode 100644 index 00000000..681c397b --- /dev/null +++ b/runners/common.go @@ -0,0 +1,384 @@ +package runners + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/fs" + "log/slog" + "os" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + "sync" + "syscall" + + "golang.org/x/sync/errgroup" + + "github.com/ollama/ollama/envconfig" + "github.com/ollama/ollama/gpu" +) + +const ( + binGlob = "*/*/*/*" +) + +var ( + lock sync.Mutex + runnersDir = "" +) + +// Return the location where runners are stored +// If runners are payloads, this will either extract them +// or refresh them if any have disappeared due to tmp cleaners +func Refresh(payloadFS fs.FS) (string, error) { + lock.Lock() + defer lock.Unlock() + var err error + + // Wire up extra logging on our first load + if runnersDir == "" { + defer func() { + var runners []string + for v := range GetAvailableServers(runnersDir) { + runners = append(runners, v) + } + slog.Info("Dynamic LLM libraries", "runners", runners) + slog.Debug("Override detection logic by setting OLLAMA_LLM_LIBRARY") + }() + } + + if hasPayloads(payloadFS) { + if runnersDir == "" { + runnersDir, err = extractRunners(payloadFS) + } else { + err = refreshRunners(payloadFS, runnersDir) + } + } else if runnersDir == "" { + runnersDir, err = locateRunners() + } + + return runnersDir, err +} + +func Cleanup(payloadFS fs.FS) { + lock.Lock() + defer lock.Unlock() + if hasPayloads(payloadFS) && runnersDir != "" { + // We want to fully clean up the tmpdir parent of the payloads dir + tmpDir := filepath.Clean(filepath.Join(runnersDir, "..")) + slog.Debug("cleaning up", "dir", tmpDir) + err := os.RemoveAll(tmpDir) + if err != nil { + slog.Warn("failed to clean up", "dir", tmpDir, "err", err) + } + } +} + +func locateRunners() (string, error) { + exe, err := os.Executable() + if err != nil { + return "", err + } + + cwd, err := os.Getwd() + if err != nil { + return "", err + } + + var paths []string + for _, root := range []string{filepath.Dir(exe), filepath.Join(filepath.Dir(exe), envconfig.LibRelativeToExe()), cwd} { + paths = append(paths, + root, + filepath.Join(root, runtime.GOOS+"-"+runtime.GOARCH), + 
filepath.Join(root, "dist", runtime.GOOS+"-"+runtime.GOARCH), + ) + } + + // Try a few variations to improve developer experience when building from source in the local tree + for _, path := range paths { + candidate := filepath.Join(path, "lib", "ollama", "runners") + if _, err := os.Stat(candidate); err == nil { + return candidate, nil + } + } + return "", fmt.Errorf("unable to locate runners in any search path %v", paths) +} + +// Return true if we're carying nested payloads for the runners +func hasPayloads(payloadFS fs.FS) bool { + files, err := fs.Glob(payloadFS, binGlob) + if err != nil || len(files) == 0 || (len(files) == 1 && strings.Contains(files[0], "placeholder")) { + return false + } + return true +} + +func extractRunners(payloadFS fs.FS) (string, error) { + cleanupTmpDirs() + tmpDir, err := os.MkdirTemp(envconfig.TmpDir(), "ollama") + if err != nil { + return "", fmt.Errorf("failed to generate tmp dir: %w", err) + } + // Track our pid so we can clean up orphaned tmpdirs + n := filepath.Join(tmpDir, "ollama.pid") + if err := os.WriteFile(n, []byte(strconv.Itoa(os.Getpid())), 0o644); err != nil { + slog.Warn("failed to write pid file", "file", n, "error", err) + } + // We create a distinct subdirectory for payloads within the tmpdir + // This will typically look like /tmp/ollama3208993108/runners on linux + rDir := filepath.Join(tmpDir, "runners") + + slog.Info("extracting embedded files", "dir", rDir) + return rDir, refreshRunners(payloadFS, rDir) +} + +func refreshRunners(payloadFS fs.FS, rDir string) error { + // extract or refresh server libraries + err := extractFiles(payloadFS, rDir, binGlob) + if err != nil { + return fmt.Errorf("extract binaries: %v", err) + } + return nil +} + +// extract extracts the embedded files to the target directory +func extractFiles(payloadFS fs.FS, targetDir string, glob string) error { + files, err := fs.Glob(payloadFS, glob) + if err != nil || len(files) == 0 { + // Should not happen + return fmt.Errorf("extractFiles called without payload present") + } + + if err := os.MkdirAll(targetDir, 0o755); err != nil { + return fmt.Errorf("extractFiles could not mkdir %s: %v", targetDir, err) + } + + g := new(errgroup.Group) + + // $OS/$GOARCH/$RUNNER/$FILE + for _, file := range files { + filename := file + + runner := filepath.Base(filepath.Dir(filename)) + + slog.Debug("extracting", "runner", runner, "payload", filename) + + g.Go(func() error { + srcf, err := payloadFS.Open(filename) + if err != nil { + return err + } + defer srcf.Close() + + src := io.Reader(srcf) + if strings.HasSuffix(filename, ".gz") { + src, err = gzip.NewReader(src) + if err != nil { + return fmt.Errorf("decompress payload %s: %v", filename, err) + } + filename = strings.TrimSuffix(filename, ".gz") + } + + runnerDir := filepath.Join(targetDir, runner) + if err := os.MkdirAll(runnerDir, 0o755); err != nil { + return fmt.Errorf("extractFiles could not mkdir %s: %v", runnerDir, err) + } + + base := filepath.Base(filename) + destFilename := filepath.Join(runnerDir, base) + + _, err = os.Stat(destFilename) + switch { + case errors.Is(err, os.ErrNotExist): + destFile, err := os.OpenFile(destFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755) + if err != nil { + return fmt.Errorf("write payload %s: %v", filename, err) + } + defer destFile.Close() + if _, err := io.Copy(destFile, src); err != nil { + return fmt.Errorf("copy payload %s: %v", filename, err) + } + case err != nil: + return fmt.Errorf("stat payload %s: %v", filename, err) + } + return nil + }) + } + + err = g.Wait() 
+ if err != nil { + slog.Error("failed to extract files", "error", err) + // If we fail to extract, the payload dir is most likely unusable, so cleanup whatever we extracted + err := os.RemoveAll(targetDir) + if err != nil { + slog.Warn("failed to cleanup incomplete payload dir", "dir", targetDir, "error", err) + } + return err + } + return nil +} + +// Best effort to clean up prior tmpdirs +func cleanupTmpDirs() { + tmpDir := envconfig.TmpDir() + if tmpDir == "" { + tmpDir = os.TempDir() + } + matches, err := filepath.Glob(filepath.Join(tmpDir, "ollama*", "ollama.pid")) + if err != nil { + return + } + + for _, match := range matches { + raw, err := os.ReadFile(match) + if errors.Is(err, os.ErrNotExist) { + slog.Debug("not an ollama runtime directory, skipping", "path", match) + continue + } else if err != nil { + slog.Warn("could not read ollama.pid, skipping", "path", match, "error", err) + continue + } + + pid, err := strconv.Atoi(string(raw)) + if err != nil { + slog.Warn("invalid pid, skipping", "path", match, "error", err) + continue + } + + p, err := os.FindProcess(pid) + if err == nil && !errors.Is(p.Signal(syscall.Signal(0)), os.ErrProcessDone) { + slog.Warn("process still running, skipping", "pid", pid, "path", match) + continue + } + + if err := os.Remove(match); err != nil { + slog.Warn("could not cleanup stale pidfile", "path", match, "error", err) + } + + runners := filepath.Join(filepath.Dir(match), "runners") + if err := os.RemoveAll(runners); err != nil { + slog.Warn("could not cleanup stale runners", "path", runners, "error", err) + } + + if err := os.Remove(filepath.Dir(match)); err != nil { + slog.Warn("could not cleanup stale tmpdir", "path", filepath.Dir(match), "error", err) + } + } +} + +// directory names are the name of the runner and may contain an optional +// variant prefixed with '_' as the separator. For example, "cuda_v11" and +// "cuda_v12" or "cpu" and "cpu_avx2". Any library without a variant is the +// lowest common denominator +func GetAvailableServers(payloadsDir string) map[string]string { + if payloadsDir == "" { + slog.Error("empty runner dir") + return nil + } + + // glob payloadsDir for files that start with ollama_ + pattern := filepath.Join(payloadsDir, "*", "ollama_*") + + files, err := filepath.Glob(pattern) + if err != nil { + slog.Debug("could not glob", "pattern", pattern, "error", err) + return nil + } + + servers := make(map[string]string) + for _, file := range files { + slog.Debug("availableServers : found", "file", file) + servers[filepath.Base(filepath.Dir(file))] = filepath.Dir(file) + } + + return servers +} + +// serversForGpu returns a list of compatible servers given the provided GPU +// info, ordered by performance.
assumes Init() has been called +// TODO - switch to metadata based mapping +func ServersForGpu(info gpu.GpuInfo) []string { + // glob workDir for files that start with ollama_ + availableServers := GetAvailableServers(runnersDir) + requested := info.Library + if info.Variant != gpu.CPUCapabilityNone.String() { + requested += "_" + info.Variant + } + + servers := []string{} + + // exact match first + for a := range availableServers { + if a == requested { + servers = []string{a} + + if a == "metal" { + return servers + } + + break + } + } + + alt := []string{} + + // Then for GPUs load alternates and sort the list for consistent load ordering + if info.Library != "cpu" { + for a := range availableServers { + if info.Library == strings.Split(a, "_")[0] && a != requested { + alt = append(alt, a) + } + } + + slices.Sort(alt) + servers = append(servers, alt...) + } + + if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") { + // Load up the best CPU variant if not primary requested + if info.Library != "cpu" { + variant := gpu.GetCPUCapability() + // If no variant, then we fall back to default + // If we have a variant, try that if we find an exact match + // Attempting to run the wrong CPU instructions will panic the + // process + if variant != gpu.CPUCapabilityNone { + for cmp := range availableServers { + if cmp == "cpu_"+variant.String() { + servers = append(servers, cmp) + break + } + } + } else { + servers = append(servers, "cpu") + } + } + + if len(servers) == 0 { + servers = []string{"cpu"} + } + } + + return servers +} + +// Return the optimal server for this CPU architecture +func ServerForCpu() string { + if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { + return "metal" + } + variant := gpu.GetCPUCapability() + availableServers := GetAvailableServers(runnersDir) + if variant != gpu.CPUCapabilityNone { + for cmp := range availableServers { + if cmp == "cpu_"+variant.String() { + return cmp + } + } + } + return "cpu" +} diff --git a/runners/runners_test.go b/runners/runners_test.go new file mode 100644 index 00000000..e6439448 --- /dev/null +++ b/runners/runners_test.go @@ -0,0 +1,50 @@ +package runners + +import ( + "log/slog" + "os" + "path" + "runtime" + "strings" + "testing" + "testing/fstest" +) + +func TestRefreshRunners(t *testing.T) { + slog.SetLogLoggerLevel(slog.LevelDebug) + + payloadFS := fstest.MapFS{ + path.Join(runtime.GOOS, runtime.GOARCH, "foo", "ollama_llama_server"): {Data: []byte("hello, world\n")}, + } + tmpDir, err := os.MkdirTemp("", "testing") + if err != nil { + t.Fatalf("failed to make tmp dir %s", err) + } + t.Setenv("OLLAMA_TMPDIR", tmpDir) + rDir, err := Refresh(payloadFS) + if err != nil { + t.Fatalf("failed to extract to %s %s", tmpDir, err) + } + if !strings.Contains(rDir, tmpDir) { + t.Fatalf("runner dir %s was not in tmp dir %s", rDir, tmpDir) + } + + // spot check results + servers := GetAvailableServers(rDir) + if len(servers) < 1 { + t.Fatalf("expected at least 1 server") + } + + // Refresh contents + rDir, err = extractRunners(payloadFS) + if err != nil { + t.Fatalf("failed to extract to %s %s", tmpDir, err) + } + if !strings.Contains(rDir, tmpDir) { + t.Fatalf("runner dir %s was not in tmp dir %s", rDir, tmpDir) + } + + cleanupTmpDirs() + + Cleanup(payloadFS) +} diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index a2f76af2..17ac0b94 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -2,8 +2,7 @@ set -e -export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty 
--always | sed -e "s/^v//g")} -export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" +. $(dirname $0)/env.sh mkdir -p dist diff --git a/scripts/build_docker.sh b/scripts/build_docker.sh index e91c56ed..567eb7c7 100755 --- a/scripts/build_docker.sh +++ b/scripts/build_docker.sh @@ -2,76 +2,34 @@ set -eu -export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")} -export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" - -# We use 2 different image repositories to handle combining architecture images into multiarch manifest -# (The ROCm image is x86 only and is not a multiarch manifest) -# For developers, you can override the DOCKER_ORG to generate multiarch manifests -# DOCKER_ORG=jdoe PUSH=1 ./scripts/build_docker.sh -DOCKER_ORG=${DOCKER_ORG:-"ollama"} -RELEASE_IMAGE_REPO=${RELEASE_IMAGE_REPO:-"${DOCKER_ORG}/release"} -FINAL_IMAGE_REPO=${FINAL_IMAGE_REPO:-"${DOCKER_ORG}/ollama"} - -BUILD_ARCH=${BUILD_ARCH:-"amd64 arm64"} +. $(dirname $0)/env.sh # Set PUSH to a non-empty string to trigger push instead of load PUSH=${PUSH:-""} -# In CI mode, we break things down -OLLAMA_SKIP_MANIFEST_CREATE=${OLLAMA_SKIP_MANIFEST_CREATE:-""} -OLLAMA_SKIP_IMAGE_BUILD=${OLLAMA_SKIP_IMAGE_BUILD:-""} - if [ -z "${PUSH}" ] ; then + echo "Building ${FINAL_IMAGE_REPO}:$VERSION locally. set PUSH=1 to push" LOAD_OR_PUSH="--load" else - echo "Will be pushing ${RELEASE_IMAGE_REPO}:$VERSION for ${BUILD_ARCH}" + echo "Will be pushing ${FINAL_IMAGE_REPO}:$VERSION" LOAD_OR_PUSH="--push" fi -if [ -z "${OLLAMA_SKIP_IMAGE_BUILD}" ]; then - for TARGETARCH in ${BUILD_ARCH}; do - docker build \ - ${LOAD_OR_PUSH} \ - --platform=linux/${TARGETARCH} \ - --build-arg=VERSION \ - --build-arg=GOFLAGS \ - -f Dockerfile \ - -t ${RELEASE_IMAGE_REPO}:$VERSION-${TARGETARCH} \ - . - done +docker buildx build \ + ${LOAD_OR_PUSH} \ + --platform=${PLATFORM} \ + ${OLLAMA_COMMON_BUILD_ARGS} \ + -f Dockerfile \ + -t ${FINAL_IMAGE_REPO}:$VERSION \ + . - if echo ${BUILD_ARCH} | grep "amd64" > /dev/null; then - docker build \ - ${LOAD_OR_PUSH} \ - --platform=linux/amd64 \ - --build-arg=VERSION \ - --build-arg=GOFLAGS \ - --target runtime-rocm \ - -f Dockerfile \ - -t ${RELEASE_IMAGE_REPO}:$VERSION-rocm \ - . - fi -fi - -if [ -z "${OLLAMA_SKIP_MANIFEST_CREATE}" ]; then - if [ -n "${PUSH}" ]; then - docker manifest create ${FINAL_IMAGE_REPO}:$VERSION \ - ${RELEASE_IMAGE_REPO}:$VERSION-amd64 \ - ${RELEASE_IMAGE_REPO}:$VERSION-arm64 - docker manifest push ${FINAL_IMAGE_REPO}:$VERSION - - # For symmetry, tag/push the rocm image - if [ "${RELEASE_IMAGE_REPO}" != "${FINAL_IMAGE_REPO}" ]; then - echo "Tagging and pushing rocm image" - docker pull ${RELEASE_IMAGE_REPO}:$VERSION-rocm - docker tag ${RELEASE_IMAGE_REPO}:$VERSION-rocm ${FINAL_IMAGE_REPO}:$VERSION-rocm - docker push ${FINAL_IMAGE_REPO}:$VERSION-rocm - fi - else - echo "Skipping manifest generation when not pushing images are available locally as " - echo " ${RELEASE_IMAGE_REPO}:$VERSION-amd64" - echo " ${RELEASE_IMAGE_REPO}:$VERSION-arm64" - echo " ${RELEASE_IMAGE_REPO}:$VERSION-rocm" - fi -fi +if echo $PLATFORM | grep "amd64" > /dev/null; then + docker buildx build \ + ${LOAD_OR_PUSH} \ + --platform=linux/amd64 \ + ${OLLAMA_COMMON_BUILD_ARGS} \ + --target runtime-rocm \ + -f Dockerfile \ + -t ${FINAL_IMAGE_REPO}:$VERSION-rocm \ + . 
+fi \ No newline at end of file diff --git a/scripts/build_linux.sh b/scripts/build_linux.sh index 6cb0d0cd..894d9dd2 100755 --- a/scripts/build_linux.sh +++ b/scripts/build_linux.sh @@ -1,37 +1,29 @@ #!/bin/sh +# +# Mac ARM users, rosetta can be flaky, so to use a remote x86 builder +# +# docker context create amd64 --docker host=ssh://mybuildhost +# docker buildx create --name mybuilder amd64 --platform linux/amd64 +# docker buildx create --name mybuilder --append desktop-linux --platform linux/arm64 +# docker buildx use mybuilder + set -eu -export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")} -export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" -GZIP=$(which pigz 2>/dev/null || echo "gzip") +. $(dirname $0)/env.sh -BUILD_ARCH=${BUILD_ARCH:-"amd64 arm64"} -export AMDGPU_TARGETS=${AMDGPU_TARGETS:=""} mkdir -p dist -for TARGETARCH in ${BUILD_ARCH}; do - docker build \ - --platform=linux/$TARGETARCH \ - --build-arg=GOFLAGS \ - --build-arg=CGO_CFLAGS \ - --build-arg=OLLAMA_CUSTOM_CPU_DEFS \ - --build-arg=AMDGPU_TARGETS \ - --target build-$TARGETARCH \ +docker buildx build \ + --output type=local,dest=./dist/ \ + --platform=${PLATFORM} \ + ${OLLAMA_COMMON_BUILD_ARGS} \ + --target dist \ -f Dockerfile \ - -t builder:$TARGETARCH \ . - docker create --platform linux/$TARGETARCH --name builder-$TARGETARCH builder:$TARGETARCH - rm -rf ./dist/linux-$TARGETARCH - docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/dist/linux-$TARGETARCH ./dist - if echo ${TARGETARCH} | grep "amd64" > /dev/null; then - docker cp builder-$TARGETARCH:/go/src/github.com/ollama/ollama/dist/linux-$TARGETARCH-rocm ./dist - fi - docker rm builder-$TARGETARCH - echo "Compressing final linux bundle..." - rm -f ./dist/ollama-linux-$TARGETARCH.tgz - (cd dist/linux-$TARGETARCH && tar cf - . | ${GZIP} --best > ../ollama-linux-$TARGETARCH.tgz ) - if [ -d dist/linux-$TARGETARCH-rocm ]; then - (cd dist/linux-$TARGETARCH-rocm && tar cf - . | ${GZIP} --best > ../ollama-linux-$TARGETARCH-rocm.tgz ) - fi -done + +# buildx behavior changes for single vs. 
multiplatform +if echo $PLATFORM | grep "," > /dev/null ; then + mv -f ./dist/linux_*64/ollama* ./dist/ + rmdir ./dist/linux_*64 +fi \ No newline at end of file diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1 index eb8570c8..b9508341 100644 --- a/scripts/build_windows.ps1 +++ b/scripts/build_windows.ps1 @@ -7,12 +7,22 @@ $ErrorActionPreference = "Stop" function checkEnv() { - $script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower() - $script:TARGET_ARCH=$Env:PROCESSOR_ARCHITECTURE.ToLower() + if ($null -ne $env:ARCH ) { + $script:ARCH = $env:ARCH + } else { + $arch=([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture) + if ($null -ne $arch) { + $script:ARCH = ($arch.ToString().ToLower()).Replace("x64", "amd64") + } else { + write-host "WARNING: old powershell detected, assuming amd64 architecture - set `$env:ARCH to override" + $script:ARCH="amd64" + } + } + $script:TARGET_ARCH=$script:ARCH Write-host "Building for ${script:TARGET_ARCH}" write-host "Locating required tools and paths" $script:SRC_DIR=$PWD - if (!$env:VCToolsRedistDir) { + if ($null -eq $env:VCToolsRedistDir) { $MSVC_INSTALL=(Get-CimInstance MSFT_VSInstance -Namespace root/cimv2/vs)[0].InstallLocation $env:VCToolsRedistDir=(get-item "${MSVC_INSTALL}\VC\Redist\MSVC\*")[0] } @@ -28,9 +38,12 @@ function checkEnv() { $script:CUDA_DIRS=$cudaList } - $script:INNO_SETUP_DIR=(get-item "C:\Program Files*\Inno Setup*\")[0] + $inoSetup=(get-item "C:\Program Files*\Inno Setup*\") + if ($inoSetup.length -gt 0) { + $script:INNO_SETUP_DIR=$inoSetup[0] + } - $script:DEPS_DIR="${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}" + $script:DIST_DIR="${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}" $env:CGO_ENABLED="1" Write-Output "Checking version" if (!$env:VERSION) { @@ -67,7 +80,6 @@ function checkEnv() { function buildOllama() { - write-host "Building ollama CLI" if ($null -eq ${env:OLLAMA_SKIP_GENERATE}) { Remove-Item -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}" @@ -75,15 +87,16 @@ function buildOllama() { # which targets to build # Start by skipping CUDA to build everything else - pwsh -Command { $env:OLLAMA_SKIP_CUDA_GENERATE="1"; & go generate ./... } + write-host "Building ollama runners" + powershell -Command { $env:OLLAMA_SKIP_CUDA_GENERATE="1"; & go generate ./... } if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} # Then skip everyhting else and build all the CUDA variants foreach ($env:CUDA_LIB_DIR in $script:CUDA_DIRS) { - write-host "Building CUDA ${env:CUDA_LIB_DIR}" + write-host "Building CUDA ${env:CUDA_LIB_DIR} runner" if ($env:CUDA_LIB_DIR.Contains("v12")) { - pwsh -Command { + powershell -Command { $env:OLLAMA_SKIP_CUDA_GENERATE="" $env:OLLAMA_SKIP_STATIC_GENERATE="1" $env:OLLAMA_SKIP_CPU_GENERATE="1" @@ -96,7 +109,7 @@ function buildOllama() { & go generate ./... } } else { - pwsh -Command { + powershell -Command { $env:OLLAMA_SKIP_CUDA_GENERATE="" $env:OLLAMA_SKIP_STATIC_GENERATE="1" $env:OLLAMA_SKIP_CPU_GENERATE="1" @@ -115,6 +128,7 @@ function buildOllama() { } else { write-host "Skipping generate step with OLLAMA_SKIP_GENERATE set" } + write-host "Building ollama CLI" & go build -trimpath -ldflags "-s -w -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" . 
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} if ("${env:KEY_CONTAINER}") { @@ -130,34 +144,50 @@ function buildApp() { write-host "Building Ollama App" cd "${script:SRC_DIR}\app" & windres -l 0 -o ollama.syso ollama.rc - & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" . + & go build -trimpath -ldflags "-s -w -H windowsgui -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" -o "${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}-app.exe" . if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} if ("${env:KEY_CONTAINER}") { & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" ` - /csp "Google Cloud KMS Provider" /kc ${env:KEY_CONTAINER} app.exe + /csp "Google Cloud KMS Provider" /kc ${env:KEY_CONTAINER} "${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}-app.exe" if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} } } function gatherDependencies() { - write-host "Gathering runtime dependencies" + if ($null -eq $env:VCToolsRedistDir) { + write-error "Unable to locate VC Install location - please use a Developer shell" + exit 1 + } + write-host "Gathering runtime dependencies from $env:VCToolsRedistDir" cd "${script:SRC_DIR}" - md "${script:DEPS_DIR}\lib\ollama" -ea 0 > $null + md "${script:DIST_DIR}\lib\ollama" -ea 0 > $null # TODO - this varies based on host build system and MSVC version - drive from dumpbin output # currently works for Win11 + MSVC 2019 + Cuda V11 - cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DEPS_DIR}\lib\ollama\" - cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\lib\ollama\" - cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\lib\ollama\" - foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) { - cp "$env:VCToolsRedistDir\..\..\..\Tools\Llvm\x64\bin\api-ms-win-crt-${part}*.dll" "${script:DEPS_DIR}\lib\ollama\" + if ($script:TARGET_ARCH -eq "amd64") { + $depArch="x64" + } else { + $depArch=$script:TARGET_ARCH + } + if ($depArch -eq "amd64") { + cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DIST_DIR}\lib\ollama\" + cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DIST_DIR}\lib\ollama\" + cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DIST_DIR}\lib\ollama\" + $llvmCrtDir="$env:VCToolsRedistDir\..\..\..\Tools\Llvm\${depArch}\bin" + foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) { + write-host "cp ${llvmCrtDir}\api-ms-win-crt-${part}*.dll ${script:DIST_DIR}\lib\ollama\" + cp "${llvmCrtDir}\api-ms-win-crt-${part}*.dll" "${script:DIST_DIR}\lib\ollama\" + } + } else { + # Carrying the DLLs doesn't seem to work, so use the redist installer + copy-item -path "${env:VCToolsRedistDir}\vc_redist.arm64.exe" -destination "${script:DIST_DIR}" -verbose } cp "${script:SRC_DIR}\app\ollama_welcome.ps1" "${script:SRC_DIR}\dist\" if ("${env:KEY_CONTAINER}") { write-host "about to sign" - foreach ($file in (get-childitem "${script:DEPS_DIR}\lib\ollama\cu*.dll") + @("${script:SRC_DIR}\dist\ollama_welcome.ps1")){ + foreach ($file in (get-childitem "${script:DIST_DIR}\lib\ollama\cu*.dll") +
@("${script:SRC_DIR}\dist\ollama_welcome.ps1")){ write-host "signing $file" & "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" ` /csp "Google Cloud KMS Provider" /kc ${env:KEY_CONTAINER} $file @@ -167,6 +197,10 @@ function gatherDependencies() { } function buildInstaller() { + if ($null -eq ${script:INNO_SETUP_DIR}) { + write-host "Inno Setup not present, skipping installer build" + return + } write-host "Building Ollama Installer" cd "${script:SRC_DIR}\app" $env:PKG_VERSION=$script:PKG_VERSION @@ -183,13 +217,20 @@ function distZip() { Compress-Archive -Path "${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}\*" -DestinationPath "${script:SRC_DIR}\dist\ollama-windows-${script:TARGET_ARCH}.zip" -Force } +checkEnv try { - checkEnv - buildOllama - buildApp - gatherDependencies - buildInstaller - distZip + if ($($args.count) -eq 0) { + buildOllama + buildApp + gatherDependencies + buildInstaller + distZip + } else { + for ( $i = 0; $i -lt $args.count; $i++ ) { + write-host "performing $($args[$i])" + & $($args[$i]) + } + } } catch { write-host "Build Failed" write-host $_ diff --git a/scripts/env.sh b/scripts/env.sh new file mode 100644 index 00000000..d3ca05d7 --- /dev/null +++ b/scripts/env.sh @@ -0,0 +1,14 @@ +# Common environment setup across build*.sh scripts + +export VERSION=${VERSION:-$(git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g")} +export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$VERSION\" \"-X=github.com/ollama/ollama/server.mode=release\"'" +# TODO - consider `docker buildx ls --format=json` to autodiscover platform capability +PLATFORM=${PLATFORM:-"linux/arm64,linux/amd64"} +DOCKER_ORG=${DOCKER_ORG:-"ollama"} +RELEASE_IMAGE_REPO=${RELEASE_IMAGE_REPO:-"${DOCKER_ORG}/release"} +FINAL_IMAGE_REPO=${FINAL_IMAGE_REPO:-"${DOCKER_ORG}/ollama"} +OLLAMA_COMMON_BUILD_ARGS="--build-arg=VERSION --build-arg=GOFLAGS --build-arg=OLLAMA_CUSTOM_CPU_DEFS --build-arg=AMDGPU_TARGETS" + +echo "Building Ollama" +echo "VERSION=$VERSION" +echo "PLATFORM=$PLATFORM" \ No newline at end of file diff --git a/scripts/tag_latest.sh b/scripts/tag_latest.sh index abe42631..1f56f036 100755 --- a/scripts/tag_latest.sh +++ b/scripts/tag_latest.sh @@ -2,32 +2,12 @@ set -eu -# We use 2 different image repositories to handle combining architecture images into multiarch manifest -# (The ROCm image is x86 only and is not a multiarch manifest) # For developers, you can override the DOCKER_ORG to generate multiarch manifests -# DOCKER_ORG=jdoe VERSION=0.1.30 PUSH=1 ./scripts/tag_latest.sh +# DOCKER_ORG=jdoe VERSION=0.1.30 ./scripts/tag_latest.sh DOCKER_ORG=${DOCKER_ORG:-"ollama"} -RELEASE_IMAGE_REPO=${RELEASE_IMAGE_REPO:-"${DOCKER_ORG}/release"} FINAL_IMAGE_REPO=${FINAL_IMAGE_REPO:-"${DOCKER_ORG}/ollama"} -# Set PUSH to a non-empty string to trigger push instead of load -PUSH=${PUSH:-""} - -echo "Assembling manifest and tagging latest" -docker manifest rm ${FINAL_IMAGE_REPO}:latest || true -docker manifest create ${FINAL_IMAGE_REPO}:latest \ - ${RELEASE_IMAGE_REPO}:$VERSION-amd64 \ - ${RELEASE_IMAGE_REPO}:$VERSION-arm64 - -docker pull ${RELEASE_IMAGE_REPO}:$VERSION-rocm -docker tag ${RELEASE_IMAGE_REPO}:$VERSION-rocm ${FINAL_IMAGE_REPO}:rocm - -if [ -n "${PUSH}" ]; then - echo "Pushing latest tags up..." 
- docker manifest push ${FINAL_IMAGE_REPO}:latest - docker push ${FINAL_IMAGE_REPO}:rocm -else - echo "Not pushing ${FINAL_IMAGE_REPO}:latest and ${FINAL_IMAGE_REPO}:rocm" -fi - - +echo "Updating ${FINAL_IMAGE_REPO}:latest -> ${FINAL_IMAGE_REPO}:${VERSION}" +docker buildx imagetools create -t ${FINAL_IMAGE_REPO}:latest ${FINAL_IMAGE_REPO}:${VERSION} +echo "Updating ${FINAL_IMAGE_REPO}:rocm -> ${FINAL_IMAGE_REPO}:${VERSION}-rocm" +docker buildx imagetools create -t ${FINAL_IMAGE_REPO}:rocm ${FINAL_IMAGE_REPO}:${VERSION}-rocm diff --git a/server/images.go b/server/images.go index b5bf7ad6..c88edc69 100644 --- a/server/images.go +++ b/server/images.go @@ -1025,6 +1025,8 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR switch { case resp.StatusCode == http.StatusUnauthorized: + resp.Body.Close() + // Handle authentication error with one retry challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate")) token, err := getAuthorizationToken(ctx, challenge) @@ -1040,8 +1042,10 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR } } case resp.StatusCode == http.StatusNotFound: + resp.Body.Close() return nil, os.ErrNotExist case resp.StatusCode >= http.StatusBadRequest: + defer resp.Body.Close() responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("%d: %s", resp.StatusCode, err) diff --git a/server/model.go b/server/model.go index 55fb2d8d..124693d3 100644 --- a/server/model.go +++ b/server/model.go @@ -272,6 +272,30 @@ func detectContentType(r io.Reader) (string, error) { return "unknown", nil } +func parseObjects(s string) []map[string]any { + var objs []map[string]any + for offset := 0; offset < len(s); { + var obj map[string]any + decoder := json.NewDecoder(strings.NewReader(s[offset:])) + if err := decoder.Decode(&obj); errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + break + } else if syntax := &(json.SyntaxError{}); errors.As(err, &syntax) { + // skip over any syntax errors + offset += int(syntax.Offset) + } else if unmarshalType := &(json.UnmarshalTypeError{}); errors.As(err, &unmarshalType) { + // skip over any unmarshalable types + offset += int(unmarshalType.Offset) + } else if err != nil { + return nil + } else { + offset += int(decoder.InputOffset()) + objs = append(objs, obj) + } + } + + return objs +} + // parseToolCalls attempts to parse a JSON string into a slice of ToolCalls. 
// mxyng: this only really works if the input contains tool calls in some JSON format func (m *Model) parseToolCalls(s string) ([]api.ToolCall, bool) { @@ -304,16 +328,14 @@ func (m *Model) parseToolCalls(s string) ([]api.ToolCall, bool) { return nil, false } - var kv map[string]any - // execute the subtree with placeholders to identify the keys - // trim any commands that might exist in the template - if err := json.Unmarshal(bytes.TrimSuffix(b.Bytes(), []byte(",")), &kv); err != nil { + templateObjects := parseObjects(b.String()) + if len(templateObjects) == 0 { return nil, false } // find the keys that correspond to the name and arguments fields var name, arguments string - for k, v := range kv { + for k, v := range templateObjects[0] { switch v.(type) { case string: name = k @@ -326,43 +348,32 @@ func (m *Model) parseToolCalls(s string) ([]api.ToolCall, bool) { return nil, false } - var objs []map[string]any - for offset := 0; offset < len(s); { - var obj map[string]any - decoder := json.NewDecoder(strings.NewReader(s[offset:])) - if err := decoder.Decode(&obj); errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { - break - } else if syntax := &(json.SyntaxError{}); errors.As(err, &syntax) { - // skip over any syntax errors - offset += int(syntax.Offset) - } else if unmarshalType := &(json.UnmarshalTypeError{}); errors.As(err, &unmarshalType) { - // skip over any unmarshalable types - offset += int(unmarshalType.Offset) - } else if err != nil { - slog.Error("parseToolCalls", "error", err) - return nil, false - } else { - offset += int(decoder.InputOffset()) + responseObjects := parseObjects(s) + if len(responseObjects) == 0 { + return nil, false + } - // collect all nested objects - var collect func(any) []map[string]any - collect = func(obj any) (all []map[string]any) { - switch o := obj.(type) { - case map[string]any: - all = append(all, o) - for _, v := range o { - all = append(all, collect(v)...) - } - case []any: - for _, v := range o { - all = append(all, collect(v)...) - } - } - - return all + // collect all nested objects + var collect func(any) []map[string]any + collect = func(obj any) (all []map[string]any) { + switch o := obj.(type) { + case map[string]any: + all = append(all, o) + for _, v := range o { + all = append(all, collect(v)...) + } + case []any: + for _, v := range o { + all = append(all, collect(v)...) } - objs = append(objs, collect(obj)...) } + + return all + } + + var objs []map[string]any + for _, p := range responseObjects { + objs = append(objs, collect(p)...) 
} var toolCalls []api.ToolCall diff --git a/server/model_test.go b/server/model_test.go index e1737a5b..304d4655 100644 --- a/server/model_test.go +++ b/server/model_test.go @@ -69,6 +69,7 @@ The temperature in San Francisco, CA is 70°F and in Toronto, Canada is 20°C.`, {"name": "get_current_weather", "arguments": {"format":"celsius","location":"Toronto, Canada"}} `, true}, {"xlam", `{"tool_calls": [{"name": "get_current_weather", "arguments": {"format":"fahrenheit","location":"San Francisco, CA"}},{"name": "get_current_weather", "arguments": {"format":"celsius","location":"Toronto, Canada"}}]}`, true}, + {"nemotron", `{"name": "get_current_weather", "arguments": {"format":"fahrenheit","location":"San Francisco, CA"}},{"name": "get_current_weather", "arguments": {"format":"celsius","location":"Toronto, Canada"}}]} `, true}, } var tools []api.Tool @@ -217,3 +218,45 @@ func TestParseLayerFromCopy(t *testing.T) { t.Fatalf("got %d != want 5", len(layers)) } } + +func TestParseObjects(t *testing.T) { + tests := []struct { + input string + want []map[string]any + }{ + { + input: `[{"name": "get_current_weather", "arguments": {"format":"fahrenheit","location":"San Francisco, CA"}},{"name": "get_current_weather", "arguments": {"format":"celsius","location":"Toronto, Canada"}}]`, + want: []map[string]any{ + {"name": "get_current_weather", "arguments": map[string]any{"format": "fahrenheit", "location": "San Francisco, CA"}}, + {"name": "get_current_weather", "arguments": map[string]any{"format": "celsius", "location": "Toronto, Canada"}}, + }, + }, + { + input: `{"name": "get_current_weather", "arguments": {"format":"fahrenheit","location":"San Francisco, CA"}} `, + want: []map[string]any{ + {"name": "get_current_weather", "arguments": map[string]any{"format": "fahrenheit", "location": "San Francisco, CA"}}, + }, + }, + { + input: `{"name": "get_current_weather", "arguments": {"format":"fahrenheit","location":"San Francisco, CA"}} {"name": "get_current_weather", "arguments": {"format":"celsius","location":"Toronto, ON"}} `, + want: []map[string]any{ + {"name": "get_current_weather", "arguments": map[string]any{"format": "fahrenheit", "location": "San Francisco, CA"}}, + {"name": "get_current_weather", "arguments": map[string]any{"format": "celsius", "location": "Toronto, ON"}}, + }, + }, + { + input: `{"name": "get_current_weather", "arguments": `, + want: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + got := parseObjects(tc.input) + + if diff := cmp.Diff(got, tc.want); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) + } +} diff --git a/server/routes.go b/server/routes.go index 337f3b46..6e549ad0 100644 --- a/server/routes.go +++ b/server/routes.go @@ -26,11 +26,13 @@ import ( "golang.org/x/sync/errgroup" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/build" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/gpu" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/openai" "github.com/ollama/ollama/parser" + "github.com/ollama/ollama/runners" "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -117,6 +119,32 @@ func (s *Server) GenerateHandler(c *gin.Context) { return } + // expire the runner + if req.Prompt == "" && req.KeepAlive != nil && int(req.KeepAlive.Seconds()) == 0 { + model, err := GetModel(req.Model) + if err != nil { + switch { + case os.IsNotExist(err): + c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found", 
req.Model)}) + case err.Error() == "invalid model name": + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + default: + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + } + return + } + s.sched.expireRunner(model) + + c.JSON(http.StatusOK, api.GenerateResponse{ + Model: req.Model, + CreatedAt: time.Now().UTC(), + Response: "", + Done: true, + DoneReason: "unload", + }) + return + } + if req.Format != "" && req.Format != "json" { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "format must be empty or \"json\""}) return @@ -665,7 +693,12 @@ func (s *Server) DeleteHandler(c *gin.Context) { m, err := ParseNamedManifest(n) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + switch { + case os.IsNotExist(err): + c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found", cmp.Or(r.Model, r.Name))}) + default: + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + } return } @@ -1190,12 +1223,12 @@ func Serve(ln net.Listener) error { srvr.Close() schedDone() sched.unloadAllRunners() - gpu.Cleanup() + runners.Cleanup(build.EmbedFS) done() }() - if err := llm.Init(); err != nil { - return fmt.Errorf("unable to initialize llm library %w", err) + if _, err := runners.Refresh(build.EmbedFS); err != nil { + return fmt.Errorf("unable to initialize llm runners %w", err) } s.sched.Run(schedCtx) @@ -1322,6 +1355,32 @@ func (s *Server) ChatHandler(c *gin.Context) { return } + // expire the runner + if len(req.Messages) == 0 && req.KeepAlive != nil && int(req.KeepAlive.Seconds()) == 0 { + model, err := GetModel(req.Model) + if err != nil { + switch { + case os.IsNotExist(err): + c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found", req.Model)}) + case err.Error() == "invalid model name": + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + default: + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + } + return + } + s.sched.expireRunner(model) + + c.JSON(http.StatusOK, api.ChatResponse{ + Model: req.Model, + CreatedAt: time.Now().UTC(), + Message: api.Message{Role: "assistant"}, + Done: true, + DoneReason: "unload", + }) + return + } + caps := []Capability{CapabilityCompletion} if len(req.Tools) > 0 { caps = append(caps, CapabilityTools) diff --git a/server/routes_test.go b/server/routes_test.go index bffcea20..f7a7a22b 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -15,9 +15,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/ollama/ollama/api" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/openai" @@ -30,24 +27,47 @@ func createTestFile(t *testing.T, name string) string { t.Helper() f, err := os.CreateTemp(t.TempDir(), name) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } defer f.Close() err = binary.Write(f, binary.LittleEndian, []byte("GGUF")) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to write to file: %v", err) + } err = binary.Write(f, binary.LittleEndian, uint32(3)) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to write to file: %v", err) + } err = binary.Write(f, binary.LittleEndian, uint64(0)) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to write to file: %v", err) + } err = binary.Write(f, binary.LittleEndian, uint64(0)) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to write to file: %v", err) + } return 
f.Name() } +// equalStringSlices checks if two slices of strings are equal. +func equalStringSlices(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + func Test_Routes(t *testing.T) { type testCase struct { Name string @@ -64,12 +84,16 @@ func Test_Routes(t *testing.T) { r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname)) modelfile, err := parser.ParseFile(r) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to parse file: %v", err) + } fn := func(resp api.ProgressResponse) { t.Logf("Status: %s", resp.Status) } err = CreateModel(context.TODO(), model.ParseName(name), "", "", modelfile, fn) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to create model: %v", err) + } } testCases := []testCase{ @@ -81,10 +105,17 @@ func Test_Routes(t *testing.T) { }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json; charset=utf-8", contentType) + if contentType != "application/json; charset=utf-8" { + t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - assert.Equal(t, fmt.Sprintf(`{"version":"%s"}`, version.Version), string(body)) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + expectedBody := fmt.Sprintf(`{"version":"%s"}`, version.Version) + if string(body) != expectedBody { + t.Errorf("expected body %s, got %s", expectedBody, string(body)) + } }, }, { @@ -93,17 +124,24 @@ func Test_Routes(t *testing.T) { Path: "/api/tags", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json; charset=utf-8", contentType) + if contentType != "application/json; charset=utf-8" { + t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } var modelList api.ListResponse err = json.Unmarshal(body, &modelList) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } - assert.NotNil(t, modelList.Models) - assert.Empty(t, len(modelList.Models)) + if modelList.Models == nil || len(modelList.Models) != 0 { + t.Errorf("expected empty model list, got %v", modelList.Models) + } }, }, { @@ -112,16 +150,23 @@ func Test_Routes(t *testing.T) { Path: "/v1/models", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json", contentType) + if contentType != "application/json" { + t.Errorf("expected content type application/json, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } var modelList openai.ListCompletion err = json.Unmarshal(body, &modelList) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } - assert.Equal(t, "list", modelList.Object) - assert.Empty(t, modelList.Data) + if modelList.Object != "list" || len(modelList.Data) != 0 { + t.Errorf("expected empty model list, got %v", modelList.Data) + } }, }, { @@ -133,18 +178,92 @@ func Test_Routes(t *testing.T) { }, Expected: func(t 
*testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json; charset=utf-8", contentType) + if contentType != "application/json; charset=utf-8" { + t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } - assert.NotContains(t, string(body), "expires_at") + if strings.Contains(string(body), "expires_at") { + t.Errorf("response body should not contain 'expires_at'") + } var modelList api.ListResponse err = json.Unmarshal(body, &modelList) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } - assert.Len(t, modelList.Models, 1) - assert.Equal(t, "test-model:latest", modelList.Models[0].Name) + if len(modelList.Models) != 1 || modelList.Models[0].Name != "test-model:latest" { + t.Errorf("expected model 'test-model:latest', got %v", modelList.Models) + } + }, + }, + { + Name: "Delete Model Handler", + Method: http.MethodDelete, + Path: "/api/delete", + Setup: func(t *testing.T, req *http.Request) { + createTestModel(t, "model-to-delete") + + deleteReq := api.DeleteRequest{ + Name: "model-to-delete", + } + jsonData, err := json.Marshal(deleteReq) + if err != nil { + t.Fatalf("failed to marshal delete request: %v", err) + } + + req.Body = io.NopCloser(bytes.NewReader(jsonData)) + }, + Expected: func(t *testing.T, resp *http.Response) { + if resp.StatusCode != http.StatusOK { + t.Errorf("expected status code 200, got %d", resp.StatusCode) + } + + // Verify the model was deleted + _, err := GetModel("model-to-delete") + if err == nil || !os.IsNotExist(err) { + t.Errorf("expected model to be deleted, got error %v", err) + } + }, + }, + { + Name: "Delete Non-existent Model", + Method: http.MethodDelete, + Path: "/api/delete", + Setup: func(t *testing.T, req *http.Request) { + deleteReq := api.DeleteRequest{ + Name: "non-existent-model", + } + jsonData, err := json.Marshal(deleteReq) + if err != nil { + t.Fatalf("failed to marshal delete request: %v", err) + } + + req.Body = io.NopCloser(bytes.NewReader(jsonData)) + }, + Expected: func(t *testing.T, resp *http.Response) { + if resp.StatusCode != http.StatusNotFound { + t.Errorf("expected status code 404, got %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + var errorResp map[string]string + err = json.Unmarshal(body, &errorResp) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } + + if !strings.Contains(errorResp["error"], "not found") { + t.Errorf("expected error message to contain 'not found', got %s", errorResp["error"]) + } }, }, { @@ -153,17 +272,23 @@ func Test_Routes(t *testing.T) { Path: "/v1/models", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json", contentType) + if contentType != "application/json" { + t.Errorf("expected content type application/json, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } var modelList openai.ListCompletion err = json.Unmarshal(body, &modelList) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } - assert.Len(t, modelList.Data, 1) - assert.Equal(t, "test-model:latest", 
modelList.Data[0].Id) - assert.Equal(t, "library", modelList.Data[0].OwnedBy) + if len(modelList.Data) != 1 || modelList.Data[0].Id != "test-model:latest" || modelList.Data[0].OwnedBy != "library" { + t.Errorf("expected model 'test-model:latest' owned by 'library', got %v", modelList.Data) + } }, }, { @@ -180,20 +305,32 @@ func Test_Routes(t *testing.T) { Stream: &stream, } jsonData, err := json.Marshal(createReq) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to marshal create request: %v", err) + } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json", contentType) + if contentType != "application/json" { + t.Errorf("expected content type application/json, got %s", contentType) + } _, err := io.ReadAll(resp.Body) - require.NoError(t, err) - assert.Equal(t, 200, resp.StatusCode) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + if resp.StatusCode != http.StatusOK { // Updated line + t.Errorf("expected status code 200, got %d", resp.StatusCode) + } model, err := GetModel("t-bone") - require.NoError(t, err) - assert.Equal(t, "t-bone:latest", model.ShortName) + if err != nil { + t.Fatalf("failed to get model: %v", err) + } + if model.ShortName != "t-bone:latest" { + t.Errorf("expected model name 't-bone:latest', got %s", model.ShortName) + } }, }, { @@ -207,14 +344,20 @@ func Test_Routes(t *testing.T) { Destination: "beefsteak", } jsonData, err := json.Marshal(copyReq) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to marshal copy request: %v", err) + } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { model, err := GetModel("beefsteak") - require.NoError(t, err) - assert.Equal(t, "beefsteak:latest", model.ShortName) + if err != nil { + t.Fatalf("failed to get model: %v", err) + } + if model.ShortName != "beefsteak:latest" { + t.Errorf("expected model name 'beefsteak:latest', got %s", model.ShortName) + } }, }, { @@ -225,18 +368,26 @@ func Test_Routes(t *testing.T) { createTestModel(t, "show-model") showReq := api.ShowRequest{Model: "show-model"} jsonData, err := json.Marshal(showReq) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to marshal show request: %v", err) + } req.Body = io.NopCloser(bytes.NewReader(jsonData)) }, Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json; charset=utf-8", contentType) + if contentType != "application/json; charset=utf-8" { + t.Errorf("expected content type application/json; charset=utf-8, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } var showResp api.ShowResponse err = json.Unmarshal(body, &showResp) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } var params []string paramsSplit := strings.Split(showResp.Parameters, "\n") @@ -250,8 +401,16 @@ func Test_Routes(t *testing.T) { "stop \"foo\"", "top_p 0.9", } - assert.Equal(t, expectedParams, params) - assert.InDelta(t, 0, showResp.ModelInfo["general.parameter_count"], 1e-9, "Parameter count should be 0") + if !equalStringSlices(params, expectedParams) { + t.Errorf("expected parameters %v, got %v", expectedParams, params) + } + paramCount, ok := showResp.ModelInfo["general.parameter_count"].(float64) 
+ if !ok { + t.Fatalf("expected parameter count to be a float64, got %T", showResp.ModelInfo["general.parameter_count"]) + } + if math.Abs(paramCount) > 1e-9 { + t.Errorf("expected parameter count to be 0, got %f", paramCount) + } }, }, { @@ -260,16 +419,23 @@ func Test_Routes(t *testing.T) { Path: "/v1/models/show-model", Expected: func(t *testing.T, resp *http.Response) { contentType := resp.Header.Get("Content-Type") - assert.Equal(t, "application/json", contentType) + if contentType != "application/json" { + t.Errorf("expected content type application/json, got %s", contentType) + } body, err := io.ReadAll(resp.Body) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } var retrieveResp api.RetrieveModelResponse err = json.Unmarshal(body, &retrieveResp) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to unmarshal response body: %v", err) + } - assert.Equal(t, "show-model", retrieveResp.Id) - assert.Equal(t, "library", retrieveResp.OwnedBy) + if retrieveResp.Id != "show-model" || retrieveResp.OwnedBy != "library" { + t.Errorf("expected model 'show-model' owned by 'library', got %v", retrieveResp) + } }, }, } @@ -286,14 +452,18 @@ func Test_Routes(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { u := httpSrv.URL + tc.Path req, err := http.NewRequestWithContext(context.TODO(), tc.Method, u, nil) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } if tc.Setup != nil { tc.Setup(t, req) } resp, err := httpSrv.Client().Do(req) - require.NoError(t, err) + if err != nil { + t.Fatalf("failed to do request: %v", err) + } defer resp.Body.Close() if tc.Expected != nil { diff --git a/server/sched.go b/server/sched.go index 58071bf0..3c8656ad 100644 --- a/server/sched.go +++ b/server/sched.go @@ -360,7 +360,6 @@ func (s *Scheduler) processCompleted(ctx context.Context) { slog.Debug("runner expired event received", "modelPath", runner.modelPath) runner.refMu.Lock() if runner.refCount > 0 { - // Shouldn't happen, but safeguard to ensure no leaked runners slog.Debug("expired event with positive ref count, retrying", "modelPath", runner.modelPath, "refCount", runner.refCount) go func(runner *runnerRef) { // We can't unload yet, but want to as soon as the current request completes @@ -802,6 +801,25 @@ func (s *Scheduler) unloadAllRunners() { } } +func (s *Scheduler) expireRunner(model *Model) { + s.loadedMu.Lock() + defer s.loadedMu.Unlock() + runner, ok := s.loaded[model.ModelPath] + if ok { + runner.refMu.Lock() + runner.expiresAt = time.Now() + if runner.expireTimer != nil { + runner.expireTimer.Stop() + runner.expireTimer = nil + } + runner.sessionDuration = 0 + if runner.refCount <= 0 { + s.expiredCh <- runner + } + runner.refMu.Unlock() + } +} + // If other runners are loaded, make sure the pending request will fit in system memory // If not, pick a runner to unload, else return nil and the request can be loaded func (s *Scheduler) maybeFindCPURunnerToUnload(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) *runnerRef { diff --git a/server/sched_test.go b/server/sched_test.go index fb049574..fe5647c5 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -354,7 +354,7 @@ func TestRequestsMultipleLoadedModels(t *testing.T) { } func TestGetRunner(t *testing.T) { - ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, done := context.WithTimeout(context.Background(), 200*time.Millisecond) defer done() a := newScenarioRequest(t, ctx, "ollama-model-1a", 
10, &api.Duration{Duration: 2 * time.Millisecond}) @@ -395,7 +395,7 @@ func TestGetRunner(t *testing.T) { slog.Info("c") successCh1c, errCh1c := s.GetRunner(c.ctx, c.req.model, c.req.opts, c.req.sessionDuration) // Starts in pending channel, then should be quickly processsed to return an error - time.Sleep(20 * time.Millisecond) // Long enough for the "a" model to expire and unload + time.Sleep(50 * time.Millisecond) // Long enough for the "a" model to expire and unload require.Empty(t, successCh1c) s.loadedMu.Lock() require.Empty(t, s.loaded) @@ -406,6 +406,52 @@ func TestGetRunner(t *testing.T) { b.ctxDone() } +func TestExpireRunner(t *testing.T) { + ctx, done := context.WithTimeout(context.Background(), 20*time.Millisecond) + defer done() + s := InitScheduler(ctx) + req := &LlmRequest{ + ctx: ctx, + model: &Model{ModelPath: "foo"}, + opts: api.DefaultOptions(), + successCh: make(chan *runnerRef, 1), + errCh: make(chan error, 1), + sessionDuration: &api.Duration{Duration: 2 * time.Minute}, + } + + var ggml *llm.GGML + gpus := gpu.GpuInfoList{} + server := &mockLlm{estimatedVRAM: 10, estimatedVRAMByGPU: map[string]uint64{}} + s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { + return server, nil + } + s.load(req, ggml, gpus, 0) + + select { + case err := <-req.errCh: + if err != nil { + t.Fatalf("expected no errors when loading, got '%s'", err.Error()) + } + case resp := <-req.successCh: + s.loadedMu.Lock() + if resp.refCount != uint(1) || len(s.loaded) != 1 { + t.Fatalf("expected a model to be loaded") + } + s.loadedMu.Unlock() + } + + s.expireRunner(&Model{ModelPath: "foo"}) + + s.finishedReqCh <- req + s.processCompleted(ctx) + + s.loadedMu.Lock() + if len(s.loaded) != 0 { + t.Fatalf("expected model to be unloaded") + } + s.loadedMu.Unlock() +} + // TODO - add one scenario that triggers the bogus finished event with positive ref count func TestPrematureExpired(t *testing.T) { ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond) diff --git a/server/testdata/tools/nemotron.gotmpl b/server/testdata/tools/nemotron.gotmpl new file mode 100644 index 00000000..1b6b89ec --- /dev/null +++ b/server/testdata/tools/nemotron.gotmpl @@ -0,0 +1,33 @@ +{{- if (or .Tools .System) }}System +{{ if .System }}{{ .System }} + + +{{ end }} +{{- if .Tools }} +{{- range .Tools }} {{ . }} {{ end }} + + +{{ end }} +{{- end }} +{{- range $i, $m := .Messages }} +{{- $last := eq (len (slice $.Messages $i)) 1 -}} +{{- if eq .Role "user" }}User +{{ .Content }} +{{- if $last }} +Assistant +{{- end }} +{{ else if eq .Role "tool" }}Tool +{{ .Content }} +{{- if $last }} +Assistant +{{- end }} +{{ else if eq .Role "assistant" }}Assistant +{{- if .ToolCalls }} +{{ range .ToolCalls }} {"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}} {{ end }} +{{ else }} +{{ .Content }} +{{- if not $last }} +{{ end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/server/testdata/tools/nemotron.out b/server/testdata/tools/nemotron.out new file mode 100644 index 00000000..2166b202 --- /dev/null +++ b/server/testdata/tools/nemotron.out @@ -0,0 +1,18 @@ +System +You are a knowledgable assistant. You can answer questions and perform tasks. 
+ + + {"type":"function","function":{"name":"get_current_weather","description":"Get the current weather","parameters":{"type":"object","required":["location","format"],"properties":{"format":{"type":"string","description":"The temperature unit to use. Infer this from the users location.","enum":["celsius","fahrenheit"]},"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"}}}}} + + +User +What's the weather like today in Paris? +Assistant + {"name": "get_current_weather", "arguments": {"format":"celsius","location":"Paris, France"}} +Tool +22 +Assistant +The current temperature in Paris, France is 22 degrees Celsius. +User +What's the weather like today in San Francisco and Toronto? +Assistant