From 4e8be787c7855f02a7bcf9af116d2905ddd59725 Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Wed, 20 Sep 2023 17:40:42 +0100
Subject: [PATCH 1/6] pack in cuda libs

---
 docs/development.md             | 2 +-
 llm/llama.cpp/generate_linux.go | 8 ++++++++
 llm/llama.go                    | 2 +-
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/docs/development.md b/docs/development.md
index 54d910e4..803e5135 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -35,5 +35,5 @@ Now you can run `ollama`:
 ## Building on Linux with GPU support
 
 - Install cmake and nvidia-cuda-toolkit
-- run `CUDA_VERSION=$(nvcc --version | sed -n 's/^.*release \([0-9]\+\)\.\([0-9]\+\).*$/\1/p') go generate ./...`
+- run `CUDA_VERSION=11 CUDA_PATH=/path/to/libcuda.so CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
 - run `go build .`

diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index 7436391f..67c6647d 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -19,3 +19,11 @@ package llm
 //go:generate cmake --build ggml/build/cuda-${CUDA_VERSION} --target server --config Release
 //go:generate cmake -S gguf -B gguf/build/cuda-${CUDA_VERSION} -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cuda-${CUDA_VERSION} --target server --config Release
+//go:generate cp --dereference ${CUDA_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcuda.so
+//go:generate cp --dereference ${CUDA_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcuda.so
+//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublas.so.11
+//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublas.so.11
+//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcudart.so.11.0
+//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcudart.so.11.0
+//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.11
+//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.11

diff --git a/llm/llama.go b/llm/llama.go
index 2390f653..eaf89ef9 100644
--- a/llm/llama.go
+++ b/llm/llama.go
@@ -353,7 +353,7 @@ func newLlama(model string, adapters []string, runners []ModelRunner, opts api.O
 		runner.Path,
 		append(params, "--port", strconv.Itoa(port))...,
 	)
-
+	cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runner.Path)))
 	cmd.Stdout = os.Stderr
 	cmd.Stderr = os.Stderr
 

From b9bb5ca288523338729fcd4687e654096f635eb6 Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Wed, 20 Sep 2023 17:58:16 +0100
Subject: [PATCH 2/6] use cuda_version

---
 llm/llama.cpp/generate_linux.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index 67c6647d..c06cda3a 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -21,9 +21,9 @@ package llm
 //go:generate cmake --build gguf/build/cuda-${CUDA_VERSION} --target server --config Release
 //go:generate cp --dereference ${CUDA_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcuda.so
 //go:generate cp --dereference ${CUDA_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcuda.so
-//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublas.so.11
-//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublas.so.11
-//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcudart.so.11.0
-//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcudart.so.11.0
-//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.11
-//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.11
+//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublas.so.${CUDA_VERSION}
+//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublas.so.${CUDA_VERSION}
+//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcudart.so.${CUDA_VERSION}.0
+//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcudart.so.${CUDA_VERSION}.0
+//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.${CUDA_VERSION}
+//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.${CUDA_VERSION}

From 1255bc9b45686e50795f2bd7a3f312cac2536bca Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Wed, 20 Sep 2023 20:00:41 +0100
Subject: [PATCH 3/6] only package 11.8 runner

---
 docs/development.md             |  2 +-
 llm/llama.cpp/generate_linux.go | 24 +++++++-------
 llm/llama.go                    | 58 ++--------------------------------------------------------
 server/routes.go                |  2 +-
 4 files changed, 17 insertions(+), 69 deletions(-)

diff --git a/docs/development.md b/docs/development.md
index 803e5135..98a81b4b 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -35,5 +35,5 @@ Now you can run `ollama`:
 ## Building on Linux with GPU support
 
 - Install cmake and nvidia-cuda-toolkit
-- run `CUDA_VERSION=11 CUDA_PATH=/path/to/libcuda.so CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
+- run `CUDA_PATH=/path/to/libcuda.so CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
 - run `go build .`

diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index c06cda3a..c9303c4f 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -15,15 +15,15 @@ package llm
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 
-//go:generate cmake -S ggml -B ggml/build/cuda-${CUDA_VERSION} -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
-//go:generate cmake --build ggml/build/cuda-${CUDA_VERSION} --target server --config Release
-//go:generate cmake -S gguf -B gguf/build/cuda-${CUDA_VERSION} -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
-//go:generate cmake --build gguf/build/cuda-${CUDA_VERSION} --target server --config Release
-//go:generate cp --dereference ${CUDA_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcuda.so
-//go:generate cp --dereference ${CUDA_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcuda.so
-//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublas.so.${CUDA_VERSION}
-//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublas.so.${CUDA_VERSION}
-//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcudart.so.${CUDA_VERSION}.0
-//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcudart.so.${CUDA_VERSION}.0
-//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.${CUDA_VERSION}
-//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda-${CUDA_VERSION}/bin/libcublasLt.so.${CUDA_VERSION}
+//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build ggml/build/cuda --target server --config Release
+//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build gguf/build/cuda --target server --config Release
+//go:generate cp --dereference ${CUDA_PATH} ggml/build/cuda/bin/libcuda.so
+//go:generate cp --dereference ${CUDA_PATH} gguf/build/cuda/bin/libcuda.so
+//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda/bin/libcublas.so.11
+//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda/bin/libcublas.so.11
+//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda/bin/libcudart.so.11.0
+//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda/bin/libcudart.so.11.0
+//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda/bin/libcublasLt.so.11
+//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda/bin/libcublasLt.so.11

diff --git a/llm/llama.go b/llm/llama.go
index eaf89ef9..9118da2a 100644
--- a/llm/llama.go
+++ b/llm/llama.go
@@ -17,7 +17,6 @@ import (
 	"os/exec"
 	"path"
 	"path/filepath"
-	"regexp"
 	"runtime"
 	"strconv"
 	"strings"
@@ -29,46 +28,6 @@ import (
 //go:embed llama.cpp/*/build/*/bin/*
 var llamaCppEmbed embed.FS
 
-func cudaVersion() int {
-	// first try nvcc, it gives the most accurate version if available
-	cmd := exec.Command("nvcc", "--version")
-	output, err := cmd.CombinedOutput()
-	if err == nil {
-		// regex to match the CUDA version line in nvcc --version output
-		re := regexp.MustCompile(`release (\d+\.\d+),`)
-		matches := re.FindStringSubmatch(string(output))
-		if len(matches) >= 2 {
-			cudaVersion := matches[1]
-			cudaVersionParts := strings.Split(cudaVersion, ".")
-			cudaMajorVersion, err := strconv.Atoi(cudaVersionParts[0])
-			if err == nil {
-				return cudaMajorVersion
-			}
-		}
-	}
-
-	// fallback to nvidia-smi
-	cmd = exec.Command("nvidia-smi")
-	output, err = cmd.CombinedOutput()
-	if err != nil {
-		return -1
-	}
-
-	re := regexp.MustCompile(`CUDA Version: (\d+\.\d+)`)
-	matches := re.FindStringSubmatch(string(output))
-	if len(matches) < 2 {
-		return -1
-	}
-
-	cudaVersion := matches[1]
-	cudaVersionParts := strings.Split(cudaVersion, ".")
-	cudaMajorVersion, err := strconv.Atoi(cudaVersionParts[0])
-	if err != nil {
-		return -1
-	}
-	return cudaMajorVersion
-}
-
 type ModelRunner struct {
 	Path string // path to the model runner executable
 }
@@ -86,20 +45,9 @@ func chooseRunners(runnerType string) []ModelRunner {
 			path.Join(buildPath, "cpu", "bin", "server"),
 		}
 	case "linux":
-		cuda := cudaVersion()
-		if cuda == 11 {
-			// prioritize CUDA 11 runner
-			runners = []string{
-				path.Join(buildPath, "cuda-11", "bin", "server"),
-				path.Join(buildPath, "cuda-12", "bin", "server"),
-				path.Join(buildPath, "cpu", "bin", "server"),
-			}
-		} else {
-			runners = []string{
-				path.Join(buildPath, "cuda-12", "bin", "server"),
-				path.Join(buildPath, "cuda-11", "bin", "server"),
-				path.Join(buildPath, "cpu", "bin", "server"),
-			}
+		runners = []string{
+			path.Join(buildPath, "cuda", "bin", "server"),
+			path.Join(buildPath, "cpu", "bin", "server"),
 		}
 	case "windows":
 		// TODO: select windows GPU runner here when available

diff --git a/server/routes.go b/server/routes.go
index d3d3d11c..79d2ee72 100644
--- a/server/routes.go
+++ b/server/routes.go
@@ -556,7 +556,7 @@ func Serve(ln net.Listener, origins []string) error {
 	if runtime.GOOS == "linux" {
 		// check compatibility to log warnings
 		if _, err := llm.CheckVRAM(); err != nil {
-			log.Printf("Warning: GPU support not enabled, you may need to install GPU drivers: %v", err)
+			log.Printf("Warning: GPU support may not be enabled, check that you have installed GPU drivers: %v", err)
 		}
 	}
 

From fc6ec356fc018704ff49823a624e95069544408f Mon Sep 17 00:00:00 2001
From: Bruce MacDonald
Date: Wed, 20 Sep 2023 20:36:14 +0100
Subject: [PATCH 4/6] remove libcuda.so

---
 docs/development.md             | 2 +-
 llm/llama.cpp/generate_linux.go | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/docs/development.md b/docs/development.md
index 98a81b4b..e1b00bac 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -35,5 +35,5 @@ Now you can run `ollama`:
 ## Building on Linux with GPU support
 
 - Install cmake and nvidia-cuda-toolkit
-- run `CUDA_PATH=/path/to/libcuda.so CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
+- run `CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
 - run `go build .`

diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index c9303c4f..a3ee703d 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -19,8 +19,7 @@ package llm
 //go:generate cmake --build ggml/build/cuda --target server --config Release
 //go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cuda --target server --config Release
-//go:generate cp --dereference ${CUDA_PATH} ggml/build/cuda/bin/libcuda.so
-//go:generate cp --dereference ${CUDA_PATH} gguf/build/cuda/bin/libcuda.so
+
 //go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda/bin/libcublas.so.11
 //go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda/bin/libcublas.so.11
 //go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda/bin/libcudart.so.11.0
 //go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda/bin/libcudart.so.11.0

From 6c6a31a1e8f33b132388c71e562c07c2564f6dbe Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Wed, 20 Sep 2023 12:15:23 -0700
Subject: [PATCH 5/6] embed libraries using cmake

---
 docs/development.md                            |  2 +-
 llm/llama.cpp/generate.go                      |  5 ++-
 llm/llama.cpp/generate_darwin_amd64.go         |  9 +++---
 llm/llama.cpp/generate_darwin_arm64.go         |  9 +++---
 llm/llama.cpp/generate_linux.go                | 17 +++------
 ...dd-missing-barriers-for-mul-mat-2699.patch  | 32 -------------------
 .../0001-add-detokenize-endpoint.patch         |  0
 .../0001-copy-cuda-runtime-libraries.patch     | 27 ++++++++++++++++
 .../0002-34B-model-support.patch               |  0
 ...onization-in-new-matrix-multiplicati.patch  |  0
 ...dd-missing-barriers-for-mul-mat-2699.patch  |  0
 ...DA-s-half-type-for-aarch64-1455-2670.patch  |  0
 12 files changed, 43 insertions(+), 58 deletions(-)
 delete mode 100644 llm/llama.cpp/ggml_patch/0003-metal-add-missing-barriers-for-mul-mat-2699.patch
 rename llm/llama.cpp/{ggml_patch => patches}/0001-add-detokenize-endpoint.patch (100%)
 create mode 100644 llm/llama.cpp/patches/0001-copy-cuda-runtime-libraries.patch
 rename llm/llama.cpp/{ggml_patch => patches}/0002-34B-model-support.patch (100%)
 rename llm/llama.cpp/{ggml_patch => patches}/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch (100%)
 rename llm/llama.cpp/{ggml_patch => patches}/0004-metal-add-missing-barriers-for-mul-mat-2699.patch (100%)
 rename llm/llama.cpp/{ggml_patch => patches}/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch (100%)

diff --git a/docs/development.md b/docs/development.md
index e1b00bac..85cf34c6 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -35,5 +35,5 @@ Now you can run `ollama`:
 ## Building on Linux with GPU support
 
 - Install cmake and nvidia-cuda-toolkit
-- run `CUBLAS_PATH=/path/to/libcublas.so CUDART_PATH=/path/to/libcudart.so CUBLASLT_PATH=/path/to/libcublasLt.so go generate ./...`
+- run `go generate ./...`
 - run `go build .`

diff --git a/llm/llama.cpp/generate.go b/llm/llama.cpp/generate.go
index 40a42708..19179ac2 100644
--- a/llm/llama.cpp/generate.go
+++ b/llm/llama.cpp/generate.go
@@ -6,9 +6,8 @@ package llm
 //go:generate git submodule init
 //go:generate git submodule update --force ggml
-//go:generate -command git-apply git -C ggml apply
-//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
-//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
 
 //go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build ggml/build/cpu --target server --config Release

diff --git a/llm/llama.cpp/generate_darwin_amd64.go b/llm/llama.cpp/generate_darwin_amd64.go
index a8b4f0ad..9b782db3 100644
--- a/llm/llama.cpp/generate_darwin_amd64.go
+++ b/llm/llama.cpp/generate_darwin_amd64.go
@@ -3,11 +3,10 @@ package llm
 //go:generate git submodule init
 //go:generate git submodule update --force ggml
-//go:generate -command git-apply git -C ggml apply
-//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
-//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
-//go:generate git-apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
-//go:generate git-apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
+//go:generate git -C ggml apply ../patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
 
 //go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build ggml/build/cpu --target server --config Release

diff --git a/llm/llama.cpp/generate_darwin_arm64.go b/llm/llama.cpp/generate_darwin_arm64.go
index 4923fefb..72d175ef 100644
--- a/llm/llama.cpp/generate_darwin_arm64.go
+++ b/llm/llama.cpp/generate_darwin_arm64.go
@@ -3,11 +3,10 @@ package llm
 //go:generate git submodule init
 //go:generate git submodule update --force ggml
-//go:generate -command git-apply git -C ggml apply
-//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
-//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
-//go:generate git-apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
-//go:generate git-apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
+//go:generate git -C ggml apply ../patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
 
 //go:generate cmake -S ggml -B ggml/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build ggml/build/metal --target server --config Release

diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index a3ee703d..76be15d5 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -3,15 +3,15 @@ package llm
 //go:generate git submodule init
 //go:generate git submodule update --force ggml
-//go:generate -command git-apply git -C ggml apply
-//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
-//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
-//go:generate git-apply ../ggml_patch/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
-
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
+//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
 //go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build ggml/build/cpu --target server --config Release
 
 //go:generate git submodule update --force gguf
+//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
@@ -19,10 +19,3 @@ package llm
 //go:generate cmake --build ggml/build/cuda --target server --config Release
 //go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cuda --target server --config Release
-
-//go:generate cp --dereference ${CUBLAS_PATH} ggml/build/cuda/bin/libcublas.so.11
-//go:generate cp --dereference ${CUBLAS_PATH} gguf/build/cuda/bin/libcublas.so.11
-//go:generate cp --dereference ${CUDART_PATH} ggml/build/cuda/bin/libcudart.so.11.0
-//go:generate cp --dereference ${CUDART_PATH} gguf/build/cuda/bin/libcudart.so.11.0
-//go:generate cp --dereference ${CUBLASLT_PATH} ggml/build/cuda/bin/libcublasLt.so.11
-//go:generate cp --dereference ${CUBLASLT_PATH} gguf/build/cuda/bin/libcublasLt.so.11

diff --git a/llm/llama.cpp/ggml_patch/0003-metal-add-missing-barriers-for-mul-mat-2699.patch b/llm/llama.cpp/ggml_patch/0003-metal-add-missing-barriers-for-mul-mat-2699.patch
deleted file mode 100644
index 870e982a..00000000
--- a/llm/llama.cpp/ggml_patch/0003-metal-add-missing-barriers-for-mul-mat-2699.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 8c0ea847ac1460bca534d92266e3471cb31471be Mon Sep 17 00:00:00 2001
-From: Bruce MacDonald
-Date: Tue, 5 Sep 2023 16:05:08 -0400
-Subject: [PATCH] metal: add missing barriers for mul-mat #2699
-
----
- ggml-metal.metal | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/ggml-metal.metal b/ggml-metal.metal
-index 3f31252..ce3541f 100644
---- a/ggml-metal.metal
-+++ b/ggml-metal.metal
-@@ -1850,6 +1850,7 @@ kernel void kernel_mul_mm(device const uchar * src0,
-         //load data and store to threadgroup memory
-         half4x4 temp_a;
-         dequantize_func(x, il, temp_a);
-+        threadgroup_barrier(mem_flags::mem_threadgroup);
- #pragma unroll(16)
-         for (int i = 0; i < 16; i++) {
-             *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
-@@ -1895,6 +1896,7 @@ kernel void kernel_mul_mm(device const uchar * src0,
-         }
-     } else {
-         // block is smaller than 64x32, we should avoid writing data outside of the matrix
-+        threadgroup_barrier(mem_flags::mem_threadgroup);
-         threadgroup float *temp_str = ((threadgroup float *)shared_memory) \
-         + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
-         for (int i = 0; i < 8; i++) {
---
-2.39.2 (Apple Git-143)
-

diff --git a/llm/llama.cpp/ggml_patch/0001-add-detokenize-endpoint.patch b/llm/llama.cpp/patches/0001-add-detokenize-endpoint.patch
similarity index 100%
rename from llm/llama.cpp/ggml_patch/0001-add-detokenize-endpoint.patch
rename to llm/llama.cpp/patches/0001-add-detokenize-endpoint.patch

diff --git a/llm/llama.cpp/patches/0001-copy-cuda-runtime-libraries.patch b/llm/llama.cpp/patches/0001-copy-cuda-runtime-libraries.patch
new file mode 100644
index 00000000..1fd07973
--- /dev/null
+++ b/llm/llama.cpp/patches/0001-copy-cuda-runtime-libraries.patch
@@ -0,0 +1,27 @@
+From 5dd02993e8cc2ce309157736b95bb572f274a3fd Mon Sep 17 00:00:00 2001
+From: Michael Yang
+Date: Wed, 20 Sep 2023 14:19:52 -0700
+Subject: [PATCH] copy cuda runtime libraries
+
+---
+ CMakeLists.txt | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 824d9f2..dd24137 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -274,6 +274,10 @@ if (LLAMA_CUBLAS)
+         set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
+     endif()
+
++    configure_file(${CUDAToolkit_LIBRARY_DIR}/libcudart.so ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/libcudart.so.${CUDAToolkit_VERSION_MAJOR}.0 COPYONLY)
++    configure_file(${CUDAToolkit_LIBRARY_DIR}/libcublas.so ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/libcublas.so.${CUDAToolkit_VERSION_MAJOR} COPYONLY)
++    configure_file(${CUDAToolkit_LIBRARY_DIR}/libcublasLt.so ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/libcublasLt.so.${CUDAToolkit_VERSION_MAJOR} COPYONLY)
++
+     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
+         # 52 == lowest CUDA 12 standard
+         # 60 == f16 CUDA intrinsics
+--
+2.42.0
+

diff --git a/llm/llama.cpp/ggml_patch/0002-34B-model-support.patch b/llm/llama.cpp/patches/0002-34B-model-support.patch
similarity index 100%
rename from llm/llama.cpp/ggml_patch/0002-34B-model-support.patch
rename to llm/llama.cpp/patches/0002-34B-model-support.patch

diff --git a/llm/llama.cpp/ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch b/llm/llama.cpp/patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
similarity index 100%
rename from llm/llama.cpp/ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
rename to llm/llama.cpp/patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch

diff --git a/llm/llama.cpp/ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch b/llm/llama.cpp/patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
similarity index 100%
rename from llm/llama.cpp/ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
rename to llm/llama.cpp/patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch

diff --git a/llm/llama.cpp/ggml_patch/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch b/llm/llama.cpp/patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
similarity index 100%
rename from llm/llama.cpp/ggml_patch/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
rename to llm/llama.cpp/patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch

From a9ed7cc6aaacf4d4f05da69c9b0a14a0ab2b6a81 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Wed, 20 Sep 2023 14:39:15 -0700
Subject: [PATCH 6/6] rename generate.go

---
 llm/llama.cpp/{generate.go => generate_windows.go} | 3 ---
 1 file changed, 3 deletions(-)
 rename llm/llama.cpp/{generate.go => generate_windows.go} (93%)

diff --git a/llm/llama.cpp/generate.go b/llm/llama.cpp/generate_windows.go
similarity index 93%
rename from llm/llama.cpp/generate.go
rename to llm/llama.cpp/generate_windows.go
index 19179ac2..0d8cd411 100644
--- a/llm/llama.cpp/generate.go
+++ b/llm/llama.cpp/generate_windows.go
@@ -1,6 +1,3 @@
-//go:build !darwin
-// +build !darwin
-
 package llm
 
 //go:generate git submodule init
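
Series note: the runtime mechanism the whole series relies on is small enough to show in isolation. The build copies the CUDA runtime libraries (libcudart, libcublas, libcublasLt) next to the packaged runner binary, and the runner process is started with LD_LIBRARY_PATH pointing at its own directory, so the packaged copies are resolved ahead of any (possibly missing) system-wide install. Below is a minimal standalone sketch of that pattern; runWithPackagedLibs is a hypothetical name used only for this note, not a function from the series — the cmd.Env line is the one taken verbatim from PATCH 1/6.

package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

// runWithPackagedLibs starts a runner binary with LD_LIBRARY_PATH set to the
// directory the binary lives in, so shared libraries packaged alongside it
// (libcudart.so.*, libcublas.so.*, libcublasLt.so.*) are found by the dynamic
// linker before any system locations. Hypothetical helper for illustration.
func runWithPackagedLibs(runnerPath string, args ...string) error {
	cmd := exec.Command(runnerPath, args...)
	// Verbatim from PATCH 1/6: point the dynamic linker at the runner's directory.
	cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", filepath.Dir(runnerPath)))
	cmd.Stdout = os.Stderr
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: runner /path/to/server [args...]")
	}
	if err := runWithPackagedLibs(os.Args[1], os.Args[2:]...); err != nil {
		log.Fatal(err)
	}
}

Setting LD_LIBRARY_PATH to filepath.Dir(runnerPath) is what lets the same embedded binary run on machines without a CUDA toolkit installed; PATCH 5/6 then moves the copying of those libraries from ad hoc cp commands in go:generate into the llama.cpp CMake build itself.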