Merge 66839c3bd7c6f31b59a66b723550d999189fe0c2 into 67691e410db7a50b07a64858820b14de9aa91314
commit 680f5aa5d0

@@ -1482,6 +1482,7 @@ func NewCLI() *cobra.Command {
envVars["OLLAMA_SCHED_SPREAD"],
envVars["OLLAMA_TMPDIR"],
envVars["OLLAMA_FLASH_ATTENTION"],
envVars["OLLAMA_KV_CACHE_TYPE"],
envVars["OLLAMA_LLM_LIBRARY"],
envVars["OLLAMA_GPU_OVERHEAD"],
envVars["OLLAMA_LOAD_TIMEOUT"],

docs/faq.md

@@ -151,7 +151,7 @@ Refer to the section [above](#how-do-i-configure-ollama-server) for how to set e

Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:

-```
+```nginx
server {
listen 80;
server_name example.com; # Replace with your domain or IP

@@ -164,7 +164,7 @@ server {

## How can I use Ollama with ngrok?

-Ollama can be accessed using a range of tools for tunneling tools. For example with Ngrok:
+Ollama can be accessed using a range of tunnelling tools. For example, with ngrok:

```shell
ngrok http 11434 --host-header="localhost:11434"

@@ -285,4 +285,31 @@ Note: Windows with Radeon GPUs currently default to 1 model maximum due to limit

## How does Ollama load models on multiple GPUs?

-Installing multiple GPUs of the same brand can be a great way to increase your available VRAM to load larger models. When you load a new model, Ollama evaluates the required VRAM for the model against what is currently available. If the model will entirely fit on any single GPU, Ollama will load the model on that GPU. This typically provides the best performance as it reduces the amount of data transfering across the PCI bus during inference. If the model does not fit entirely on one GPU, then it will be spread across all the available GPUs.
+Installing multiple GPUs of the same brand can be a great way to increase your available VRAM to load larger models. When you load a new model, Ollama evaluates the required VRAM for the model against what is currently available. If the model will entirely fit on any single GPU, Ollama will load the model on that GPU. This typically provides the best performance as it reduces the amount of data transferring across the PCI bus during inference. If the model does not fit entirely on one GPU, then it will be spread across all the available GPUs.

## How can I enable Flash Attention?

Flash Attention is a feature of most (but not all) modern models that can significantly reduce memory usage as the context size grows. To enable Flash Attention, set the `OLLAMA_FLASH_ATTENTION` environment variable to `1` when starting the Ollama server.

> Note: Advanced users using CUDA may benefit from building Ollama and passing `GGML_CUDA_FA_ALL_QUANTS=1` to the llama.cpp build to enable FA for all combinations of quantisation types. More information on this can be found in [llama.cpp](https://github.com/ggerganov/llama.cpp/blob/fb4a0ec0833c71cff5a1a367ba375447ce6106eb/ggml/src/ggml-cuda/fattn-common.cuh#L575).
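
For example, on Linux or macOS the variable can be set inline for a single run of the server (a minimal illustration, assuming the standard `ollama serve` entry point):

```shell
OLLAMA_FLASH_ATTENTION=1 ollama serve
```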

## How can I set the quantization type for the K/V cache?

The K/V context cache can be quantized to significantly reduce memory usage when Flash Attention is enabled.

To use a quantized K/V cache with Ollama, set the following environment variable:

- `OLLAMA_KV_CACHE_TYPE` - The quantization type for the K/V cache. Default is `f16`.

> Note: Currently this is a global option - meaning all models will run with the specified quantization type.

There are [a number of quantization types available](https://github.com/ggerganov/llama.cpp/pull/7527); the most commonly used are:

- `f32` - full precision and memory usage.
- `f16` - high precision and memory usage (default).
- `q8_0` - 8-bit quantization; uses approximately 1/2 the memory of `f16` with a very small loss in precision that usually has no noticeable impact on the model's quality (recommended if not using f16).
- `q4_0` - 4-bit quantization; uses approximately 1/4 the memory of `f16` with a small-medium loss in precision that may be more noticeable at higher context sizes.

How much the cache quantization impacts the model's response quality will depend on the model and the task. Models with a high GQA count (e.g. Qwen2) may see a larger impact on precision from quantization than models with a low GQA count.

You may need to experiment with different quantization types to find the best balance between memory usage and quality.
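
As a combined illustration of the two settings above (a sketch only; `q8_0` is shown because it is the recommended choice when not using `f16`, and quantized cache types only take effect while Flash Attention is enabled):

```shell
OLLAMA_FLASH_ATTENTION=1 OLLAMA_KV_CACHE_TYPE=q8_0 ollama serve
```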

@@ -153,6 +153,8 @@ var (
Debug = Bool("OLLAMA_DEBUG")
// FlashAttention enables the experimental flash attention feature.
FlashAttention = Bool("OLLAMA_FLASH_ATTENTION")
// KvCacheType is the quantization type for the K/V cache.
KvCacheType = String("OLLAMA_KV_CACHE_TYPE")
// NoHistory disables readline history.
NoHistory = Bool("OLLAMA_NOHISTORY")
// NoPrune disables pruning of model blobs on startup.

@@ -234,6 +236,7 @@ func AsMap() map[string]EnvVar {
ret := map[string]EnvVar{
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug(), "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention(), "Enabled flash attention"},
"OLLAMA_KV_CACHE_TYPE": {"OLLAMA_KV_CACHE_TYPE", KvCacheType(), "Quantisation type for the K/V cache (default: f16)"},
"OLLAMA_GPU_OVERHEAD": {"OLLAMA_GPU_OVERHEAD", GpuOverhead(), "Reserve a portion of VRAM per GPU (bytes)"},
"OLLAMA_HOST": {"OLLAMA_HOST", Host(), "IP Address for the ollama server (default 127.0.0.1:11434)"},
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive(), "The duration that models stay loaded in memory (default \"5m\")"},

@@ -140,7 +140,7 @@ type ContextParams struct {
c C.struct_llama_context_params
}

-func NewContextParams(numCtx int, batchSize int, numSeqMax int, threads int, flashAttention bool) ContextParams {
+func NewContextParams(numCtx int, batchSize int, numSeqMax int, threads int, flashAttention bool, kvCacheType string) ContextParams {
params := C.llama_context_default_params()
params.n_ctx = C.uint(numCtx)
params.n_batch = C.uint(batchSize)

@@ -149,6 +149,9 @@ func NewContextParams(numCtx int, batchSize int, numSeqMax int, threads int, fla
params.n_threads_batch = params.n_threads
params.embeddings = C.bool(true)
params.flash_attn = C.bool(flashAttention)
params.type_k = KvCacheTypeFromStr(kvCacheType)
params.type_v = KvCacheTypeFromStr(kvCacheType)

return ContextParams{c: params}
}

@@ -668,3 +671,27 @@ func (s *SamplingContext) Sample(llamaContext *Context, idx int) int {
func (s *SamplingContext) Accept(id int, applyGrammar bool) {
C.gpt_sampler_caccept(s.c, C.llama_token(id), C.bool(applyGrammar))
}

// KvCacheTypeFromStr converts a string cache type to the corresponding GGML type value
func KvCacheTypeFromStr(s string) C.enum_ggml_type {
switch s {
case "f32":
return C.GGML_TYPE_F32
case "f16":
return C.GGML_TYPE_F16
case "q8_0":
return C.GGML_TYPE_Q8_0
case "q4_0":
return C.GGML_TYPE_Q4_0
case "q4_1":
return C.GGML_TYPE_Q4_1
case "iq4_nl":
return C.GGML_TYPE_IQ4_NL
case "q5_0":
return C.GGML_TYPE_Q5_0
case "q5_1":
return C.GGML_TYPE_Q5_1
default:
panic("Unsupported cache type: " + s)
}
}

@@ -471,7 +471,7 @@ func (s *Server) processBatch(tokenBatch *llama.Batch, embedBatch *llama.Batch)
// the last one generated wasn't submitted to Decode
// - Remove any stop sequences that we stripped out
// - If truncateStop removed a portion of a token, drop that
-// - As defense-in-depth, if truncatedToken didn't find a stop token
+// - As defence-in-depth, if truncatedToken didn't find a stop token
// remove the extra one that we added to the cache len
tokenLen := len(seq.cache.Inputs) + 1
tokenLen -= origLen - newLen

@@ -762,6 +762,7 @@ func (s *Server) loadModel(
flashAttention bool,
threads int,
multiUserCache bool,
kvCacheType string,
) {
llama.BackendInit()

@@ -771,7 +772,7 @@ func (s *Server) loadModel(
panic(err)
}

-ctxParams := llama.NewContextParams(kvSize, s.batchSize*s.parallel, s.parallel, threads, flashAttention)
+ctxParams := llama.NewContextParams(kvSize, s.batchSize*s.parallel, s.parallel, threads, flashAttention, kvCacheType)
s.lc, err = llama.NewContextWithModel(s.model, ctxParams)
if err != nil {
panic(err)

@@ -819,6 +820,7 @@ func main() {
tensorSplit := flag.String("tensor-split", "", "fraction of the model to offload to each GPU, comma-separated list of proportions")
multiUserCache := flag.Bool("multiuser-cache", false, "optimize input cache algorithm for multiple users")
requirements := flag.Bool("requirements", false, "print json requirement information")
kvCacheType := flag.String("kv-cache-type", "f16", "quantization type for KV cache (default: f16)")

flag.Parse()
if *requirements {

@@ -874,7 +876,7 @@ func main() {
}

server.ready.Add(1)
-go server.loadModel(params, *mpath, *lpath, *ppath, *kvSize, *flashAttention, *threads, *multiUserCache)
+go server.loadModel(params, *mpath, *lpath, *ppath, *kvSize, *flashAttention, *threads, *multiUserCache, *kvCacheType)

server.cond = sync.NewCond(&server.mu)

@@ -123,7 +123,21 @@ func EstimateGPULayers(gpus []discover.GpuInfo, ggml *GGML, projectors []string,
slog.Warn("model missing blk.0 layer size")
}

-kv, graphPartialOffload, graphFullOffload := ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
// Check if the model is an embedding model
isEmbeddingModel := false
if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok {
isEmbeddingModel = true
}

// Estimate the memory required for KV cache quantisation
kv := estimateKvCacheSize(envconfig.KvCacheType(), uint64(opts.NumCtx), ggml.KV().BlockCount(), ggml.KV().EmbeddingHeadCountK(), ggml.KV().HeadCountKV(), isEmbeddingModel) * 2

// KV is proportional to the number of layers
layerSize += kv / ggml.KV().BlockCount()

// Get graph sizes from ggml
_, graphPartialOffload, graphFullOffload = ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))

if graphPartialOffload == 0 {
graphPartialOffload = ggml.KV().GQA() * kv / 6
}

@@ -131,9 +145,6 @@ func EstimateGPULayers(gpus []discover.GpuInfo, ggml *GGML, projectors []string,
graphFullOffload = graphPartialOffload
}

-// KV is proportional to the number of layers
-layerSize += kv / ggml.KV().BlockCount()

// on metal there's no partial offload overhead
if gpus[0].Library == "metal" {
graphPartialOffload = graphFullOffload

@@ -437,3 +448,39 @@ func projectorMemoryRequirements(filename string) (weights, graphSize uint64) {

return weights, graphSize
}

// estimateKvCacheSize determines the memory required for K or V cache based on the quantization type
func estimateKvCacheSize(cacheType string, numCtx, blockCount, embeddingHeadCount, headCountKV uint64, isEmbeddingModel bool) uint64 {
var bytesPerElement float64

if isEmbeddingModel && cacheType != "f16" && cacheType != "f32" {
cacheType = "f16" // Default to f16 for embedding models if an unsupported type is specified
}

switch cacheType {
case "f32", "fp32":
bytesPerElement = 4 // fp32
case "", "f16", "fp16":
bytesPerElement = 2 // fp16
case "q8_0":
bytesPerElement = 1 // 1/2 of fp16
case "q5_1":
bytesPerElement = 0.65
case "q5_0":
bytesPerElement = 0.625
case "iq4_nl":
bytesPerElement = 0.6 // roughly 0.3 of fp16
case "q4_1":
bytesPerElement = 0.55
case "q4_0":
bytesPerElement = 0.5 // 1/4 of fp16
default:
// Default to fp16 if unknown
bytesPerElement = 2
slog.Warn("Unknown cache type, defaulting to fp16", "type", cacheType)
}

estimate := uint64(float64(numCtx*blockCount*embeddingHeadCount*headCountKV) * bytesPerElement)
// round up to the nearest multiple of 64 bytes
return ((estimate + 63) / 64) * 64
}
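
As a quick sanity check of the formula above, take the default f16 cache (2 bytes per element) with numCtx = 1024, blockCount = 32, embeddingHeadCount = 32 and headCountKV = 32: 1024 × 32 × 32 × 32 × 2 = 67,108,864 bytes (64 MiB) for a single cache, which is exactly what the f16 case in the test below expects. EstimateGPULayers then multiplies this result by 2 to account for both the K and the V cache.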

@@ -15,6 +15,7 @@ import (

func TestEstimateGPULayers(t *testing.T) {
t.Setenv("OLLAMA_DEBUG", "1")
t.Setenv("OLLAMA_KV_CACHE_TYPE", "")

modelName := "dummy"
f, err := os.CreateTemp(t.TempDir(), modelName)

@@ -57,6 +58,7 @@ func TestEstimateGPULayers(t *testing.T) {
}
projectors := []string{}
opts := api.DefaultOptions()

t.Run("cpu", func(t *testing.T) {
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
assert.Equal(t, 0, estimate.Layers)

@@ -70,7 +72,7 @@ func TestEstimateGPULayers(t *testing.T) {
projectorSize := uint64(0)
memoryLayerOutput := uint64(4)

-// Dual CUDA scenario with assymetry
+// Dual CUDA scenario with asymmetry
gpuMinimumMemory := uint64(2048)
gpus = []discover.GpuInfo{
{

@@ -126,3 +128,114 @@ func TestEstimateGPULayers(t *testing.T) {
})
}
}

func TestEstimateKvCacheSize(t *testing.T) {
tests := []struct {
name string
cacheType string
numCtx uint64
blockCount uint64
embeddingHeadCount uint64
headCountKV uint64
isEmbeddingModel bool
expected uint64
}{
{
name: "f32 cache type",
cacheType: "f32",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 134217728, // 128 MB
},
{
name: "f16 cache type",
cacheType: "f16",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 67108864, // 64 MB
},
{
name: "q4_0 cache type",
cacheType: "q4_0",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 16777216, // 16 MB
},
{
name: "q8_0 cache type",
cacheType: "q8_0",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 33554432, // 32 MB
},
{
name: "unknown cache type",
cacheType: "unknown",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 67108864, // 64 MB (defaults to f16)
},
{
name: "empty cache type",
cacheType: "",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 67108864, // 64 MB (defaults to f16)
},
{
name: "rounding test",
cacheType: "f32",
numCtx: 1000,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: false,
expected: 131072000, // Rounded up to nearest multiple of 64
},
{
name: "embedding model with q4_0 (should default to f16)",
cacheType: "q4_0",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: true,
expected: 67108864, // 64 MB (defaults to f16)
},
{
name: "embedding model with f32",
cacheType: "f32",
numCtx: 1024,
blockCount: 32,
embeddingHeadCount: 32,
headCountKV: 32,
isEmbeddingModel: true,
expected: 134217728, // 128 MB
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := estimateKvCacheSize(tt.cacheType, tt.numCtx, tt.blockCount, tt.embeddingHeadCount, tt.headCountKV, tt.isEmbeddingModel)
assert.Equal(t, tt.expected, result, "Estimated KV cache size does not match expected value")
})
}
}
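
To run just this table-driven test, something like the following should work from the repository root (the package path is not shown in this hunk, so the wildcard form is used):

```shell
go test -run TestEstimateKvCacheSize ./...
```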

@@ -17,6 +17,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"
"sync"
|
||||
params = append(params, "--threads", strconv.Itoa(defaultThreads))
|
||||
}
|
||||
|
||||
flashAttnEnabled := envconfig.FlashAttention()
|
||||
// isEmbeddingModel checks for common GGML attributes that help distinguish most embedding models from normal models.
|
||||
isEmbeddingModel := false
|
||||
if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok {
|
||||
isEmbeddingModel = true
|
||||
}
|
||||
|
||||
// Validates and applies KV cache parameters
|
||||
setCacheTypeParam := func(paramName, cacheType string) {
|
||||
if cacheType == "" {
|
||||
return
|
||||
}
|
||||
|
||||
validCacheTypes := []string{"f32", "f16", "q8_0", "q5_1", "q5_0", "iq4_nl", "q4_1", "q4_0"}
|
||||
if !slices.Contains(validCacheTypes, cacheType) {
|
||||
slog.Warn("invalid cache type, ignoring", "param", paramName, "type", cacheType)
|
||||
return
|
||||
}
|
||||
|
||||
// For embedding models, only allow f16 and f32
|
||||
if isEmbeddingModel && cacheType != "f16" && cacheType != "f32" {
|
||||
slog.Warn("only f16 and f32 cache types are supported for embedding models, ignoring",
|
||||
"param", paramName, "type", cacheType)
|
||||
return
|
||||
}
|
||||
|
||||
params = append(params, paramName, cacheType)
|
||||
slog.Debug("Setting cache type", "param", paramName, "type", cacheType)
|
||||
}
|
||||
|
||||
kvCacheType := envconfig.KvCacheType()
|
||||
|
||||
// Set cache types only if they are not empty
|
||||
supportsFlashAttention := func(ggml *GGML) bool {
|
||||
headCountK := ggml.KV().EmbeddingHeadCountK()
|
||||
headCountV := ggml.KV().EmbeddingHeadCountV()
|
||||
|
||||
if headCountK == 0 || headCountV == 0 {
|
||||
slog.Debug("Model is missing embedding head count for K or V, does not support flash attention")
|
||||
return false
|
||||
}
|
||||
|
||||
if headCountK != headCountV {
|
||||
slog.Debug("Embedding head count K does not equal V, does not support flash attention", "K", headCountK, "V", headCountV)
|
||||
return false
|
||||
}
|
||||
|
||||
slog.Debug("Model supports flash attention", "headCountK", headCountK, "headCountV", headCountV)
|
||||
return true
|
||||
}
|
||||
|
||||
flashAttnSupported := supportsFlashAttention(ggml)
|
||||
|
||||
hardwareSupportsFlashAttn := true
|
||||
for _, g := range gpus {
|
||||
// only cuda (compute capability 7+) and metal support flash attention
|
||||
if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
|
||||
flashAttnEnabled = false
|
||||
hardwareSupportsFlashAttn = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// mmap has issues with partial offloading on metal
|
||||
flashAttnEnabled := envconfig.FlashAttention() && flashAttnSupported && hardwareSupportsFlashAttn && !isEmbeddingModel
|
||||
|
||||
slog.Debug("Flash attention status",
|
||||
"supported_by_model", flashAttnSupported,
|
||||
"supported_by_hardware", hardwareSupportsFlashAttn,
|
||||
"is_embedding_model", isEmbeddingModel,
|
||||
"enabled", flashAttnEnabled)
|
||||
|
||||
if flashAttnEnabled {
|
||||
params = append(params, "--flash-attn")
|
||||
slog.Info("Enabling flash attention")
|
||||
|
||||
setCacheTypeParam("--kv-cache-type", kvCacheType)
|
||||
} else {
|
||||
slog.Info("Flash attention not enabled")
|
||||
quantizedCacheTypes := []string{"q8_0", "q5_1", "q5_0", "iq4_nl", "q4_1", "q4_0"}
|
||||
if !isEmbeddingModel && (kvCacheType != "") {
|
||||
if slices.Contains(quantizedCacheTypes, kvCacheType) {
|
||||
slog.Warn("Quantized cache types require flash attention. Falling back to default cache types.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mmap has issues with partial offloading on metal
|
||||
for _, g := range gpus {
|
||||
if g.Library == "metal" &&
|
||||
uint64(opts.NumGPU) > 0 &&
|
||||
uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
|
||||

@@ -234,10 +312,6 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter
}
}

-if flashAttnEnabled {
-params = append(params, "--flash-attn")
-}

// Windows CUDA should not use mmap for best performance
// Linux with a model larger than free space, mmap leads to thrashing
// For CPU loads we want the memory to be allocated, not FS cache