Compare commits: main...jyan/v0.14 (20 commits)

b662e4706e
be31611ff1
02ba11b614
03bb60e036
976fc86978
9bceb3b55e
7add3e5267
c4f2236cf9
b7ccdcef94
1f4f46800c
42574d3b11
7bd7e113e3
20240927f8
3af1c58146
d90b27a57f
b7ce14c764
161229a153
bd8d680e26
a562b9069f
5d76e78c2f
README.md

@@ -53,8 +53,8 @@ Here are some example models that can be downloaded:
 | Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
 | Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
 | Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
-| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
-| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
+| Gemma 2 | 9B | 5.5GB | `ollama run gemma2` |
+| Gemma 2 | 27B | 16GB | `ollama run gemma2:27b` |
 | Mistral | 7B | 4.1GB | `ollama run mistral` |
 | Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
 | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
@@ -292,6 +292,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
 - [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
 - [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)
+- [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) (Mesop Chat Client implementation with Ollama)
 
 ### Terminal
 
cmd/cmd.go (42 changes)

@@ -162,9 +162,6 @@ func tempZipFiles(path string) (string, error) {
 	}
 	defer tempfile.Close()
 
-	zipfile := zip.NewWriter(tempfile)
-	defer zipfile.Close()
-
 	detectContentType := func(path string) (string, error) {
 		f, err := os.Open(path)
 		if err != nil {
@@ -233,6 +230,9 @@ func tempZipFiles(path string) (string, error) {
 		files = append(files, tks...)
 	}
 
+	zipfile := zip.NewWriter(tempfile)
+	defer zipfile.Close()
+
 	for _, file := range files {
 		f, err := os.Open(file)
 		if err != nil {
@@ -624,13 +624,13 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 		return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
 	}
 
-	if flagsSet == 1 {
-		req := api.ShowRequest{Name: args[0]}
-		resp, err := client.Show(cmd.Context(), &req)
-		if err != nil {
-			return err
-		}
+	req := api.ShowRequest{Name: args[0]}
+	resp, err := client.Show(cmd.Context(), &req)
+	if err != nil {
+		return err
+	}
 
+	if flagsSet == 1 {
 		switch showType {
 		case "license":
 			fmt.Println(resp.License)
@@ -647,12 +647,12 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 		return nil
 	}
 
-	req := api.ShowRequest{Name: args[0]}
-	resp, err := client.Show(cmd.Context(), &req)
-	if err != nil {
-		return err
-	}
+	showInfo(resp)
 
+	return nil
+}
+
+func showInfo(resp *api.ShowResponse) {
 	arch := resp.ModelInfo["general.architecture"].(string)
 
 	modelData := [][]string{
@@ -672,11 +672,17 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 		projectorData := [][]string{
 			{"arch", "clip"},
 			{"parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))},
-			{"projector type", resp.ProjectorInfo["clip.projector_type"].(string)},
-			{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))},
-			{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))},
 		}
 
+		if projectorType, ok := resp.ProjectorInfo["clip.projector_type"]; ok {
+			projectorData = append(projectorData, []string{"projector type", projectorType.(string)})
+		}
+
+		projectorData = append(projectorData,
+			[]string{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))},
+			[]string{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))},
+		)
+
 		mainTableData = append(mainTableData,
 			[]string{"Projector"},
 			[]string{renderSubTable(projectorData, false)},
@@ -705,8 +711,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 	}
 
 	table.Render()
-
-	return nil
 }
 
 func renderSubTable(data [][]string, file bool) string {
cmd/interactive.go

@@ -404,15 +404,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 
 			switch args[1] {
 			case "info":
-				fmt.Println("Model details:")
-				if len(resp.Details.Families) > 0 {
-					fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", "))
-				} else if resp.Details.Family != "" {
-					fmt.Printf("Family %s\n", resp.Details.Family)
-				}
-				fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize)
-				fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel)
-				fmt.Println("")
+				showInfo(resp)
 			case "license":
 				if resp.License == "" {
 					fmt.Println("No license was specified for this model.")
docs/api.md

@@ -26,7 +26,7 @@ All durations are returned in nanoseconds.
 
 ### Streaming responses
 
-Certain endpoints stream responses as JSON objects and can optional return non-streamed responses.
+Certain endpoints stream responses as JSON objects. Streaming can be disabled by providing `{"stream": false}` for these endpoints.
 
 ## Generate a completion
 
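For illustration, a minimal Go sketch of a non-streaming request against the `/api/generate` endpoint documented here; the model name and prompt are assumptions for the example, not part of this change:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Request a completion with streaming disabled via {"stream": false}.
	body := []byte(`{"model": "gemma2", "prompt": "Why is the sky blue?", "stream": false}`)

	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // a single JSON object rather than a stream of them
}
```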
docs/faq.md (14 changes)

@@ -257,3 +257,17 @@ If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` AP
 ## How do I manage the maximum number of requests the Ollama server can queue?
 
 If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`.
+
+## How does Ollama handle concurrent requests?
+
+Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing.
+
+If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference new models must be able to completely fit in VRAM to allow concurrent model loads.
+
+Parallel request processing for a given model results in increasing the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests will result in an 8K context and additional memory allocation.
+
+The following server settings may be used to adjust how Ollama handles concurrent requests:
+
+- `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference.
+- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory.
+- `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512
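To make the context-size arithmetic concrete, a small illustrative Go sketch; the helper name is hypothetical, but the multiplication matches the scheduler change later in this diff (`pending.opts.NumCtx = pending.origNumCtx * numParallel`):

```go
package main

import "fmt"

// effectiveNumCtx mirrors the scaling described above: the requested context
// is multiplied by the number of parallel request slots, so KV-cache memory
// grows accordingly.
func effectiveNumCtx(numCtx, numParallel int) int {
	return numCtx * numParallel
}

func main() {
	// The FAQ's example: a 2K context with 4 parallel requests -> 8K context.
	fmt.Println(effectiveNumCtx(2048, 4)) // 8192
}
```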
docs/gpu.md

@@ -18,7 +18,7 @@ Check your compute compatibility to see if your card is supported:
 | | Quadro | `RTX 8000` `RTX 6000` `RTX 5000` `RTX 4000` |
 | 7.0 | NVIDIA | `TITAN V` `V100` `Quadro GV100` |
 | 6.1 | NVIDIA TITAN | `TITAN Xp` `TITAN X` |
-| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050` |
+| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050 Ti` `GTX 1050` |
 | | Quadro | `P6000` `P5200` `P4200` `P3200` `P5000` `P4000` `P3000` `P2200` `P2000` `P1000` `P620` `P600` `P500` `P520` |
 | | Tesla | `P40` `P4` |
 | 6.0 | NVIDIA | `Tesla P100` `Quadro GP100` |
docs/openai.md

@@ -104,7 +104,6 @@ curl http://localhost:11434/v1/chat/completions \
 
 #### Notes
 
-- `finish_reason` will always be `stop`
 - `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
 
 ## Models
envconfig/config.go

@@ -85,13 +85,13 @@ func AsMap() map[string]EnvVar {
 		"OLLAMA_HOST":              {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
 		"OLLAMA_KEEP_ALIVE":        {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
 		"OLLAMA_LLM_LIBRARY":       {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
-		"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
+		"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU"},
 		"OLLAMA_MAX_QUEUE":         {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
 		"OLLAMA_MAX_VRAM":          {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
 		"OLLAMA_MODELS":            {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
 		"OLLAMA_NOHISTORY":         {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
 		"OLLAMA_NOPRUNE":           {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
-		"OLLAMA_NUM_PARALLEL":      {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
+		"OLLAMA_NUM_PARALLEL":      {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"},
 		"OLLAMA_ORIGINS":           {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
 		"OLLAMA_RUNNERS_DIR":       {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
 		"OLLAMA_SCHED_SPREAD":      {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"},
@@ -129,8 +129,8 @@ func clean(key string) string {
 
 func init() {
 	// default values
-	NumParallel = 1
-	MaxRunners = 1
+	NumParallel = 0 // Autoselect
+	MaxRunners = 0  // Autoselect
 	MaxQueuedRequests = 512
 
 	LoadConfig()
@@ -205,8 +205,8 @@ func LoadConfig() {
 
 	if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
 		val, err := strconv.Atoi(onp)
-		if err != nil || val <= 0 {
-			slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
+		if err != nil {
+			slog.Error("invalid setting, ignoring", "OLLAMA_NUM_PARALLEL", onp, "error", err)
 		} else {
 			NumParallel = val
 		}
@@ -251,7 +251,7 @@ func LoadConfig() {
 	if maxRunners != "" {
 		m, err := strconv.Atoi(maxRunners)
 		if err != nil {
-			slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
+			slog.Error("invalid setting, ignoring", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
 		} else {
 			MaxRunners = m
 		}
@@ -260,7 +260,7 @@ func LoadConfig() {
 	if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
 		p, err := strconv.Atoi(onp)
 		if err != nil || p <= 0 {
-			slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
+			slog.Error("invalid setting, ignoring", "OLLAMA_MAX_QUEUE", onp, "error", err)
 		} else {
 			MaxQueuedRequests = p
 		}
gpu/amd_windows.go

@@ -115,8 +115,6 @@ func AMDGetGPUInfo() []RocmGPUInfo {
 			continue
 		}
 
-		// TODO revisit this once ROCm v6 is available on windows.
-		// v5.7 only reports VRAM used by this process, so it's completely wrong and unusable
 		slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
 		slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
 		gpuInfo := RocmGPUInfo{
@@ -126,6 +124,9 @@ func AMDGetGPUInfo() []RocmGPUInfo {
 				TotalMemory: totalMemory,
 				FreeMemory:  freeMemory,
 			},
+			// Free memory reporting on Windows is not reliable until we bump to ROCm v6.2
+			UnreliableFreeMemory: true,
+
 			ID:             strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices
 			DependencyPath: libDir,
 			MinimumMemory:  rocmMinimumMemory,
gpu/types.go

@@ -29,6 +29,11 @@ type GpuInfo struct {
 	// Extra environment variables specific to the GPU as list of [key,value]
 	EnvWorkarounds [][2]string `json:"envs,omitempty"`
 
+	// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
+	// the FreeMemory is best effort, and may over or under report actual memory usage
+	// False indicates FreeMemory can generally be trusted on this GPU
+	UnreliableFreeMemory bool
+
 	// GPU information
 	ID   string `json:"gpu_id"` // string to use for selection of this specific GPU
 	Name string `json:"name"`   // user friendly name if available
llm/ext_server/server.cpp (vendored, 46 changes)

@@ -1650,26 +1650,41 @@ struct llama_server_context
             }
             slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep);
 
+            char buf[256];
+            llama_model_meta_val_str(model, "general.architecture", buf, 256);
+            bool gemma2 = strcmp(buf, "gemma2") == 0;
+
+            int32_t truncate_at = slot.n_ctx;
+
+            // truncate at 2/3 of the context length for gemma2 models
+            // as they do not support context shifts (from the sliding window implementation).
+            // this way, prompts that almost fit the context length can still generate a full
+            // response without a sudden stop from hitting the context limit
+            if (gemma2) {
+                truncate_at = 2 * slot.n_ctx / 3;
+            }
+
             // if input prompt is too big, truncate it, if group attention self-extend is disabled
-            if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
+            if (slot.ga_n == 1 && slot.n_prompt_tokens >= truncate_at)
             {
                 const int n_left = slot.n_ctx - slot.params.n_keep;
-                const int n_block_size = n_left / 2;
-                const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
+                const int n_shift = n_left / 2;
+                const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
 
                 std::vector<llama_token> new_tokens(
                     prompt_tokens.begin(),
                     prompt_tokens.begin() + slot.params.n_keep);
                 new_tokens.insert(
                     new_tokens.end(),
-                    prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
+                    prompt_tokens.begin() + slot.params.n_keep + n_erase,
                     prompt_tokens.end());
 
-                LOG_VERBOSE("input truncated", {
+                LOG_INFO("input truncated", {
                     {"n_ctx",   slot.n_ctx},
                     {"n_keep",  slot.params.n_keep},
                     {"n_left",  n_left},
-                    {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
+                    {"n_shift", n_shift},
+                    {"n_erase", n_erase},
                 });
                 slot.truncated = true;
                 prompt_tokens = new_tokens;
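To make the new truncation arithmetic concrete, a rough Go sketch of the same window computation; names mirror the C++ above, but this is an illustration rather than code from the change:

```go
package main

import "fmt"

// truncatePrompt mirrors the arithmetic above: keep the first nKeep tokens,
// drop the next nErase tokens, and keep the remaining nShift tokens so the
// result fits in the context window.
func truncatePrompt(prompt []int, nCtx, nKeep int) []int {
	nLeft := nCtx - nKeep
	nShift := nLeft / 2
	nErase := len(prompt) - nKeep - nShift

	out := append([]int{}, prompt[:nKeep]...)
	out = append(out, prompt[nKeep+nErase:]...)
	return out
}

func main() {
	prompt := make([]int, 120) // a 120-token prompt
	fmt.Println(len(truncatePrompt(prompt, 100, 20))) // 60 = 20 kept + 40 shifted
}
```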
@@ -1678,6 +1693,19 @@ struct llama_server_context
                 GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
             }
 
+            // Models with sliding window attention do not work with context shifts, so
+            // limit their prediction to the context length
+            if (gemma2) {
+                int32_t limit = slot.n_ctx - slot.n_prompt_tokens;
+                slot.n_predict = limit;
+                slot.params.n_predict = limit;
+                LOG_INFO("model does not support sliding window, limiting generation", {
+                    {"n_ctx",           slot.n_ctx},
+                    {"n_prompt_tokens", slot.n_prompt_tokens},
+                    {"n_predict",       slot.n_predict}
+                });
+            }
+
             if (!slot.params.cache_prompt)
             {
                 llama_sampling_reset(slot.ctx_sampling);
llm/ggml.go (15 changes)

@@ -366,9 +366,18 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
 				4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
 			)
 		}
-	case "gemma":
-		fullOffload = 4 * batch * (embedding + vocab)
-		partialOffload = 4*batch*(2*embedding+vocab+1) + embedding*vocab*105/128
+	case "gemma", "gemma2":
+		fullOffload = max(
+			4*batch*(embedding+vocab),
+			4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
+		)
+
+		partialOffload = max(
+			4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
+			4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+
+				4*embeddingHeadsK*context*8+
+				embedding*embeddingHeadsK*heads*9/16,
+		)
 	case "command-r":
 		fullOffload = max(
 			4*batch*(embedding+vocab),
llm/patches/07-gemma.diff (new file, 305 lines)

@@ -0,0 +1,305 @@
From 5cadb45f39d001ffbad95b690d6cf0abcb4a6d96 Mon Sep 17 00:00:00 2001
From: Ollama maintainers <hello@ollama.com>
Date: Wed, 26 Jun 2024 16:18:09 -0700
Subject: [PATCH] Architecture support

---
 llama.cpp | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 193 insertions(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index 61948751..3b4196f5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -217,6 +217,7 @@ enum llm_arch {
     LLM_ARCH_INTERNLM2,
     LLM_ARCH_MINICPM,
     LLM_ARCH_GEMMA,
+    LLM_ARCH_GEMMA2,
     LLM_ARCH_STARCODER2,
     LLM_ARCH_MAMBA,
     LLM_ARCH_XVERSE,
@@ -255,6 +256,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_INTERNLM2, "internlm2" },
     { LLM_ARCH_MINICPM, "minicpm" },
     { LLM_ARCH_GEMMA, "gemma" },
+    { LLM_ARCH_GEMMA2, "gemma2" },
     { LLM_ARCH_STARCODER2, "starcoder2" },
     { LLM_ARCH_MAMBA, "mamba" },
     { LLM_ARCH_XVERSE, "xverse" },
@@ -464,10 +466,12 @@ enum llm_tensor {
     LLM_TENSOR_ATTN_NORM,
     LLM_TENSOR_ATTN_NORM_2,
     LLM_TENSOR_ATTN_OUT_NORM,
+    LLM_TENSOR_ATTN_POST_NORM,
     LLM_TENSOR_ATTN_ROT_EMBD,
     LLM_TENSOR_FFN_GATE_INP,
     LLM_TENSOR_FFN_GATE_INP_SHEXP,
     LLM_TENSOR_FFN_NORM,
+    LLM_TENSOR_FFN_POST_NORM,
     LLM_TENSOR_FFN_GATE,
     LLM_TENSOR_FFN_DOWN,
     LLM_TENSOR_FFN_UP,
@@ -960,6 +964,24 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
         { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
         },
     },
+    {
+        LLM_ARCH_GEMMA2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
+        },
+    },
     {
         LLM_ARCH_STARCODER2,
         {
@@ -1941,6 +1963,8 @@ enum e_model {
     MODEL_8x22B,
     MODEL_16x12B,
     MODEL_10B_128x3_66B,
+    MODEL_9B,
+    MODEL_27B,
 };
 
 static const size_t kiB = 1024;
@@ -2114,6 +2138,7 @@ struct llama_layer {
     struct ggml_tensor * attn_out_norm_b;
     struct ggml_tensor * attn_q_a_norm;
     struct ggml_tensor * attn_kv_a_norm;
+    struct ggml_tensor * attn_post_norm;
 
     // attention
     struct ggml_tensor * wq;
@@ -2136,6 +2161,7 @@ struct llama_layer {
     // normalization
     struct ggml_tensor * ffn_norm;
     struct ggml_tensor * ffn_norm_b;
+    struct ggml_tensor * ffn_post_norm;
     struct ggml_tensor * layer_out_norm;
     struct ggml_tensor * layer_out_norm_b;
     struct ggml_tensor * ffn_norm_exps;
@@ -4529,6 +4555,16 @@ static void llm_load_hparams(
             }
         } break;
         case LLM_ARCH_GEMMA:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                switch (hparams.n_layer) {
+                    case 18: model.type = e_model::MODEL_9B; break;
+                    case 28: model.type = e_model::MODEL_27B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
+        case LLM_ARCH_GEMMA2:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
 
@@ -6305,6 +6341,40 @@ static bool llm_load_tensors(
                     layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                 }
             } break;
+        case LLM_ARCH_GEMMA2:
+            {
+                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+
+                // output
+                model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
+
+                const int64_t n_ff = hparams.n_ff;
+                const int64_t n_embd_head_k = hparams.n_embd_head_k;
+                const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
+                const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
+
+                for (uint32_t i = 0; i < n_layer; ++i) {
+                    ggml_context * ctx_layer = ctx_for_layer(i);
+                    ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                    auto & layer = model.layers[i];
+
+                    layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+
+                    layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head});
+                    layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
+                    layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
+                    layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd});
+                    layer.attn_post_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd});
+
+                    layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+                    layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+                    layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
+                    layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
+                    layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd});
+                }
+            } break;
        case LLM_ARCH_STARCODER2:
            {
                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
@@ -10614,6 +10684,123 @@ struct llm_build_context {
         return gf;
     }
 
+    struct ggml_cgraph * build_gemma2() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
+
+        const int64_t n_embd_head_k = hparams.n_embd_head_k;
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+
+        inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
+        cb(inpL, "inp_scaled", -1);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+        for (int il = 0; il < n_layer; ++il) {
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                model.layers[il].attn_norm, NULL,
+                LLM_NORM_RMS, cb, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+
+                struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+
+                struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+
+                Qcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
+                    n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow);
+                cb(Qcur, "Qcur", il);
+
+                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
+                cb(Qcur, "Qcur_scaled", il);
+
+                Kcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
+                    n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow);
+                cb(Kcur, "Kcur", il);
+
+                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
+                    model.layers[il].wo, NULL,
+                    Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
+            }
+
+            cur = llm_build_norm(ctx0, cur, hparams,
+                model.layers[il].attn_post_norm, NULL,
+                LLM_NORM_RMS, cb, il);
+            cb(cur, "attn_post_norm", il);
+
+            struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
+            cb(sa_out, "sa_out", il);
+
+            cur = llm_build_norm(ctx0, sa_out, hparams,
+                model.layers[il].ffn_norm, NULL,
+                LLM_NORM_RMS, cb, il);
+            cb(cur, "ffn_norm", il);
+
+            // feed-forward network
+            {
+                cur = llm_build_ffn(ctx0, cur,
+                    model.layers[il].ffn_up, NULL,
+                    model.layers[il].ffn_gate, NULL,
+                    model.layers[il].ffn_down, NULL,
+                    NULL,
+                    LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
+                cb(cur, "ffn_out", il);
+            }
+
+            cur = llm_build_norm(ctx0, cur, hparams,
+                model.layers[il].ffn_post_norm, NULL,
+                LLM_NORM_RMS, cb, -1);
+            cb(cur, "ffn_post_norm", -1);
+
+            cur = ggml_add(ctx0, cur, sa_out);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+            model.output_norm, NULL,
+            LLM_NORM_RMS, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = ggml_mul_mat(ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
     struct ggml_cgraph * build_starcoder2() {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
@@ -11847,6 +12034,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_gemma();
             } break;
+        case LLM_ARCH_GEMMA2:
+            {
+                result = llm.build_gemma2();
+            } break;
         case LLM_ARCH_STARCODER2:
             {
                 result = llm.build_starcoder2();
@@ -16671,6 +16862,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_PHI2:
         case LLM_ARCH_PHI3:
         case LLM_ARCH_GEMMA:
+        case LLM_ARCH_GEMMA2:
         case LLM_ARCH_STARCODER2:
         case LLM_ARCH_GPTNEOX:
             return LLAMA_ROPE_TYPE_NEOX;
@@ -18551,7 +18743,7 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "<s>assistant\n";
         }
-    } else if (tmpl == "gemma" || tmpl.find("<start_of_turn>") != std::string::npos) {
+    } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl.find("<start_of_turn>") != std::string::npos) {
         // google/gemma-7b-it
         std::string system_prompt = "";
         for (auto message : chat) {
-- 
2.45.2
llm/server.go

@@ -82,7 +82,7 @@ func LoadModel(model string, maxArraySize int) (*GGML, error) {
 
 // NewLlamaServer will run a server for the given GPUs
 // The gpu list must be a single family.
-func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
+func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
 	var err error
 	var cpuRunner string
 	var estimate MemoryEstimate
@@ -218,8 +218,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 
 	// Windows CUDA should not use mmap for best performance
 	// Linux with a model larger than free space, mmap leads to thrashing
+	// For CPU loads we want the memory to be allocated, not FS cache
 	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
 		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
+		(gpus[0].Library == "cpu" && opts.UseMMap == api.TriStateUndefined) ||
 		opts.UseMMap == api.TriStateFalse {
 		params = append(params, "--no-mmap")
 	}
@@ -232,15 +234,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		params = append(params, "--numa")
 	}
 
-	numParallel := envconfig.NumParallel
-
-	// TODO (jmorganca): multimodal models don't support parallel yet
-	// see https://github.com/ollama/ollama/issues/4165
-	if len(projectors) > 0 {
-		numParallel = 1
-		slog.Warn("multimodal models don't support parallel requests yet")
-	}
-
 	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))
 
 	if estimate.TensorSplit != "" {
@@ -567,6 +560,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
 			if s.status != nil && s.status.LastErrMsg != "" {
 				msg = s.status.LastErrMsg
 			}
+			if strings.Contains(msg, "unknown model") {
+				return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
+			}
 			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
 		default:
 		}
llm/status.go

@@ -25,6 +25,7 @@ var errorPrefixes = []string{
 	"CUDA error",
 	"cudaMalloc failed",
 	"\"ERR\"",
+	"architecture",
 }
 
 func (w *StatusWriter) Write(b []byte) (int, error) {
server/model.go

@@ -11,6 +11,7 @@ import (
 	"net/http"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/convert"
@@ -77,62 +78,80 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
 	return layers, nil
 }
 
-func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
+func extractFromZipFile(p string, file *os.File, fn func(api.ProgressResponse)) error {
 	stat, err := file.Stat()
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	r, err := zip.NewReader(file, stat.Size())
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	tempdir, err := os.MkdirTemp(filepath.Dir(file.Name()), "")
-	if err != nil {
-		return nil, err
-	}
-	defer os.RemoveAll(tempdir)
-
 	fn(api.ProgressResponse{Status: "unpacking model metadata"})
 	for _, f := range r.File {
+		n := filepath.Join(p, f.Name)
+		if !strings.HasPrefix(n, p) {
+			slog.Warn("skipped extracting file outside of context", "name", f.Name)
+			continue
+		}
+
+		if err := os.MkdirAll(filepath.Dir(n), 0o750); err != nil {
+			return err
+		}
+
 		// TODO(mxyng): this should not write out all files to disk
-		outfile, err := os.Create(filepath.Join(tempdir, f.Name))
+		outfile, err := os.Create(n)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		defer outfile.Close()
 
 		infile, err := f.Open()
 		if err != nil {
-			return nil, err
+			return err
 		}
 		defer infile.Close()
 
 		if _, err = io.Copy(outfile, infile); err != nil {
-			return nil, err
+			return err
 		}
 
 		if err := outfile.Close(); err != nil {
-			return nil, err
+			return err
 		}
 
 		if err := infile.Close(); err != nil {
-			return nil, err
+			return err
 		}
 	}
 
-	mf, err := convert.GetModelFormat(tempdir)
+	return nil
+}
+
+func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
+	tempDir, err := os.MkdirTemp(filepath.Dir(file.Name()), "")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tempDir)
+
+	if err := extractFromZipFile(tempDir, file, fn); err != nil {
+		return nil, err
+	}
+
+	mf, err := convert.GetModelFormat(tempDir)
 	if err != nil {
 		return nil, err
 	}
 
-	params, err := mf.GetParams(tempdir)
+	params, err := mf.GetParams(tempDir)
 	if err != nil {
 		return nil, err
 	}
 
-	mArch, err := mf.GetModelArch("", tempdir, params)
+	mArch, err := mf.GetModelArch("", tempDir, params)
 	if err != nil {
 		return nil, err
 	}
@@ -150,7 +169,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
 
 	// TODO(mxyng): this should write directly into a layer
 	// e.g. NewLayer(arch.Reader(), "application/vnd.ollama.image.model")
-	temp, err := os.CreateTemp(tempdir, "fp16")
+	temp, err := os.CreateTemp(tempDir, "fp16")
 	if err != nil {
 		return nil, err
 	}
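The `strings.HasPrefix` check in `extractFromZipFile` above is a zip-slip guard: archive entries whose joined path escapes the extraction directory are skipped. A standalone Go sketch of the same idea (a hypothetical helper, not code from this change; this variant additionally anchors the prefix at a path separator):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin joins an archive entry name onto dir and rejects entries that
// would escape dir via ".." segments - the same zip-slip defense used above.
func safeJoin(dir, name string) (string, error) {
	n := filepath.Join(dir, name)
	if !strings.HasPrefix(n, filepath.Clean(dir)+string(filepath.Separator)) {
		return "", fmt.Errorf("entry %q escapes %q", name, dir)
	}
	return n, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/extract", "model/config.json")) // accepted
	fmt.Println(safeJoin("/tmp/extract", "../../etc/passwd"))  // rejected
}
```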
server/model_test.go (new file, 92 lines)

@@ -0,0 +1,92 @@
package server

import (
	"archive/zip"
	"bytes"
	"io"
	"os"
	"path/filepath"
	"slices"
	"testing"

	"github.com/ollama/ollama/api"
)

func createZipFile(t *testing.T, name string) *os.File {
	t.Helper()

	f, err := os.CreateTemp(t.TempDir(), "")
	if err != nil {
		t.Fatal(err)
	}

	zf := zip.NewWriter(f)
	defer zf.Close()

	zh, err := zf.CreateHeader(&zip.FileHeader{Name: name})
	if err != nil {
		t.Fatal(err)
	}

	if _, err := io.Copy(zh, bytes.NewReader([]byte(""))); err != nil {
		t.Fatal(err)
	}

	return f
}

func TestExtractFromZipFile(t *testing.T) {
	cases := []struct {
		name   string
		expect []string
	}{
		{
			name:   "good",
			expect: []string{"good"},
		},
		{
			name: filepath.Join("..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"),
		},
	}

	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			f := createZipFile(t, tt.name)
			defer f.Close()

			tempDir := t.TempDir()
			if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); err != nil {
				t.Fatal(err)
			}

			var matches []string
			if err := filepath.Walk(tempDir, func(p string, fi os.FileInfo, err error) error {
				if err != nil {
					return err
				}

				if !fi.IsDir() {
					matches = append(matches, p)
				}

				return nil
			}); err != nil {
				t.Fatal(err)
			}

			var actual []string
			for _, match := range matches {
				rel, err := filepath.Rel(tempDir, match)
				if err != nil {
					t.Error(err)
				}

				actual = append(actual, rel)
			}

			if !slices.Equal(actual, tt.expect) {
				t.Fatalf("expected %d files, got %d", len(tt.expect), len(matches))
			}
		})
	}
}
server/routes.go

@@ -1237,6 +1237,11 @@ func (s *Server) ProcessHandler(c *gin.Context) {
 		models = append(models, mr)
 	}
 
+	slices.SortStableFunc(models, func(i, j api.ProcessModelResponse) int {
+		// longest duration remaining listed first
+		return cmp.Compare(j.ExpiresAt.Unix(), i.ExpiresAt.Unix())
+	})
+
 	c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
 }
 
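Note the argument order in `cmp.Compare` above: comparing `j` against `i` yields a descending sort. A standalone sketch with made-up timestamps (Go 1.21+ `slices` and `cmp`):

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

func main() {
	expiries := []int64{1700000300, 1700000100, 1700000200}
	// Swapping the arguments to cmp.Compare sorts descending, so the
	// longest remaining duration ends up first.
	slices.SortStableFunc(expiries, func(i, j int64) int {
		return cmp.Compare(j, i)
	})
	fmt.Println(expiries) // [1700000300 1700000200 1700000100]
}
```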
124
server/sched.go
124
server/sched.go
@ -23,6 +23,7 @@ type LlmRequest struct {
|
|||||||
ctx context.Context //nolint:containedctx
|
ctx context.Context //nolint:containedctx
|
||||||
model *Model
|
model *Model
|
||||||
opts api.Options
|
opts api.Options
|
||||||
|
origNumCtx int // Track the initial ctx request
|
||||||
sessionDuration time.Duration
|
sessionDuration time.Duration
|
||||||
successCh chan *runnerRef
|
successCh chan *runnerRef
|
||||||
errCh chan error
|
errCh chan error
|
||||||
@ -38,13 +39,23 @@ type Scheduler struct {
|
|||||||
loaded map[string]*runnerRef
|
loaded map[string]*runnerRef
|
||||||
loadedMu sync.Mutex
|
loadedMu sync.Mutex
|
||||||
|
|
||||||
loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
|
loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int)
|
||||||
newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error)
|
newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error)
|
||||||
getGpuFn func() gpu.GpuInfoList
|
getGpuFn func() gpu.GpuInfoList
|
||||||
getCpuFn func() gpu.GpuInfoList
|
getCpuFn func() gpu.GpuInfoList
|
||||||
reschedDelay time.Duration
|
reschedDelay time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Default automatic value for number of models we allow per GPU
|
||||||
|
// Model will still need to fit in VRAM, but loading many small models
|
||||||
|
// on a large GPU can cause stalling
|
||||||
|
var defaultModelsPerGPU = 3
|
||||||
|
|
||||||
|
// Default automatic value for parallel setting
|
||||||
|
// Model will still need to fit in VRAM. If this setting wont fit
|
||||||
|
// we'll back off down to 1 to try to get it to fit
|
||||||
|
var defaultParallel = 4
|
||||||
|
|
||||||
var ErrMaxQueue = fmt.Errorf("server busy, please try again. maximum pending requests exceeded")
|
var ErrMaxQueue = fmt.Errorf("server busy, please try again. maximum pending requests exceeded")
|
||||||
|
|
||||||
func InitScheduler(ctx context.Context) *Scheduler {
|
func InitScheduler(ctx context.Context) *Scheduler {
|
||||||
@ -65,13 +76,10 @@ func InitScheduler(ctx context.Context) *Scheduler {
|
|||||||
|
|
||||||
// context must be canceled to decrement ref count and release the runner
|
// context must be canceled to decrement ref count and release the runner
|
||||||
func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) {
|
func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) {
|
||||||
// allocate a large enough kv cache for all parallel requests
|
|
||||||
if opts.NumCtx < 4 {
|
if opts.NumCtx < 4 {
|
||||||
opts.NumCtx = 4
|
opts.NumCtx = 4
|
||||||
}
|
}
|
||||||
|
|
||||||
opts.NumCtx *= envconfig.NumParallel
|
|
||||||
|
|
||||||
req := &LlmRequest{
|
req := &LlmRequest{
|
||||||
ctx: c,
|
ctx: c,
|
||||||
model: model,
|
model: model,
|
||||||
@ -110,11 +118,25 @@ func (s *Scheduler) processPending(ctx context.Context) {
|
|||||||
case pending := <-s.pendingReqCh:
|
case pending := <-s.pendingReqCh:
|
||||||
// Block other requests until we get this pending request running
|
// Block other requests until we get this pending request running
|
||||||
pending.schedAttempts++
|
pending.schedAttempts++
|
||||||
|
if pending.origNumCtx == 0 {
|
||||||
|
pending.origNumCtx = pending.opts.NumCtx
|
||||||
|
}
|
||||||
|
|
||||||
if pending.ctx.Err() != nil {
|
if pending.ctx.Err() != nil {
|
||||||
slog.Debug("pending request cancelled or timed out, skipping scheduling")
|
slog.Debug("pending request cancelled or timed out, skipping scheduling")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
numParallel := envconfig.NumParallel
|
||||||
|
// TODO (jmorganca): multimodal models don't support parallel yet
|
||||||
|
// see https://github.com/ollama/ollama/issues/4165
|
||||||
|
if len(pending.model.ProjectorPaths) > 0 && numParallel != 1 {
|
||||||
|
numParallel = 1
|
||||||
|
slog.Warn("multimodal models don't support parallel requests yet")
|
||||||
|
}
|
||||||
|
// Keep NumCtx and numParallel in sync
|
||||||
|
if numParallel > 1 {
|
||||||
|
pending.opts.NumCtx = pending.origNumCtx * numParallel
|
||||||
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
var runnerToExpire *runnerRef
|
var runnerToExpire *runnerRef
|
||||||
@ -143,6 +165,26 @@ func (s *Scheduler) processPending(ctx context.Context) {
|
|||||||
gpus = s.getGpuFn()
|
gpus = s.getGpuFn()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if envconfig.MaxRunners <= 0 {
|
||||||
|
// No user specified MaxRunners, so figure out what automatic setting to use
|
||||||
|
// If all GPUs have reliable free memory reporting, defaultModelsPerGPU * the number of GPUs
|
||||||
|
// if any GPU has unreliable free memory reporting, 1x the number of GPUs
|
||||||
|
allReliable := true
|
||||||
|
for _, gpu := range gpus {
|
||||||
|
if gpu.UnreliableFreeMemory {
|
||||||
|
allReliable = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if allReliable {
|
||||||
|
envconfig.MaxRunners = defaultModelsPerGPU * len(gpus)
|
||||||
|
slog.Debug("updating default concurrency", "OLLAMA_MAX_LOADED_MODELS", envconfig.MaxRunners, "gpu_count", len(gpus))
|
||||||
|
} else {
|
||||||
|
slog.Info("one or more GPUs detected that are unable to accurately report free memory - disabling default concurrency")
|
||||||
|
envconfig.MaxRunners = len(gpus)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Load model for fitting
|
// Load model for fitting
|
||||||
ggml, err := llm.LoadModel(pending.model.ModelPath, 0)
|
ggml, err := llm.LoadModel(pending.model.ModelPath, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -152,26 +194,32 @@ func (s *Scheduler) processPending(ctx context.Context) {

 					// Evaluate if the model will fit in the available system memory, or if we should unload a model first
 					if len(gpus) == 1 && gpus[0].Library == "cpu" {
+						// simplifying assumption of defaultParallel when in CPU mode
+						if numParallel <= 0 {
+							numParallel = defaultParallel
+							pending.opts.NumCtx = pending.origNumCtx * numParallel
+						}
+
 						if loadedCount == 0 {
 							slog.Debug("cpu mode with first model, loading")
-							s.loadFn(pending, ggml, gpus)
+							s.loadFn(pending, ggml, gpus, numParallel)
 							break
 						}
 						runnerToExpire = s.maybeFindCPURunnerToUnload(pending, ggml, gpus)
 						if runnerToExpire == nil {
 							slog.Debug("cpu mode with available system memory or first model, loading")
-							s.loadFn(pending, ggml, gpus)
+							s.loadFn(pending, ggml, gpus, numParallel)
 							break
 						}
 						// else we need to expire a runner
 					} else if loadedCount == 0 {
 						// No models loaded. Load the model but prefer the best fit.
 						slog.Debug("loading first model", "model", pending.model.ModelPath)
-						g := pickBestFitGPUs(pending, ggml, gpus)
+						g := pickBestFitGPUs(pending, ggml, gpus, &numParallel)
 						if g != nil {
 							gpus = g
 						}
-						s.loadFn(pending, ggml, gpus)
+						s.loadFn(pending, ggml, gpus, numParallel)
 						break
 					}

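In CPU mode the scheduler now assumes `defaultParallel` slots when no explicit setting is given and scales the context up front: the runner divides its total context evenly across parallel slots, so multiplying preserves the per-request `NumCtx` the user asked for. A toy illustration of the arithmetic; the slot count used here is an assumed value, not the actual constant.

```go
package main

import "fmt"

func main() {
	origNumCtx := 2048   // context the user asked for per request
	defaultParallel := 4 // assumed default slot count for this sketch
	numCtx := origNumCtx * defaultParallel

	// The runner is launched with the scaled total; each of the 4 slots
	// still serves a full 2048-token context.
	fmt.Println(numCtx) // 8192
}
```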
@ -186,10 +234,10 @@ func (s *Scheduler) processPending(ctx context.Context) {

 						// Update free memory from currently loaded models
 						s.updateFreeSpace(availGpus)
-						fitGpus := pickBestFitGPUs(pending, ggml, availGpus)
+						fitGpus := pickBestFitGPUs(pending, ggml, availGpus, &numParallel)
 						if fitGpus != nil {
 							slog.Debug("new model fits with existing models, loading")
-							s.loadFn(pending, ggml, fitGpus)
+							s.loadFn(pending, ggml, fitGpus, numParallel)
 							break
 						}

@ -350,8 +398,11 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *LlmRequest) {
 	}()
 }

-func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) {
-	llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts)
+func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int) {
+	if numParallel < 1 {
+		numParallel = 1
+	}
+	llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel)
 	if err != nil {
 		// some older models are not compatible with newer versions of llama.cpp
 		// show a generalized compatibility error until there is a better way to
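`load` now carries the negotiated parallelism through to `newServerFn` and clamps it to at least 1, so callers that never resolved a value (the tests below pass 0) still yield a valid slot count. The guard in isolation, as a minimal sketch:

```go
// clampParallel mirrors the guard at the top of (*Scheduler).load:
// a non-positive request count collapses to a single slot.
func clampParallel(numParallel int) int {
	if numParallel < 1 {
		return 1
	}
	return numParallel
}
```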
@ -375,6 +426,7 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
 		loading:         true,
 		refCount:        1,
 	}
+	runner.numParallel = numParallel
 	runner.refMu.Lock()

 	s.loadedMu.Lock()
@ -483,8 +535,9 @@ type runnerRef struct {
 	expireTimer *time.Timer
 	expiresAt   time.Time

 	model       *Model
 	modelPath   string
+	numParallel int
 	*api.Options
 }

@ -525,6 +578,9 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
 		optsNew.NumGPU = -1
 	}

+	// Normalize the NumCtx for parallelism
+	optsExisting.NumCtx = optsExisting.NumCtx / runner.numParallel
+
 	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 	if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed?
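Because the runner was started with `NumCtx` multiplied by its parallelism, `needsReload` must divide that factor back out before comparing against the incoming request, which still carries the per-request value; otherwise every request against a parallel runner would look like a context-size change and force a reload. A small sketch of the comparison, with hypothetical values:

```go
package main

import "fmt"

func main() {
	runnerNumCtx := 8192  // total context the runner was launched with
	runnerParallel := 4   // slots the runner was given
	requestNumCtx := 2048 // what the new request asks for

	// Normalize the runner's total context back to a per-slot value
	// before comparing, as needsReload now does.
	perSlot := runnerNumCtx / runnerParallel
	fmt.Println(perSlot == requestNumCtx) // true: no reload needed
}
```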
@ -611,22 +667,38 @@ func (a ByDuration) Less(i, j int) bool {
 }

 // pickBestFitGPUs will try to find the optimal placement of the model in the available GPUs where the model fully fits
 // If the model can not be fit fully within the available GPU(s) nil is returned
-func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.GpuInfoList {
+// If numParallel is <= 0, this will attempt to optimize parallelism based on available VRAM, and adjust
+// opts.NumCtx accordingly
+func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel *int) gpu.GpuInfoList {
 	var estimatedVRAM uint64
+
+	var numParallelToTry []int
+	if *numParallel <= 0 {
+		// If no specific parallel setting was provided, try larger then smaller, always end with 1
+		numParallelToTry = append(numParallelToTry, defaultParallel, 1)
+	} else {
+		numParallelToTry = []int{*numParallel}
+	}
+
 	for _, gl := range gpus.ByLibrary() {
 		var ok bool
 		sgl := append(make(gpu.GpuInfoList, 0, len(gl)), gl...)

 		// TODO - potentially sort by performance capability, existing models loaded, etc.
+		// TODO - Eliminate any GPUs that already have envconfig.MaxRunners loaded on them
 		// Note: at present, this will favor more VRAM over faster GPU speed in mixed setups
 		sort.Sort(sort.Reverse(gpu.ByFreeMemory(sgl)))

 		// First attempt to fit the model into a single GPU
-		if !envconfig.SchedSpread {
-			for _, g := range sgl {
-				if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
-					slog.Debug("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
-					return []gpu.GpuInfo{g}
+		for _, p := range numParallelToTry {
+			req.opts.NumCtx = req.origNumCtx * p
+			if !envconfig.SchedSpread {
+				for _, g := range sgl {
+					if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
+						slog.Info("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "parallel", p, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
+						*numParallel = p
+						return []gpu.GpuInfo{g}
+					}
 				}
 			}
 		}
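The fitting logic is now a search over candidate parallelism values: with no explicit setting it tries `defaultParallel` first and falls back to 1, and the first candidate whose scaled `NumCtx` fits in VRAM wins. A compact sketch of that fallback search, with an invented cost model standing in for `llm.PredictServerFit`:

```go
package main

import "fmt"

// fits is a stand-in for llm.PredictServerFit: here a model "fits" when the
// VRAM it needs at a given total context stays under a fixed budget. Both
// constants are inventions of this sketch.
func fits(numCtx int) bool {
	const bytesPerCtxToken = 512 * 1024 // assumed per-token cost
	const vramBudget = 2 << 30          // 2 GiB
	return numCtx*bytesPerCtxToken < vramBudget
}

// pickParallel mirrors the loop shape in pickBestFitGPUs: try larger
// parallelism first, always ending with 1, and report what fit.
func pickParallel(origNumCtx int, candidates []int) (int, bool) {
	for _, p := range candidates {
		if fits(origNumCtx * p) {
			return p, true
		}
	}
	return 0, false
}

func main() {
	p, ok := pickParallel(2048, []int{4, 1})
	fmt.Println(p, ok) // 1 true: 4 slots needed 4 GiB and didn't fit
}
```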
@ -636,9 +708,13 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.
 		// - try subsets of GPUs instead of just falling back to 1 or all in a family

 		// Now try all the GPUs
-		if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
-			slog.Debug("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "required", format.HumanBytes2(estimatedVRAM))
-			return sgl
+		for _, p := range numParallelToTry {
+			req.opts.NumCtx = req.origNumCtx * p
+			if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
+				slog.Info("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "parallel", p, "required", format.HumanBytes2(estimatedVRAM))
+				*numParallel = p
+				return sgl
+			}
 		}
 	}
 	return nil
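Taken together, the scheduler changes give both knobs adaptive defaults: leaving `OLLAMA_NUM_PARALLEL` unset lets `pickBestFitGPUs` negotiate a slot count against available VRAM, and leaving `OLLAMA_MAX_LOADED_MODELS` unset derives a limit from the GPU count. Setting either variable explicitly pins the previous fixed behavior.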
server/sched_test.go

@ -47,11 +47,11 @@ func TestLoad(t *testing.T) {
 		sessionDuration: 2,
 	}
 	// Fail to load model first
-	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 		return nil, fmt.Errorf("something failed to load model blah")
 	}
 	gpus := gpu.GpuInfoList{}
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	require.Empty(t, req.successCh)
 	require.Len(t, req.errCh, 1)
 	s.loadedMu.Lock()
@ -61,10 +61,10 @@ func TestLoad(t *testing.T) {
 	require.Contains(t, err.Error(), "this model may be incompatible")

 	server := &mockLlm{estimatedVRAM: 10, estimatedVRAMByGPU: map[string]uint64{}}
-	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 		return server, nil
 	}
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	select {
 	case err := <-req.errCh:
 		require.NoError(t, err)
@ -78,12 +78,12 @@ func TestLoad(t *testing.T) {

 	req.model.ModelPath = "dummy_model_path"
 	server.waitResp = fmt.Errorf("wait failure")
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	select {
 	case err := <-req.errCh:
 		require.Contains(t, err.Error(), "wait failure")
 	case resp := <-req.successCh:
-		t.Errorf("unexpected success %v", resp)
+		t.Fatalf("unexpected success %v", resp)
 	}
 	s.loadedMu.Lock()
 	runner := s.loaded["dummy_model_path"]
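The test updates in this file consistently swap `t.Errorf` for `t.Fatalf`/`t.Fatal` in branches where continuing is meaningless: `Errorf` marks the test failed but keeps executing, while `Fatalf` stops the test immediately, so follow-on assertions can't dereference state that never materialized. A minimal contrast, using a hypothetical `lookup` helper:

```go
package demo

import "testing"

// TestFailFast illustrates the distinction: after t.Fatalf the remaining
// lines never run, so a nil result can't be dereferenced further down.
func TestFailFast(t *testing.T) {
	result := lookup() // hypothetical helper returning *string
	if result == nil {
		t.Fatalf("lookup returned nil") // stops here; Errorf would continue
	}
	if *result != "ok" { // safe: unreachable when result is nil
		t.Errorf("unexpected value %q", *result)
	}
}

func lookup() *string { s := "ok"; return &s }
```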
@ -102,7 +102,7 @@ type bundle struct {
 	ggml  *llm.GGML
 }

-func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 	return scenario.srv, nil
 }

@ -200,7 +200,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario1a.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	// Same runner as first request due to not needing a reload
@ -213,7 +213,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario1b.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	// Trigger a reload
@ -231,7 +231,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario2a.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	envconfig.MaxRunners = 1
@ -247,7 +247,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3a.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 1)
@ -263,7 +263,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3b.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 2)
@ -279,7 +279,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3c.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 3)
@ -306,7 +306,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3d.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 2)
@ -349,7 +349,7 @@ func TestGetRunner(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, errCh1a)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	scenario1a.ctxDone()
 	s.loadedMu.Lock()
@ -400,7 +400,7 @@ func TestPrematureExpired(t *testing.T) {
 		slog.Info("sending premature expired event now")
 		s.expiredCh <- resp // Shouldn't happen in real life, but make sure it's safe
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	time.Sleep(scenario1a.req.sessionDuration)
 	scenario1a.ctxDone()
@ -427,7 +427,7 @@ func TestUseLoadedRunner(t *testing.T) {
 	}
 	finished := make(chan *LlmRequest)
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
-	r1 := &runnerRef{llama: llm1, sessionDuration: 1}
+	r1 := &runnerRef{llama: llm1, sessionDuration: 1, numParallel: 1}
 	req.useLoadedRunner(r1, finished)
 	require.Equal(t, uint(1), r1.refCount)
 	require.Equal(t, time.Duration(2), r1.sessionDuration)
@ -435,7 +435,7 @@ func TestUseLoadedRunner(t *testing.T) {
 	case success := <-req.successCh:
 		require.Equal(t, r1, success)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	done()
 	fin := <-finished
@ -461,8 +461,8 @@ func TestUpdateFreeSpace(t *testing.T) {
 	gpus[1].FreeMemory = 1900
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 50, "2": 50}}
 	llm2 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 125, "2": 75}}
-	r1 := &runnerRef{llama: llm1, gpus: gpus}
-	r2 := &runnerRef{llama: llm2, gpus: gpus}
+	r1 := &runnerRef{llama: llm1, gpus: gpus, numParallel: 1}
+	r2 := &runnerRef{llama: llm2, gpus: gpus, numParallel: 1}

 	s := InitScheduler(ctx)
 	s.loadedMu.Lock()
@ -513,8 +513,8 @@ func TestFindRunnerToUnload(t *testing.T) {
 	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer done()

-	r1 := &runnerRef{refCount: 1, sessionDuration: 1}
-	r2 := &runnerRef{sessionDuration: 2}
+	r1 := &runnerRef{refCount: 1, sessionDuration: 1, numParallel: 1}
+	r2 := &runnerRef{sessionDuration: 2, numParallel: 1}

 	s := InitScheduler(ctx)
 	s.loadedMu.Lock()
@ -536,9 +536,13 @@ func TestNeedsReload(t *testing.T) {
 	llm := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
 	do := api.DefaultOptions()
 	runner := &runnerRef{
-		model:   &Model{AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}},
-		Options: &do,
-		llama:   llm,
+		model: &Model{
+			AdapterPaths:   []string{"adapter1"},
+			ProjectorPaths: []string{"projector1"},
+		},
+		Options:     &do,
+		llama:       llm,
+		numParallel: 1,
 	}
 	req := &LlmRequest{
 		model: &Model{
@ -581,8 +585,8 @@ func TestUnloadAllRunners(t *testing.T) {
 	s := InitScheduler(ctx)
 	s.unloadAllRunners()

-	r1 := &runnerRef{llama: llm1}
-	r2 := &runnerRef{llama: llm2}
+	r1 := &runnerRef{llama: llm1, numParallel: 1}
+	r2 := &runnerRef{llama: llm2, numParallel: 1}

 	s.loadedMu.Lock()
 	s.loaded["a"] = r1
@ -596,14 +600,32 @@ func TestUnloadAllRunners(t *testing.T) {

 func TestUnload(t *testing.T) {
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
-	r1 := &runnerRef{llama: llm1}
-	r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}}
+	r1 := &runnerRef{llama: llm1, numParallel: 1}
+	r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}, numParallel: 1}
 	r1.unload()
 	require.True(t, llm1.closeCalled)
 	r2.unload()
 	require.Nil(t, r2.model)
 }

+func TestAlreadyCanceled(t *testing.T) {
+	ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
+	defer done()
+	dctx, done2 := context.WithCancel(ctx)
+	done2()
+	scenario1a := newScenario(t, dctx, "ollama-model-1", 10)
+	scenario1a.req.sessionDuration = 0
+	s := InitScheduler(ctx)
+	slog.Info("scenario1a")
+	s.pendingReqCh <- scenario1a.req
+	require.Len(t, s.pendingReqCh, 1)
+	s.Run(ctx)
+	time.Sleep(5 * time.Millisecond)
+	require.Empty(t, s.pendingReqCh)
+	require.Empty(t, scenario1a.req.errCh)
+	require.Empty(t, scenario1a.req.successCh)
+}
+
 type mockLlm struct {
 	pingResp  error
 	waitResp  error
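The new `TestAlreadyCanceled` covers a request whose context is dead before the scheduler ever picks it up; the expected outcome is silence on both the success and error channels rather than a load attempt. The pattern in isolation, as a minimal sketch:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // canceled before any work is scheduled, as in the test

	select {
	case <-ctx.Done():
		fmt.Println("request already canceled; dropped without error") // taken
	default:
		fmt.Println("would proceed to load the model")
	}
}
```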