From 7487229c343e5177356a40ddc5061a9fc6dfaded Mon Sep 17 00:00:00 2001
From: Lei Jitang
Date: Tue, 28 May 2024 08:21:10 +0800
Subject: [PATCH] llm/server.go: Fix 2 minor typos (#4661)

Signed-off-by: Lei Jitang
---
 llm/server.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llm/server.go b/llm/server.go
index f670931f..cf75de90 100644
--- a/llm/server.go
+++ b/llm/server.go
@@ -24,9 +24,9 @@ import (
 	"golang.org/x/sync/semaphore"
 
 	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/gpu"
-	"github.com/ollama/ollama/envconfig"
 )
 
 type LlamaServer interface {
@@ -243,7 +243,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		gpuCount = 0
 	}
 
-	// Find an availableServers port, retry on each iterration in case the failure was a port conflict race
+	// Find an availableServers port, retry on each iteration in case the failure was a port conflict race
 	port := 0
 	if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
 		var l *net.TCPListener
@@ -756,7 +756,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 
 		var c completion
 		if err := json.Unmarshal(evt, &c); err != nil {
-			return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
+			return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
 		}
 
 		switch {
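
Context for the second hunk: the comment it retouches describes how NewLlamaServer picks a port for the llama.cpp subprocess by resolving localhost:0 and briefly listening on it, retrying on each iteration because another process can claim the port between release and reuse. A minimal standalone sketch of that pattern, assuming only the Go standard library (findFreePort is an illustrative name, not ollama's):

```go
package main

import (
	"fmt"
	"net"
)

// findFreePort asks the OS for an ephemeral port by listening on
// localhost:0, then closes the listener so the port can be handed to a
// child process. Another process may grab the port between Close and
// reuse (the "port conflict race" the patched comment mentions), which
// is why the caller retries on each iteration.
func findFreePort() (int, error) {
	a, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	l, err := net.ListenTCP("tcp", a)
	if err != nil {
		return 0, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := findFreePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("picked port:", port)
}
```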
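Context for the third hunk: the corrected error string sits in Completion's streaming loop, where each event payload is passed to json.Unmarshal. A hedged sketch of just that decode step; the completion struct below is a stand-in, as the real one in llm/server.go carries more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// completion is a stand-in for the event shape streamed back by the
// server; the real struct in llm/server.go differs.
type completion struct {
	Content string `json:"content"`
	Stop    bool   `json:"stop"`
}

// decodeEvent unmarshals one raw event payload, mirroring the decode
// step whose error message the patch rewords.
func decodeEvent(evt []byte) (*completion, error) {
	var c completion
	if err := json.Unmarshal(evt, &c); err != nil {
		return nil, fmt.Errorf("error unmarshalling llm prediction response: %v", err)
	}
	return &c, nil
}

func main() {
	c, err := decodeEvent([]byte(`{"content":"hello","stop":false}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *c)
}
```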