diff --git a/llama/llama.go b/llama/llama.go
index 55501598..db2b522d 100644
--- a/llama/llama.go
+++ b/llama/llama.go
@@ -302,7 +302,7 @@ func (m *Model) Tokenize(text string, addSpecial bool, parseSpecial bool) ([]int
 	}
 
 	tokens := make([]int, result)
-	for i := 0; i < int(result); i++ {
+	for i := range result {
 		tokens[i] = int(cTokens[i])
 	}
 
diff --git a/llama/runner/runner.go b/llama/runner/runner.go
index 6fad0d95..05f436ad 100644
--- a/llama/runner/runner.go
+++ b/llama/runner/runner.go
@@ -441,7 +441,10 @@ func main() {
 	server.model = llama.LoadModelFromFile(*mpath, params)
 
 	if *lpath != "" {
-		server.model.ApplyLoraFromFile(*lpath, 1.0, "", *threads)
+		err := server.model.ApplyLoraFromFile(*lpath, 1.0, "", *threads)
+		if err != nil {
+			panic(err)
+		}
 	}
 
 	ctxParams := llama.NewContextParams(*numCtx, *threads, *flashAttention)
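
A minimal sketch of the two patterns above, assuming Go 1.22+ (which allows `for i := range n` over an integer count). The sample values and the `applyLora` helper are hypothetical stand-ins for the cgo token count/buffer and for `Model.ApplyLoraFromFile`:

```go
package main

import "fmt"

// applyLora is a hypothetical stand-in for Model.ApplyLoraFromFile.
func applyLora(path string) error {
	return nil
}

func main() {
	// Go 1.22+ lets a loop range directly over an integer count,
	// which is what the Tokenize change uses instead of a three-clause loop.
	result := 4                      // hypothetical token count returned by the C side
	cTokens := []int32{7, 42, 3, 99} // hypothetical token buffer filled by the C side

	tokens := make([]int, result)
	for i := range result {
		tokens[i] = int(cTokens[i])
	}
	fmt.Println(tokens) // [7 42 3 99]

	// The runner change surfaces the LoRA load error instead of discarding it.
	if err := applyLora("adapter.bin"); err != nil {
		panic(err)
	}
}
```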