Compare commits

...

1 Commit

Author SHA1 Message Date
Bruce MacDonald
e94e5b1771 fix: retry on concurrent request failure 2023-12-08 17:33:41 -08:00

View File

@ -535,6 +535,8 @@ type prediction struct {
} }
const maxBufferSize = 512 * format.KiloByte const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3
const retryDelay = 1 * time.Second
type PredictOpts struct { type PredictOpts struct {
Model string Model string
@ -557,6 +559,11 @@ type PredictResult struct {
EvalDuration time.Duration EvalDuration time.Duration
} }
// isRetryable reports whether the response line indicates a transient
// failure that can be retried (the llama runner had no free request
// slot when this prediction was submitted).
func isRetryable(line []byte) bool {
	return bytes.Contains(line, []byte("slot unavailable"))
}
func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(PredictResult)) error { func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(PredictResult)) error {
request := map[string]any{ request := map[string]any{
"prompt": predict.Prompt, "prompt": predict.Prompt,
@ -585,53 +592,69 @@ func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(Pred
request["grammar"] = jsonGrammar request["grammar"] = jsonGrammar
} }
// Handling JSON marshaling with special characters unescaped.
buffer := &bytes.Buffer{}
enc := json.NewEncoder(buffer)
enc.SetEscapeHTML(false)
if err := enc.Encode(request); err != nil {
return fmt.Errorf("failed to marshal data: %v", err)
}
endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port) endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
if err != nil {
return fmt.Errorf("error creating POST request: %v", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req) for retries := 0; retries < maxRetries; retries++ {
if err != nil { // Handling JSON marshaling with special characters unescaped.
return fmt.Errorf("POST predict: %v", err) buffer := &bytes.Buffer{}
} enc := json.NewEncoder(buffer)
defer resp.Body.Close() enc.SetEscapeHTML(false)
if resp.StatusCode >= 400 { if err := enc.Encode(request); err != nil {
bodyBytes, err := io.ReadAll(resp.Body) return fmt.Errorf("failed to marshal data: %v", err)
if err != nil {
return fmt.Errorf("failed reading llm error response: %w", err)
} }
log.Printf("llm predict error: %s", bodyBytes)
return fmt.Errorf("%s", bodyBytes)
}
scanner := bufio.NewScanner(resp.Body) if retries > 0 {
// increase the buffer size to avoid running out of space time.Sleep(retryDelay) // wait before retrying
buf := make([]byte, 0, maxBufferSize) }
scanner.Buffer(buf, maxBufferSize) req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
for scanner.Scan() { if err != nil {
select { return fmt.Errorf("error creating POST request: %v", err)
case <-ctx.Done(): }
// This handles the request cancellation req.Header.Set("Content-Type", "application/json")
return ctx.Err()
default: resp, err := http.DefaultClient.Do(req)
line := scanner.Bytes() if err != nil {
if len(line) == 0 { return fmt.Errorf("POST predict: %v", err)
continue }
defer resp.Body.Close()
if resp.StatusCode >= 400 {
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed reading llm error response: %w", err)
} }
log.Printf("llm predict error: %s", bodyBytes)
return fmt.Errorf("%s", bodyBytes)
}
scanner := bufio.NewScanner(resp.Body)
// increase the buffer size to avoid running out of space
buf := make([]byte, 0, maxBufferSize)
scanner.Buffer(buf, maxBufferSize)
retryNeeded := false
for scanner.Scan() {
select {
case <-ctx.Done():
// This handles the request cancellation
return ctx.Err()
default:
line := scanner.Bytes()
if len(line) == 0 {
continue
}
if isRetryable(line) {
retryNeeded = true
break
}
evt, ok := bytes.CutPrefix(line, []byte("data: "))
if !ok {
return fmt.Errorf("error parsing llm response stream: %s", line)
}
if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
var p prediction var p prediction
if err := json.Unmarshal(evt, &p); err != nil { if err := json.Unmarshal(evt, &p); err != nil {
return fmt.Errorf("error unmarshaling llm prediction response: %v", err) return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
@ -661,21 +684,27 @@ func (llm *llama) Predict(ctx context.Context, predict PredictOpts, fn func(Pred
} }
} }
} }
}
if err := scanner.Err(); err != nil { if err := scanner.Err(); err != nil {
if strings.Contains(err.Error(), "unexpected EOF") { if strings.Contains(err.Error(), "unexpected EOF") {
// this means the llama runner subprocess crashed // this means the llama runner subprocess crashed
llm.Close() llm.Close()
if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" { if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg) return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
}
return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
} }
return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model") return fmt.Errorf("error reading llm response: %v", err)
}
if !retryNeeded {
// success
return nil
} }
return fmt.Errorf("error reading llm response: %v", err)
} }
return nil // should never reach here ideally
return fmt.Errorf("max retries exceeded")
} }
type TokenizeRequest struct { type TokenizeRequest struct {