forked from third-party-mirrors/ollama
fix prompt for non-mllama multimodal
This commit is contained in:
parent c48e2cfc0d
commit 96a8b2f7d8
@@ -84,8 +84,7 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
                msgs[lastMsgIdx].Content = strings.TrimSpace("<|image|>" + msgs[lastMsgIdx].Content)
                images = append(images, imgData)
            }
        }

    } else {
        for cnt, msg := range msgs[currMsgIdx:] {
            for _, i := range msg.Images {
                imgData := llm.ImageData{
@@ -105,6 +104,7 @@ func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.
                images = append(images, imgData)
            }
        }
    }

    // truncate any messages that do not fit into the context window
    var b bytes.Buffer
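For context, here is a minimal, self-contained sketch of the structure the non-mllama branch follows, based only on the identifiers visible in the hunks above (msgs[currMsgIdx:], msg.Images, llm.ImageData, images = append(...)). The message and imageData types and the "[img-N]" tag format are simplified assumptions for illustration, not ollama's actual API; as the first hunk shows, the mllama path instead prepends a literal <|image|> token to the last image-bearing message.

package main

import (
    "fmt"
    "strings"
)

// Simplified stand-ins for ollama's api.Message and llm.ImageData
// (hypothetical; the real structs carry more fields).
type message struct {
    Content string
    Images  [][]byte
}

type imageData struct {
    ID   int
    Data []byte
}

// tagImages mirrors the shape of the non-mllama loop in the diff: walk the
// messages, register each image with a sequential ID, and prefix the message
// content with a per-image tag so the prompt can refer back to the registered
// image data. The "[img-N]" tag format is an assumption for this sketch.
func tagImages(msgs []message) ([]message, []imageData) {
    var images []imageData
    for cnt, msg := range msgs {
        prefix := ""
        for _, raw := range msg.Images {
            img := imageData{ID: len(images), Data: raw}
            prefix += fmt.Sprintf("[img-%d]", img.ID)
            images = append(images, img)
        }
        msgs[cnt].Content = strings.TrimSpace(prefix + " " + msg.Content)
    }
    return msgs, images
}

func main() {
    msgs := []message{{Content: "describe this picture", Images: [][]byte{{0xff, 0xd8}}}}
    msgs, images := tagImages(msgs)
    fmt.Println(msgs[0].Content) // "[img-0] describe this picture"
    fmt.Println(len(images))     // 1
}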