Merge 540eec985fd6f9b5d4db018b7461621f850defb1 into d7eb05b9361febead29a74e71ddffc2ebeff5302
This commit is contained in:
commit
77651cb0e0
31
examples/golang-streamchat/README.md
Normal file
31
examples/golang-streamchat/README.md
Normal file
@ -0,0 +1,31 @@
|
||||
# golang-streamchat
|
||||
|
||||
This is a simple Go example that streams a chat response from llama2.
|
||||
|
||||
## Get Started
|
||||
|
||||
1. Run the Ollama Docker container:
|
||||
|
||||
```shell
|
||||
sudo docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
|
||||
```
|
||||
|
||||
For more detailed information, refer to the [Ollama Quickstart Docker](https://hub.docker.com/r/ollama/ollama). Please note that this setup uses the CPU only, so responses will be slow; if you have a GPU, follow the instructions there to run the Docker container with GPU support for better performance.
|
||||
|
||||
2. Pull the llama2 model:
|
||||
|
||||
```shell
|
||||
curl --location 'http://localhost:11434/api/pull' \
|
||||
--header 'Content-Type: application/json' \
|
||||
--data '{
|
||||
"name": "llama2:7b"
|
||||
}'
|
||||
```
|
||||
|
||||
3. Run the golang code.
|
||||
|
||||
```shell
|
||||
go run main.go
|
||||
```
|
||||
|
||||

|
BIN
examples/golang-streamchat/assets/get-started.gif
Normal file
BIN
examples/golang-streamchat/assets/get-started.gif
Normal file
Binary file not shown.
After Width: | Height: | Size: 2.7 MiB |
3
examples/golang-streamchat/go.mod
Normal file
3
examples/golang-streamchat/go.mod
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/jmorganca/ollama/examples/golang-streamchat
|
||||
|
||||
go 1.21.4
|
84
examples/golang-streamchat/main.go
Normal file
84
examples/golang-streamchat/main.go
Normal file
@ -0,0 +1,84 @@
|
||||
package main
|
||||
|
||||
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
)
|
||||
|
||||
func main() {
|
||||
err := SendChatRequest(ReqStreamChat{
|
||||
Model: ModelLlama27b,
|
||||
Messages: []ReqStreamChatMessage{
|
||||
{
|
||||
Role: RoleUser,
|
||||
Content: "Show me how to use golang channel.",
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func SendChatRequest(payload ReqStreamChat) error {
|
||||
jsonBytes, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", "error marshal payload", err)
|
||||
}
|
||||
|
||||
url := "http://localhost:11434/api/chat"
|
||||
res, err := http.Post(url, "application/json", bytes.NewBuffer(jsonBytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: %w", "error http post request", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
dec := json.NewDecoder(res.Body)
|
||||
for {
|
||||
var r ResStreamChat
|
||||
if err := dec.Decode(&r); err != nil {
|
||||
break
|
||||
}
|
||||
fmt.Print(r.Message.Content)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type Model string
|
||||
|
||||
const (
|
||||
ModelLlama27b Model = "llama2:7b"
|
||||
)
|
||||
|
||||
type Role string
|
||||
|
||||
const (
|
||||
RoleUser Role = "user"
|
||||
RoleAssistant Role = "assistant"
|
||||
)
|
||||
|
||||
type ReqStreamChat struct {
|
||||
Model Model `json:"model"`
|
||||
Messages []ReqStreamChatMessage `json:"messages"`
|
||||
}
|
||||
|
||||
type ReqStreamChatMessage struct {
|
||||
Role Role
|
||||
Content string
|
||||
}
|
||||
|
||||
type ResStreamChat struct {
|
||||
Model Model `json:"model"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
Message ResStreamChatMessage `json:"message"`
|
||||
Done bool `json:"done"`
|
||||
}
|
||||
|
||||
type ResStreamChatMessage struct {
|
||||
Role Role `json:"role"`
|
||||
Content string `json:"content"`
|
||||
Images interface{} `json:"images"`
|
||||
}
|
Loading…
x
Reference in New Issue
Block a user