forked from third-party-mirrors/ollama
wip
This commit is contained in: parent fbc8572859, commit ec17359a68
@@ -1499,11 +1499,11 @@ static enum ggml_status ggml_metal_graph_compute(
                 // to the matrix-vector kernel
                 int ne11_mm_min = 1;
 
-#if 0
+
                 // the numbers below are measured on M2 Ultra for 7B and 13B models
                 // these numbers do not translate to other devices or model sizes
                 // TODO: need to find a better approach
-                if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
+                // if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
                     switch (src0t) {
                         case GGML_TYPE_F16:  ne11_mm_min = 2; break;
                         case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
@@ -1518,8 +1518,8 @@ static enum ggml_status ggml_metal_graph_compute(
                         case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
                         default:             ne11_mm_min = 1; break;
                     }
-                }
-#endif
+                // }
+
 
                 // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
                 // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
@@ -38,15 +38,14 @@ import (
     "github.com/ollama/ollama/llm"
 )
 
-// SystemInfo is an unused example of calling llama.cpp functions using CGo
-func PrintSystemInfo() string {
-    return C.GoString(C.llama_print_system_info())
-}
-
 func BackendInit() {
     C.llama_backend_init()
 }
 
+func PrintSystemInfo() string {
+    return C.GoString(C.llama_print_system_info())
+}
+
 type ContextParams struct {
     c C.struct_llama_context_params
 }
@@ -100,7 +99,8 @@ func (c *Context) Model() *Model {
     return &Model{c: C.llama_get_model(c.c)}
 }
 
-func (c *Context) SampleTokenGreedy(batch Batch) int {
+// TODO: break this up
+func (c *Context) SampleTokenGreedy(batch Batch, i int) int {
     nv := c.Model().NumVocab()
 
     // TODO(jmorganca): split this up into different functions
@@ -108,7 +108,7 @@ func (c *Context) SampleTokenGreedy(batch Batch) int {
     defer C.free(unsafe.Pointer(candidates))
 
     // get most recent logits
-    logits := C.llama_get_logits_ith(c.c, C.int(batch.NumTokens()-1))
+    logits := C.llama_get_logits_ith(c.c, C.int(i))
     for i := 0; i < int(nv); i++ {
         ptr := (*C.struct_llama_token_data)(unsafe.Pointer(uintptr(unsafe.Pointer(candidates)) + uintptr(i)*unsafe.Sizeof(C.struct_llama_token_data{})))
         ptr.id = C.int(i)
@@ -123,6 +123,10 @@ func (c *Context) SampleTokenGreedy(batch Batch) int {
     }))
 }
 
+func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool {
+    return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1)))
+}
+
 func LoadModelFromFile(modelPath string, params ModelParams) *Model {
     return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)}
 }
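These binding changes make the logits index explicit: SampleTokenGreedy now samples from a caller-supplied batch position i instead of always using batch.NumTokens()-1, and KvCacheSeqRm exposes llama_kv_cache_seq_rm so a finished sequence's cache entries can be dropped. A minimal single-sequence sketch of driving the updated bindings; the generate helper and its parameters are illustrative, and only methods visible in this diff (NewBatch, Add, Decode, SampleTokenGreedy, TokenIsEog, TokenToPiece, NumTokens, Clear, Free, KvCacheSeqRm) are assumed:

package sketch

import "github.com/ollama/ollama/llama"

// generate greedily decodes up to maxTokens continuation tokens for a prompt.
// Sketch only: sampling and error handling are kept to the bare minimum.
func generate(lc *llama.Context, m *llama.Model, promptTokens []int, maxTokens int) []string {
    batch := llama.NewBatch(512, 0, 1)
    defer batch.Free()

    // feed the prompt; only the last prompt token needs logits
    nPast := 0
    for i, t := range promptTokens {
        batch.Add(t, nPast, []int{0}, i == len(promptTokens)-1)
        nPast++
    }

    var out []string
    for len(out) < maxTokens {
        if err := lc.Decode(batch); err != nil {
            panic(err)
        }

        // sample from the logits of the last token in the batch,
        // passing the batch position explicitly via the new parameter
        token := lc.SampleTokenGreedy(batch, batch.NumTokens()-1)
        if m.TokenIsEog(token) {
            break
        }
        out = append(out, m.TokenToPiece(token))

        batch.Clear()
        batch.Add(token, nPast, []int{0}, true)
        nPast++
    }

    // drop this sequence's KV cache entries so the context can be reused
    lc.KvCacheSeqRm(0, 0, -1)
    return out
}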
@@ -1,7 +1,7 @@
 package main
 
 import (
-    "encoding/base64"
+    "context"
     "encoding/json"
     "flag"
     "fmt"
@@ -9,13 +9,149 @@ import (
     "log/slog"
     "net"
     "net/http"
-    "regexp"
-    "strconv"
     "sync"
 
     "github.com/ollama/ollama/llama"
 )
 
+type Sequence struct {
+    // number of tokens evaluated
+    nPast int
+
+    // tokens left to evaluate
+    tokens []int
+
+    responses chan string
+}
+
+// prompt returns true if the prompt is still being processed
+func (s *Sequence) prompt() bool {
+    return s.nPast < len(s.tokens)-1
+}
+
+func (s *Server) NewSequence(text string, w http.ResponseWriter) *Sequence {
+    tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
+    if err != nil {
+        panic(err)
+    }
+
+    return &Sequence{
+        tokens:    tokens,
+        responses: make(chan string, 1),
+    }
+}
+
+type Server struct {
+    model *llama.Model
+    lc    *llama.Context
+    cc    *llama.ClipContext
+
+    // parallel is the number of parallel requests to handle
+    parallel int
+
+    // seqs is the list of parallel sequences being evaluated
+    seqs []*Sequence
+
+    mu sync.Mutex
+
+    cond *sync.Cond
+}
+
+func (s *Server) allNil() bool {
+    for _, item := range s.seqs {
+        if item != nil {
+            return false
+        }
+    }
+    return true
+}
+
+func (s *Server) run(ctx context.Context) {
+    batch := llama.NewBatch(512, 0, s.parallel)
+    defer batch.Free()
+
+    for {
+        select {
+        case <-ctx.Done():
+            return
+        default:
+            slog.Info("Processing batch", "seqs", len(s.seqs))
+            s.mu.Lock()
+            for s.allNil() {
+                fmt.Println("wait")
+                s.cond.Wait() // Wait until an item is added
+            }
+            s.mu.Unlock()
+
+            fmt.Println("seqs", s.seqs, len(s.seqs))
+
+            // prepare the batch
+            ibatch := make([]int, s.parallel)
+            for i, seq := range s.seqs {
+                if seq == nil {
+                    continue
+                }
+
+                for j, t := range seq.tokens {
+                    // todo: make this n_batch
+                    if j > 512 {
+                        break
+                    }
+
+                    batch.Add(t, seq.nPast, []int{i}, !seq.prompt())
+                    seq.nPast++
+
+                    if seq.prompt() {
+                        ibatch[i] = batch.NumTokens() + 1
+                    }
+                }
+            }
+
+            err := s.lc.Decode(batch)
+            if err != nil {
+                panic("Failed to decode")
+            }
+
+            for i, seq := range s.seqs {
+                if seq == nil {
+                    continue
+                }
+
+                // don't sample prompt processing
+                if seq.prompt() {
+                    if len(seq.tokens) < 512 {
+                        seq.tokens = []int{}
+                    } else {
+                        seq.tokens = seq.tokens[512:]
+                    }
+
+                    continue
+                }
+
+                // sample a token
+                // TODO: sample based on the sequence
+                fmt.Println("Sampling token", i, ibatch[i])
+                token := s.lc.SampleTokenGreedy(batch, ibatch[i])
+
+                // if it's an end of sequence token, break
+                // TODO: just end this sequence
+                if s.model.TokenIsEog(token) {
+                    // TODO: end the sequence instead of quitting the pool
+                    s.lc.KvCacheSeqRm(i, 0, -1)
+                    close(seq.responses)
+                    s.seqs[i] = nil
+                    continue
+                }
+
+                seq.responses <- s.model.TokenToPiece(token)
+                seq.tokens = []int{token}
+            }
+
+            batch.Clear()
+        }
+    }
+}
+
 type Request struct {
     Prompt string `json:"prompt"`
     Images []string `json:"images"`
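The added prompt() helper is what splits each sequence's life into a prompt-processing phase (no logits requested, nothing sampled) and a generation phase. A small standalone sketch of that bookkeeping, with a trimmed copy of the Sequence struct from this diff (the token values are arbitrary):

package main

import "fmt"

// Sequence mirrors the struct added in this diff, trimmed to the fields used here.
type Sequence struct {
    nPast  int   // number of tokens evaluated
    tokens []int // tokens left to evaluate
}

// prompt returns true if the prompt is still being processed (as in the diff).
func (s *Sequence) prompt() bool {
    return s.nPast < len(s.tokens)-1
}

func main() {
    seq := &Sequence{tokens: []int{10, 20, 30}} // a 3-token prompt

    for _, t := range seq.tokens {
        // logits are only requested once the last prompt token is submitted,
        // mirroring batch.Add(t, seq.nPast, []int{i}, !seq.prompt())
        wantLogits := !seq.prompt()
        fmt.Printf("token %d at pos %d, logits=%v\n", t, seq.nPast, wantLogits)
        seq.nPast++
    }
    // Output:
    // token 10 at pos 0, logits=false
    // token 20 at pos 1, logits=false
    // token 30 at pos 2, logits=true
}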
@@ -25,124 +161,53 @@ type Response struct {
     Token string `json:"token"`
 }
 
-type Server struct {
-    model *llama.Model
-    lc    *llama.Context
-    cc    *llama.ClipContext
-}
-
-var mu sync.Mutex
-
-func (s *Server) stream(w http.ResponseWriter, r *http.Request) {
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
     var request Request
     if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
         http.Error(w, "Bad request", http.StatusBadRequest)
         return
     }
 
-    mu.Lock()
-    defer mu.Unlock()
-
     // Set the headers to indicate streaming
     w.Header().Set("Content-Type", "application/json")
     w.Header().Set("Transfer-Encoding", "chunked")
     w.WriteHeader(http.StatusOK)
 
-    enc := json.NewEncoder(w)
+    seq := s.NewSequence(request.Prompt, w)
 
-    // create embeddings for each image
-    var embeddings []*llama.LlavaImageEmbed
-    if s.cc != nil {
-        for _, img := range request.Images {
-            data, err := base64.StdEncoding.DecodeString(img)
-            if err != nil {
-                http.Error(w, "Failed to decode image", http.StatusBadRequest)
-                return
-            }
-
-            embd := llama.NewLlavaImageEmbed(s.cc, data)
-            embeddings = append(embeddings, embd)
-        }
-    }
-
-    var nPast int
-
-    // eval the prompt
-    re := regexp.MustCompile(`\[\s*img-(\d+)\s*\]`)
-    matches := re.FindAllStringSubmatchIndex(request.Prompt, -1)
-
-    // eval each chunk including images
-    pos := 0
-    for _, match := range matches {
-        part := request.Prompt[pos:match[0]]
-        fmt.Println("Text part:", part)
-
-        // eval text before image
-        err := s.evalText(part, &nPast)
-        if err != nil {
-            log.Println("Failed to eval text:", err)
-            return
-        }
-
-        // eval image
-        imgIndexStr := request.Prompt[match[2]:match[3]]
-        imgIndex, err := strconv.Atoi(imgIndexStr)
-        if err != nil {
-            slog.Warn("Failed to parse image index", "index", imgIndexStr)
-            continue
-        }
-
-        fmt.Println("Tag index:", imgIndex)
-        if imgIndex <= len(embeddings) {
-            slog.Info("evaluating image", "index", imgIndex)
-            llama.LlavaEvalImageEmbed(s.lc, embeddings[imgIndex], 512, &nPast)
-        }
-
-        pos = match[1]
-    }
-
-    // eval remaining text
-    if pos < len(request.Prompt) {
-        s.evalText(request.Prompt[pos:], &nPast)
-    }
-
-    batch := llama.NewBatch(512, 0, 1)
-    defer batch.Free()
-
-    // main loop
-    for n := nPast; n < 2048; n++ {
-        // sample a token
-        token := s.lc.SampleTokenGreedy(batch)
-
-        // if it's an end of sequence token, break
-        if s.model.TokenIsEog(token) {
+    s.mu.Lock()
+    for i, sq := range s.seqs {
+        if sq == nil {
+            s.seqs[i] = seq
+            fmt.Println("signal")
+            s.cond.Signal()
             break
         }
+    }
+    s.mu.Unlock()
 
-        // print the token
-        str := s.model.TokenToPiece(token)
-
-        if err := enc.Encode(&Response{Token: str}); err != nil {
+    for token := range seq.responses {
+        if err := json.NewEncoder(w).Encode(&Response{
+            Token: token,
+        }); err != nil {
             log.Println("Failed to encode result:", err)
             return
         }
-        w.(http.Flusher).Flush()
 
-        batch.Clear()
-        batch.Add(token, n, []int{0}, true)
-
-        err := s.lc.Decode(batch)
-        if err != nil {
-            panic("Failed to decode")
-        }
-    }
-
-    s.lc.KvCacheClear()
+        flusher, ok := w.(http.Flusher)
+        if !ok {
+            http.Error(w, "Streaming not supported", http.StatusInternalServerError)
+            return
+        }
+
+        flusher.Flush()
+    }
 }
 
 func main() {
     mpath := flag.String("model", "", "Path to model binary file")
     ppath := flag.String("projector", "", "Path to projector binary file")
+    parallel := flag.Int("parallel", 1, "Number of sequences to handle simultaneously")
     flag.Parse()
 
     // load the model
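The new handler streams one JSON object per generated token over a chunked response and returns once the sequence's responses channel is closed. A minimal client sketch, assuming the server from this diff is listening on the 127.0.0.1:8080 address set in main below (any path works, since the handler is registered for the whole server); the request shape mirrors the Request struct above:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Request mirrors the server's Request struct: {"prompt": "..."}
    body, _ := json.Marshal(struct {
        Prompt string `json:"prompt"`
    }{Prompt: "Why is the sky blue?"})

    resp, err := http.Post("http://127.0.0.1:8080/", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // The handler writes a stream of JSON objects, one per token:
    // {"token":"..."}{"token":"..."}... A json.Decoder reads them back to back.
    dec := json.NewDecoder(resp.Body)
    for {
        var chunk struct {
            Token string `json:"token"`
        }
        if err := dec.Decode(&chunk); err != nil {
            break // io.EOF once the sequence finishes and the response ends
        }
        fmt.Print(chunk.Token)
    }
    fmt.Println()
}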
@@ -156,7 +221,7 @@ func main() {
     }
 
     var cc *llama.ClipContext
-    if ppath != nil {
+    if *ppath != "" {
         cc = llama.NewClipContext(*ppath)
         if cc == nil {
             panic("Failed to create clip context")
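The guard change above matters because flag.String always returns a non-nil pointer, so ppath != nil was true even when no projector was given; comparing the dereferenced value against the empty string is what actually detects the flag. A standalone illustration (the flag name mirrors the diff; everything else is illustrative):

package main

import (
    "flag"
    "fmt"
)

func main() {
    ppath := flag.String("projector", "", "Path to projector binary file")
    flag.Parse()

    // ppath is never nil: flag.String always returns a valid pointer,
    // so this branch runs even when no -projector flag is given.
    if ppath != nil {
        fmt.Println("pointer is always non-nil")
    }

    // The dereferenced value is what tells us whether the flag was set.
    if *ppath != "" {
        fmt.Println("projector path:", *ppath)
    } else {
        fmt.Println("no projector configured")
    }
}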
@@ -167,8 +232,15 @@ func main() {
         model: model,
         lc: lc,
         cc: cc,
+        parallel: *parallel,
+        seqs: make([]*Sequence, *parallel),
     }
 
+    server.cond = sync.NewCond(&server.mu)
+
+    ctx, cancel := context.WithCancel(context.Background())
+    go server.run(ctx)
+
     addr := "127.0.0.1:8080"
     listener, err := net.Listen("tcp", addr)
     if err != nil {
@@ -178,35 +250,13 @@ func main() {
     defer listener.Close()
 
     httpServer := http.Server{
-        Handler: http.HandlerFunc(server.stream),
+        Handler: http.HandlerFunc(server.handler),
     }
 
     log.Println("Server listening on", addr)
     if err := httpServer.Serve(listener); err != nil {
         log.Fatal("server error:", err)
     }
-}
-
-func (s *Server) evalText(text string, nPast *int) error {
-    // eval before
-    batch := llama.NewBatch(512, 0, 1)
-    defer batch.Free()
-
-    tokens, err := s.lc.Model().Tokenize(text, 2048, true, true)
-    if err != nil {
-        return fmt.Errorf("tokenize failed: %w", err)
-    }
-
-    // prompt eval
-    for _, t := range tokens {
-        batch.Add(t, *nPast, []int{0}, true)
-        *nPast++
-    }
-
-    err = s.lc.Decode(batch)
-    if err != nil {
-        return fmt.Errorf("decode failed: %w", err)
-    }
-
-    return nil
+
+    cancel()
 }