wip

parent b9b07f1a97
commit 79ebc5c5a1
@ -1,6 +1,7 @@
package api

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
@ -36,7 +37,66 @@ func checkError(resp *http.Response, body []byte) error {
	return apiError
}

func (c *Client) do(ctx context.Context, method string, path string, stream bool, reqData any, respData any) error {
func (c *Client) stream(ctx context.Context, method string, path string, reqData any, callback func(data []byte)) error {
	var reqBody io.Reader
	var data []byte
	var err error
	if reqData != nil {
		data, err = json.Marshal(reqData)
		if err != nil {
			return err
		}
		reqBody = bytes.NewReader(data)
	}

	url := fmt.Sprintf("%s%s", c.URL, path)

	req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
	if err != nil {
		return err
	}

	if c.PrivateKey != nil {
		s := signature.SignatureData{
			Method: method,
			Path:   url,
			Data:   data,
		}
		authHeader, err := signature.SignAuthData(s, c.PrivateKey)
		if err != nil {
			return err
		}
		req.Header.Set("Authorization", authHeader)
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")

	for k, v := range c.Headers {
		req.Header[k] = v
	}

	res, err := c.HTTP.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	reader := bufio.NewReader(res.Body)

	for {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			break
		}

		callback(bytes.TrimSuffix(line, []byte("\n")))
	}

	return nil
}

func (c *Client) do(ctx context.Context, method string, path string, reqData any, respData any) error {
	var reqBody io.Reader
	var data []byte
	var err error
@ -97,3 +157,14 @@ func (c *Client) do(ctx context.Context, method string, path string, stream bool
	}
	return nil
}

func (c *Client) Generate(ctx context.Context, req *GenerateRequest, callback func(token string)) (*GenerateResponse, error) {
	var res GenerateResponse
	if err := c.stream(ctx, http.MethodPost, "/api/generate", req, func(token []byte) {
		callback(string(token))
	}); err != nil {
		return nil, err
	}

	return &res, nil
}
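For orientation (not part of the diff), a minimal sketch of how the new streaming client could be driven. The GenerateRequest fields Model and Prompt are taken from their use elsewhere in this commit; the model path is hypothetical, and pointing the client at plain "http://localhost" is a simplification of whatever transport NewAPIClient in cmd/cmd.go actually wires up.

// sketch only; assumes a server is already reachable at client.URL
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client := &api.Client{
		URL:  "http://localhost",
		HTTP: http.Client{Timeout: 10 * time.Second},
	}

	req := &api.GenerateRequest{
		Model:  "/path/to/model.bin", // hypothetical model path
		Prompt: "Why is the sky blue?",
	}

	// Generate streams the response back line by line through the callback.
	if _, err := client.Generate(context.Background(), req, func(token string) {
		fmt.Print(token)
	}); err != nil {
		panic(err)
	}
}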
cmd/cmd.go (142 changes)
@ -3,6 +3,7 @@ package cmd
import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
@ -10,14 +11,60 @@ import (
	"path"
	"time"

	"github.com/spf13/cobra"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
	"github.com/jmorganca/ollama/server"
	"github.com/spf13/cobra"
)

func NewAPIClient(cmd *cobra.Command) (*api.Client, error) {
	var rawKey []byte
func sockpath() string {
	home, err := os.UserHomeDir()
	if err != nil {
		panic(err)
	}

	return path.Join(home, ".ollama", "ollama.sock")
}

func running() bool {
	// Set a timeout duration
	timeout := time.Second
	// Dial the unix socket
	conn, err := net.DialTimeout("unix", sockpath(), timeout)
	if err != nil {
		return false
	}

	if conn != nil {
		defer conn.Close()
	}
	return true
}

func serve() error {
	sp := sockpath()

	if err := os.MkdirAll(path.Dir(sp), 0o700); err != nil {
		return err
	}

	if err := os.RemoveAll(sp); err != nil {
		return err
	}

	ln, err := net.Listen("unix", sp)
	if err != nil {
		return err
	}

	if err := os.Chmod(sp, 0o700); err != nil {
		return err
	}

	return server.Serve(ln)
}

func NewAPIClient() (*api.Client, error) {
	var err error

	home, err := os.UserHomeDir()
@ -31,16 +78,6 @@ func NewAPIClient(cmd *cobra.Command) (*api.Client, error) {
		Timeout: 10 * time.Second,
	}

	k, _ := cmd.Flags().GetString("key")

	if k != "" {
		fn := path.Join(home, ".ollama/keys/", k)
		rawKey, err = os.ReadFile(fn)
		if err != nil {
			return nil, err
		}
	}

	return &api.Client{
		URL: "http://localhost",
		HTTP: http.Client{
@ -50,7 +87,6 @@ func NewAPIClient(cmd *cobra.Command) (*api.Client, error) {
			},
		},
		},
		PrivateKey: rawKey,
	}, nil
}

@ -69,29 +105,41 @@ func NewCLI() *cobra.Command {
		},
	}

	rootCmd.PersistentFlags().StringP("key", "k", "", "Private key to use for authenticating")

	cobra.EnableCommandSorting = false

	modelsCmd := &cobra.Command{
		Use:   "models",
		Args:  cobra.MaximumNArgs(1),
		Short: "List models",
		Long:  "List the models",
		RunE: func(cmd *cobra.Command, args []string) error {
			client, err := NewAPIClient(cmd)
			if err != nil {
				return err
			}
			fmt.Printf("client = %q\n", client)
			return nil
		},
	}

	runCmd := &cobra.Command{
		Use:   "run",
		Short: "Run a model and submit prompts.",
		Use:   "run MODEL",
		Short: "Run a model",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
			if err != nil {
				fmt.Println("Loading the model failed:", err.Error())
				return
			}

			ch := make(chan string)

			go func() {
				defer close(ch)
				_, err := l.Predict(req.Prompt, llama.Debug, llama.SetTokenCallback(func(token string) bool {
					ch <- token
					return true
				}), llama.SetTokens(tokens), llama.SetThreads(threads), llama.SetTopK(90), llama.SetTopP(0.86), llama.SetStopWords("llama"))
				if err != nil {
					panic(err)
				}
			}()

			c.Stream(func(w io.Writer) bool {
				tok, ok := <-ch
				if !ok {
					return false
				}
				c.SSEvent("token", tok)
				return true
			})

			return nil
		},
	}
@ -101,35 +149,11 @@ func NewCLI() *cobra.Command {
		Aliases: []string{"start"},
		Short:   "Start ollama",
		RunE: func(cmd *cobra.Command, args []string) error {
			home, err := os.UserHomeDir()
			if err != nil {
				return err
			}

			socket := path.Join(home, ".ollama", "ollama.sock")
			if err := os.MkdirAll(path.Dir(socket), 0o700); err != nil {
				return err
			}

			if err := os.RemoveAll(socket); err != nil {
				return err
			}

			ln, err := net.Listen("unix", socket)
			if err != nil {
				return err
			}

			if err := os.Chmod(socket, 0o700); err != nil {
				return err
			}

			return server.Serve(ln)
			return serve()
		},
	}

	rootCmd.AddCommand(
		modelsCmd,
		serveCmd,
		runCmd,
	)
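Not part of this commit, but for context: a sketch of how NewCLI would typically be wired into a main package. The cmd import path follows the module path used above; the actual entrypoint file is not shown in this diff.

package main

import (
	"os"

	"github.com/jmorganca/ollama/cmd"
)

func main() {
	// NewCLI builds the root cobra command with the models, run and serve
	// subcommands registered above; Execute dispatches to the matching RunE.
	if err := cmd.NewCLI().Execute(); err != nil {
		os.Exit(1)
	}
}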
@ -55,78 +55,11 @@ void sigint_handler(int signo) {
}
#endif

int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  if (params.seed <= 0) {
    params.seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  llama_init_backend(params.numa);

  int n_past = 0;

  // Add a space in front of the first character to match OG llama tokenizer
  // behavior
  params.prompt.insert(0, 1, ' ');

  // tokenize the prompt
  auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

  // determine newline token
  auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

  if (embd_inp.size() > 0) {
    if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
                   params.n_threads)) {
      fprintf(stderr, "%s : failed to eval\n", __func__);
      return 1;
    }
  }

  const int n_embd = llama_n_embd(ctx);

  const auto embeddings = llama_get_embeddings(ctx);

  for (int i = 0; i < n_embd; i++) {
    res_embeddings[i] = embeddings[i];
  }

  return 0;
}

int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
                         int tokenSize, float *res_embeddings) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;
  gpt_params params = *params_p;

  for (int i = 0; i < tokenSize; i++) {
    auto token_str = llama_token_to_str(ctx, tokens[i]);
    if (token_str == nullptr) {
      continue;
    }
    std::vector<std::string> my_vector;
    std::string str_token(token_str); // create a new std::string from the char*
    params_p->prompt += str_token;
  }

  return get_embeddings(params_ptr, state_pr, res_embeddings);
}

int eval(void *params_ptr, void *state_pr, char *text) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

int eval(gpt_params *params, llama_context *ctx, char *text) {
  auto n_past = 0;
  auto last_n_tokens_data =
      std::vector<llama_token>(params_p->repeat_last_n, 0);
  auto last_n_tokens_data = std::vector<llama_token>(params->repeat_last_n, 0);

  auto tokens = std::vector<llama_token>(params_p->n_ctx);
  auto tokens = std::vector<llama_token>(params->n_ctx);
  auto n_prompt_tokens =
      llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);

@ -135,26 +68,21 @@ int eval(void *params_ptr, void *state_pr, char *text) {
    return 1;
  }

  // evaluate prompt
  return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
                    params_p->n_threads);
                    params->n_threads);
}

int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  gpt_params *params_p = (gpt_params *)params_ptr;
  llama_context *ctx = (llama_context *)state_pr;

  gpt_params params = *params_p;
int llama_predict(gpt_params *params, llama_context *ctx, char *result,
                  bool debug) {

  const int n_ctx = llama_n_ctx(ctx);

  if (params.seed <= 0) {
    params.seed = time(NULL);
  if (params->seed <= 0) {
    params->seed = time(NULL);
  }

  std::mt19937 rng(params.seed);

  std::string path_session = params.path_prompt_cache;
  std::mt19937 rng(params->seed);
  std::string path_session = params->path_prompt_cache;
  std::vector<llama_token> session_tokens;

  if (!path_session.empty()) {
@ -177,7 +105,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      return 1;
    }
    session_tokens.resize(n_token_count_out);
    llama_set_rng_seed(ctx, params.seed);
    llama_set_rng_seed(ctx, params->seed);
    if (debug) {
      fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
              __func__, (int)session_tokens.size());
@ -191,12 +119,12 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  }

  std::vector<llama_token> embd_inp;
  if (!params.prompt.empty() || session_tokens.empty()) {
  if (!params->prompt.empty() || session_tokens.empty()) {
    // Add a space in front of the first character to match OG llama tokenizer
    // behavior
    params.prompt.insert(0, 1, ' ');
    params->prompt.insert(0, 1, ' ');

    embd_inp = ::llama_tokenize(ctx, params.prompt, true);
    embd_inp = ::llama_tokenize(ctx, params->prompt, true);
  } else {
    embd_inp = session_tokens;
  }
@ -212,7 +140,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      n_matching_session_tokens++;
    }
    if (debug) {
      if (params.prompt.empty() &&
      if (params->prompt.empty() &&
          n_matching_session_tokens == embd_inp.size()) {
        fprintf(stderr, "%s: using full prompt from session file\n", __func__);
      } else if (n_matching_session_tokens >= embd_inp.size()) {
@ -237,8 +165,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
    session_tokens.resize(embd_inp.size() - 1);
  }
  // number of tokens to keep when resetting context
  if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
    params.n_keep = (int)embd_inp.size();
  if (params->n_keep < 0 || params->n_keep > (int)embd_inp.size()) {
    params->n_keep = (int)embd_inp.size();
  }

  // determine newline token
@ -251,7 +179,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
  bool need_to_save_session =
      !path_session.empty() && n_matching_session_tokens < embd_inp.size();
  int n_past = 0;
  int n_remain = params.n_predict;
  int n_remain = params->n_predict;
  int n_consumed = 0;
  int n_session_consumed = 0;

@ -263,7 +191,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
    const std::vector<llama_token> tmp = {
        llama_token_bos(),
    };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params->n_threads);
    llama_reset_timings(ctx);
  }

@ -276,10 +204,10 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      // - take half of the last (n_ctx - n_keep) tokens and recompute the
      // logits in batches
      if (n_past + (int)embd.size() > n_ctx) {
        const int n_left = n_past - params.n_keep;
        const int n_left = n_past - params->n_keep;

        // always keep the first token - BOS
        n_past = std::max(1, params.n_keep);
        n_past = std::max(1, params->n_keep);

        // insert n_left/2 tokens at the start of embd from last_n_tokens
        embd.insert(embd.begin(),
@ -288,14 +216,6 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {

        // stop saving session if we run out of context
        path_session.clear();

        // printf("\n---\n");
        // printf("resetting: '");
        // for (int i = 0; i < (int) embd.size(); i++) {
        //   printf("%s", llama_token_to_str(ctx, embd[i]));
        // }
        // printf("'\n");
        // printf("\n---\n");
      }

      // try to reuse a matching prefix from the loaded session instead of
@ -324,15 +244,17 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      // evaluate tokens in batches
      // embd is typically prepared beforehand to fit within a batch, but not
      // always
      for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
      for (int i = 0; i < (int)embd.size(); i += params->n_batch) {
        int n_eval = (int)embd.size() - i;
        if (n_eval > params.n_batch) {
          n_eval = params.n_batch;
        if (n_eval > params->n_batch) {
          n_eval = params->n_batch;
        }
        if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {

        if (llama_eval(ctx, &embd[i], n_eval, n_past, params->n_threads)) {
          fprintf(stderr, "%s : failed to eval\n", __func__);
          return 1;
        }

        n_past += n_eval;
      }

@ -346,26 +268,26 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {

    if ((int)embd_inp.size() <= n_consumed) {
      // out of user input, sample next token
      const float temp = params.temp;
      const float temp = params->temp;
      const int32_t top_k =
          params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
      const float top_p = params.top_p;
      const float tfs_z = params.tfs_z;
      const float typical_p = params.typical_p;
          params->top_k <= 0 ? llama_n_vocab(ctx) : params->top_k;
      const float top_p = params->top_p;
      const float tfs_z = params->tfs_z;
      const float typical_p = params->typical_p;
      const int32_t repeat_last_n =
          params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
      const float repeat_penalty = params.repeat_penalty;
      const float alpha_presence = params.presence_penalty;
      const float alpha_frequency = params.frequency_penalty;
      const int mirostat = params.mirostat;
      const float mirostat_tau = params.mirostat_tau;
      const float mirostat_eta = params.mirostat_eta;
      const bool penalize_nl = params.penalize_nl;
          params->repeat_last_n < 0 ? n_ctx : params->repeat_last_n;
      const float repeat_penalty = params->repeat_penalty;
      const float alpha_presence = params->presence_penalty;
      const float alpha_frequency = params->frequency_penalty;
      const int mirostat = params->mirostat;
      const float mirostat_tau = params->mirostat_tau;
      const float mirostat_eta = params->mirostat_eta;
      const bool penalize_nl = params->penalize_nl;

      // optionally save the session on first sample (for faster prompt loading
      // next time)
      if (!path_session.empty() && need_to_save_session &&
          !params.prompt_cache_ro) {
          !params->prompt_cache_ro) {
        need_to_save_session = false;
        llama_save_session_file(ctx, path_session.c_str(),
                                session_tokens.data(), session_tokens.size());
@ -378,8 +300,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      auto n_vocab = llama_n_vocab(ctx);

      // Apply params.logit_bias map
      for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
           it++) {
      for (auto it = params->logit_bias.begin();
           it != params->logit_bias.end(); it++) {
        logits[it->first] += it->second;
      }

@ -435,7 +357,6 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
          id = llama_sample_token(ctx, &candidates_p);
        }
      }
      // printf("`%d`", candidates_p.size);

      last_n_tokens.erase(last_n_tokens.begin());
      last_n_tokens.push_back(id);
@ -450,7 +371,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
        // call the token callback, no need to check if one is actually
        // registered, that will be handled on the Go side.
        auto token_str = llama_token_to_str(ctx, id);
        if (!tokenCallback(state_pr, (char *)token_str)) {
        if (!tokenCallback(ctx, (char *)token_str)) {
          break;
        }
      } else {
@ -461,7 +382,7 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
        last_n_tokens.erase(last_n_tokens.begin());
        last_n_tokens.push_back(embd_inp[n_consumed]);
        ++n_consumed;
        if ((int)embd.size() >= params.n_batch) {
        if ((int)embd.size() >= params->n_batch) {
          break;
        }
      }
@ -472,13 +393,13 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
    }

    // check for stop prompt
    if (params.antiprompt.size()) {
    if (params->antiprompt.size()) {
      std::string last_output;
      for (auto id : last_n_tokens) {
        last_output += llama_token_to_str(ctx, id);
      }
      // Check if each of the reverse prompts appears at the end of the output.
      for (std::string &antiprompt : params.antiprompt) {
      for (std::string &antiprompt : params->antiprompt) {
        // size_t extra_padding = params.interactive ? 0 : 2;
        size_t extra_padding = 2;
        size_t search_start_pos =
@ -501,8 +422,8 @@ int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
      }
    }

  if (!path_session.empty() && params.prompt_cache_all &&
      !params.prompt_cache_ro) {
  if (!path_session.empty() && params->prompt_cache_all &&
      !params->prompt_cache_ro) {
    if (debug) {
      fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
              __func__, path_session.c_str());
@ -525,68 +446,8 @@ end:
  return 0;
}

void llama_binding_free_model(void *state_ptr) {
  llama_context *ctx = (llama_context *)state_ptr;
  llama_free(ctx);
}

void llama_free_params(void *params_ptr) {
  gpt_params *params = (gpt_params *)params_ptr;
  delete params;
}

std::vector<std::string> create_vector(const char **strings, int count) {
  std::vector<std::string> *vec = new std::vector<std::string>;
  for (int i = 0; i < count; i++) {
    vec->push_back(std::string(strings[i]));
  }
  return *vec;
}

void delete_vector(std::vector<std::string> *vec) { delete vec; }

int load_state(void *ctx, char *statefile, char *modes) {
  llama_context *state = (llama_context *)ctx;
  const llama_context *constState = static_cast<const llama_context *>(state);
  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  {
    FILE *fp_read = fopen(statefile, modes);
    if (state_size != llama_get_state_size(constState)) {
      fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
      return 1;
    }

    const size_t ret = fread(state_mem, 1, state_size, fp_read);
    if (ret != state_size) {
      fprintf(stderr, "\n%s : failed to read state\n", __func__);
      return 1;
    }

    llama_set_state_data(
        state, state_mem); // could also read directly from memory mapped file
    fclose(fp_read);
  }

  return 0;
}

void save_state(void *ctx, char *dst, char *modes) {
  llama_context *state = (llama_context *)ctx;

  const size_t state_size = llama_get_state_size(state);
  uint8_t *state_mem = new uint8_t[state_size];

  // Save state (rng, logits, embedding and kv_cache) to file
  {
    FILE *fp_write = fopen(dst, modes);
    llama_copy_state_data(
        state, state_mem); // could also copy directly to memory mapped file
    fwrite(state_mem, 1, state_size, fp_write);
    fclose(fp_write);
  }
}
void llama_binding_free_model(llama_context *ctx) { llama_free(ctx); }
void llama_free_params(gpt_params *params) { delete params; }

void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
@ -640,9 +501,13 @@ void *llama_allocate_params(
  if (ignore_eos) {
    params->logit_bias[llama_token_eos()] = -INFINITY;
  }

  if (antiprompt_count > 0) {
    params->antiprompt = create_vector(antiprompt, antiprompt_count);
    for (int i = 0; i < antiprompt_count; i++) {
      params->antiprompt.push_back(std::string(antiprompt[i]));
    }
  }

  params->tfs_z = tfs_z;
  params->typical_p = typical_p;
  params->presence_penalty = presence_penalty;
@ -650,6 +515,7 @@ void *llama_allocate_params(
  params->mirostat_eta = mirostat_eta;
  params->mirostat_tau = mirostat_tau;
  params->penalize_nl = penalize_nl;

  std::stringstream ss(logit_bias);
  llama_token key;
  char sign;
@ -669,7 +535,6 @@ void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu_layers, int n_batch,
                 const char *maingpu, const char *tensorsplit, bool numa) {
  // load the model
  auto lparams = llama_context_default_params();

  lparams.n_ctx = n_ctx;
@ -706,25 +571,6 @@ void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
  lparams.n_batch = n_batch;

  llama_init_backend(numa);
  void *res = nullptr;
  try {
    llama_model *model = llama_load_model_from_file(fname, lparams);
    if (model == NULL) {
      fprintf(stderr, "error: failed to load model \n");
      return res;
    }

    llama_context *lctx = llama_new_context_with_model(model, lparams);
    if (lctx == NULL) {
      fprintf(stderr, "error: failed to create context with model \n");
      llama_free_model(model);
      return res;
    }

  } catch (std::runtime_error &e) {
    fprintf(stderr, "failed %s", e.what());
    return res;
  }

  return res;
  return llama_init_from_file(fname, lparams);
}
@ -30,22 +30,13 @@ extern "C" {

extern unsigned char tokenCallback(void *, char *);

int load_state(void *ctx, char *statefile, char *modes);

int eval(void *params_ptr, void *ctx, char *text);

void save_state(void *ctx, char *dst, char *modes);

void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
                 bool mlock, bool embeddings, bool mmap, bool low_vram,
                 bool vocab_only, int n_gpu, int n_batch, const char *maingpu,
                 const char *tensorsplit, bool numa);

int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings);

int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
                         int tokenSize, float *res_embeddings);

void *llama_allocate_params(
    const char *prompt, int seed, int threads, int tokens, int top_k,
    float top_p, float temp, float repeat_penalty, int repeat_last_n,
@ -59,13 +50,11 @@ void *llama_allocate_params(

void llama_free_params(void *params_ptr);

void llama_binding_free_model(void *state);
void llama_binding_free_model(llama_context *ctx);

int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug);

#ifdef __cplusplus
}

std::vector<std::string> create_vector(const char **strings, int count);
void delete_vector(std::vector<std::string> *vec);
#endif
llama/llama.go (126 changes)
@ -31,135 +31,35 @@ package llama
import "C"
import (
	"fmt"
	"os"
	"strings"
	"sync"
	"unsafe"
)

type LLama struct {
	state       unsafe.Pointer
	ctx         unsafe.Pointer
	embeddings  bool
	contextSize int
}

func New(model string, opts ...ModelOption) (*LLama, error) {
	mo := NewModelOptions(opts...)

	// TODO: free this pointer
	modelPath := C.CString(model)
	result := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
	if result == nil {
	ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
	if ctx == nil {
		return nil, fmt.Errorf("failed loading model")
	}

	ll := &LLama{state: result, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
	ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}

	return ll, nil
}

func (l *LLama) Free() {
	C.llama_binding_free_model(l.state)
}

func (l *LLama) LoadState(state string) error {
	d := C.CString(state)
	w := C.CString("rb")

	result := C.load_state(l.state, d, w)
	if result != 0 {
		return fmt.Errorf("error while loading state")
	}

	return nil
}

func (l *LLama) SaveState(dst string) error {
	d := C.CString(dst)
	w := C.CString("wb")

	C.save_state(l.state, d, w)

	_, err := os.Stat(dst)
	return err
}

// Token Embeddings
func (l *LLama) TokenEmbeddings(tokens []int, opts ...PredictOption) ([]float32, error) {
	if !l.embeddings {
		return []float32{}, fmt.Errorf("model loaded without embeddings")
	}

	po := NewPredictOptions(opts...)

	outSize := po.Tokens
	if po.Tokens == 0 {
		outSize = 9999999
	}

	floats := make([]float32, outSize)

	myArray := (*C.int)(C.malloc(C.size_t(len(tokens)) * C.sizeof_int))

	// Copy the values from the Go slice to the C array
	for i, v := range tokens {
		(*[1<<31 - 1]int32)(unsafe.Pointer(myArray))[i] = int32(v)
	}

	params := C.llama_allocate_params(C.CString(""), C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
		C.int(po.Batch), C.int(po.NKeep), nil, C.int(0),
		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
		C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
		C.CString(po.MainGPU), C.CString(po.TensorSplit),
		C.bool(po.PromptCacheRO),
	)
	ret := C.get_token_embeddings(params, l.state, myArray, C.int(len(tokens)), (*C.float)(&floats[0]))
	if ret != 0 {
		return floats, fmt.Errorf("embedding inference failed")
	}
	return floats, nil
}

// Embeddings
func (l *LLama) Embeddings(text string, opts ...PredictOption) ([]float32, error) {
	if !l.embeddings {
		return []float32{}, fmt.Errorf("model loaded without embeddings")
	}

	po := NewPredictOptions(opts...)

	input := C.CString(text)
	if po.Tokens == 0 {
		po.Tokens = 99999999
	}
	floats := make([]float32, po.Tokens)
	reverseCount := len(po.StopPrompts)
	reversePrompt := make([]*C.char, reverseCount)
	var pass **C.char
	for i, s := range po.StopPrompts {
		cs := C.CString(s)
		reversePrompt[i] = cs
		pass = &reversePrompt[0]
	}

	params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
		C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), C.CString(po.LogitBias),
		C.CString(po.PathPromptCache), C.bool(po.PromptCacheAll), C.bool(po.MLock), C.bool(po.MMap),
		C.CString(po.MainGPU), C.CString(po.TensorSplit),
		C.bool(po.PromptCacheRO),
	)

	ret := C.get_embeddings(params, l.state, (*C.float)(&floats[0]))
	if ret != 0 {
		return floats, fmt.Errorf("embedding inference failed")
	}

	return floats, nil
	C.llama_binding_free_model(l.ctx)
}

func (l *LLama) Eval(text string, opts ...PredictOption) error {
@ -189,7 +89,7 @@ func (l *LLama) Eval(text string, opts ...PredictOption) error {
		C.CString(po.MainGPU), C.CString(po.TensorSplit),
		C.bool(po.PromptCacheRO),
	)
	ret := C.eval(params, l.state, input)
	ret := C.eval(params, l.ctx, input)
	if ret != 0 {
		return fmt.Errorf("inference failed")
	}
@ -203,7 +103,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
	po := NewPredictOptions(opts...)

	if po.TokenCallback != nil {
		setCallback(l.state, po.TokenCallback)
		setCallback(l.ctx, po.TokenCallback)
	}

	input := C.CString(text)
@ -231,7 +131,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
		C.CString(po.MainGPU), C.CString(po.TensorSplit),
		C.bool(po.PromptCacheRO),
	)
	ret := C.llama_predict(params, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
	ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
	if ret != 0 {
		return "", fmt.Errorf("inference failed")
	}
@ -248,7 +148,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
	C.llama_free_params(params)

	if po.TokenCallback != nil {
		setCallback(l.state, nil)
		setCallback(l.ctx, nil)
	}

	return res, nil
@ -268,7 +168,7 @@ func (l *LLama) Predict(text string, opts ...PredictOption) (string, error) {
//
// It is safe to call this method while a prediction is running.
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
	setCallback(l.state, callback)
	setCallback(l.ctx, callback)
}
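A short sketch (not from the diff) of the binding lifecycle after the rename from state to ctx. The option names and the callback semantics are taken from this commit; the model path is hypothetical.

package main

import (
	"fmt"

	"github.com/jmorganca/ollama/llama"
)

func main() {
	// options shown here are the ones used elsewhere in this commit
	l, err := llama.New("/path/to/model.bin", llama.EnableF16Memory, llama.SetContext(128))
	if err != nil {
		panic(err)
	}
	defer l.Free() // releases the llama_context via llama_binding_free_model

	// the callback may return false to stop generation early (see the
	// tokenCallback check inside llama_predict above)
	if _, err := l.Predict("Why is the sky blue?", llama.Debug, llama.SetTokenCallback(func(token string) bool {
		fmt.Print(token)
		return true
	})); err != nil {
		panic(err)
	}
}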

var (
python/poetry.lock → sdk/poetry.lock (generated, 0 changes)
@ -17,26 +17,14 @@ import (
func Serve(ln net.Listener) error {
	r := gin.Default()

	var l *llama.LLama

	gpulayers := 1
	// TODO: these should be request parameters
	gpulayers := 0
	tokens := 512
	threads := runtime.NumCPU()
	model := "/Users/pdevine/.cache/gpt4all/GPT4All-13B-snoozy.ggmlv3.q4_0.bin"

	r.POST("/api/load", func(c *gin.Context) {
		var err error
		l, err = llama.New(model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
		if err != nil {
			fmt.Println("Loading the model failed:", err.Error())
		}
	})

	r.POST("/api/unload", func(c *gin.Context) {
	})

	r.POST("/api/generate", func(c *gin.Context) {
		// TODO: set prompt from template
		fmt.Println("Generating text...")

		var req api.GenerateRequest
		if err := c.ShouldBindJSON(&req); err != nil {
@ -44,6 +32,14 @@ func Serve(ln net.Listener) error {
			return
		}

		fmt.Println(req)

		l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
		if err != nil {
			fmt.Println("Loading the model failed:", err.Error())
			return
		}

		ch := make(chan string)

		go func() {
@ -55,7 +51,7 @@ func Serve(ln net.Listener) error {
			if err != nil {
				panic(err)
			}
		}()
		}()

		c.Stream(func(w io.Writer) bool {
			tok, ok := <-ch
@ -65,11 +61,6 @@ func Serve(ln net.Listener) error {
			c.SSEvent("token", tok)
			return true
		})

		// embeds, err := l.Embeddings(text)
		// if err != nil {
		// 	fmt.Printf("Embeddings: error %s \n", err.Error())
		// }
	})

	log.Printf("Listening on %s", ln.Addr())
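As a way to poke at the /api/generate route while the server is listening on its unix socket, here is a raw-client sketch (not part of the commit). The transport wiring and the lowercase JSON field names are assumptions for illustration, not something this diff defines.

package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os"
	"path"
)

func main() {
	home, _ := os.UserHomeDir()
	sock := path.Join(home, ".ollama", "ollama.sock")

	// route every request over the unix socket the server listens on
	client := http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", sock)
			},
		},
	}

	// field names here assume the obvious json mapping onto api.GenerateRequest
	body, _ := json.Marshal(map[string]string{
		"model":  "/path/to/model.bin", // hypothetical
		"prompt": "Why is the sky blue?",
	})

	resp, err := client.Post("http://localhost/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// the handler streams server-sent events; print them line by line
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}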