Compare commits
5 Commits
a62817d677
30dd74930d
ce78e400c2
edeea1d6f0
450400107b
llm/ext_server/server.cpp (vendored): 231 changes
@@ -1040,6 +1040,7 @@ struct llama_server_context
img.request_encode_image = false;
}

LOG_TEE("slot has images: %d\n", slot.images.size());
return slot.images.size() > 0;
}

@@ -1271,6 +1272,150 @@ struct llama_server_context
}
}

/* bool process_images_paligemma(server_slot &slot, int n_batch)
{
// set_off_embeds(ctx);
int n_past = 0;
int image_idx = 0;
slot_image &img = slot.images[image_idx];

// rescale image embeddings
float *data = img.image_embedding;
for (int i = 0; i < 2048 * 256; i++)
{
data[i] = data[i] / sqrt(2048);
}

if (ctx)
{
// set_image_embeds(ctx, data);
// print_embeds(ctx);
}
else
{
printf("ctx is null");
}

// generate user_prompt -> this should contain image tokens prepended and a new line appended:
// batch.n_tokens += (int)slot.images.size() * llama_n_embd(model);
std::vector<llama_token> tokens;
std::string prompt = "caption es";
std::vector<llama_token> text = ::llama_tokenize(ctx, prompt, false, true);

for (int i = 0; i < (int)slot.images.size() * 256; i++)
{
tokens.push_back(257152);
}

tokens.push_back(2);

for (int i = 0; i < text.size(); i++)
{
// printf("token [%d]: %d\n", text[i]);
tokens.push_back(text[i]);
}

tokens.push_back(108);

batch.n_tokens = (int)slot.images.size() * 256 + 2 + text.size();
printf("\nbatch.n_tokens %d\n", batch.n_tokens);

for (int i = 0; i < batch.n_tokens; i++)
{
printf("token %d: %d\n", i, tokens[i]);
}

for (int i = 0; i < batch.n_tokens; i += n_batch)
{
printf("calling decode\n");
int n_eval = (int)batch.n_tokens - i;
if (n_eval > n_batch)
{
n_eval = n_batch;
}
printf("n_eval: %d, n_past: %d, slot.n_past: %d\n", n_eval, n_past, slot.n_past);
llama_set_causal_attn(ctx, false);

printf("DEBUGGING DECODE BATCH:\n");
for (int j = 0; j < n_eval; j++)
{
printf("token[%d]: %d\n", j, tokens[j]);
}

llama_batch my_batch = llama_batch_get_one(&tokens[i], n_eval, 0, 0);
printf("%s: viewing batch: n_tokens = %d, batch.token %d, batch.pos = %d, batch.logits = %d\n", __func__, n_eval, batch.token + i, batch.pos + i, batch.logits + i);
for (int j = 0; j < n_eval; j++)
{
// printf("new batch view token [%d]: %d\n", j, (batch.token[i + j]));
}

printf("%s: viewing batch: n_tokens = %d, batch.token %d, batch.pos = %d, batch.logits = %d\n", __func__, n_eval, my_batch.token + i, my_batch.pos + i, my_batch.logits + i);
for (int j = 0; j < n_eval; j++)
{
// printf("new batch view token [%d]: %d\n", j, (my_batch.token[i + j]));
}

printf("n_eval: %d, llama_pos: %d, llama_seq_id: %d\n", n_eval, 0, 0);
if (llama_decode(ctx, llama_batch_get_one(&tokens[i], n_eval, 0, 0)))
{
printf("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, batch.n_tokens, n_batch, n_past);
return false;
}
llama_set_causal_attn(ctx, true);
slot.n_past += n_eval;
}
printf("done processing images paligemma\n");
// llama_batch_clear(batch);
return true;
} */

bool prepare_pali(server_slot &slot, int n_batch)
{
// set_off_embeds(ctx);
int n_past = 0;
int image_idx = 0;
slot_image &img = slot.images[image_idx];

// rescale image embeddings
float *data = img.image_embedding;
for (int i = 0; i < 2048 * 256; i++)
{
data[i] = data[i] / sqrt(2048);
}
set_image_embeds(ctx, data);

// generate user_prompt -> this should contain image tokens prepended and a new line appended:
// batch.n_tokens += (int)slot.images.size() * llama_n_embd(model);
std::vector<llama_token> tokens;
std::string prompt = "How much ketchup is in this image?";
std::vector<llama_token> text = ::llama_tokenize(ctx, prompt, false, true);

for (int i = 0; i < (int)slot.images.size() * 256; i++)
{
tokens.push_back(257152);
}

tokens.push_back(2);

for (int i = 0; i < text.size(); i++)
{
// printf("token [%d]: %d\n", text[i]);
tokens.push_back(text[i]);
}

tokens.push_back(108);

printf("currently, system_tokens.size %d\n", system_tokens.size());
for (int i = 0; i < (int)tokens.size(); ++i)
{
llama_batch_add(batch, tokens[i], system_tokens.size() + slot.n_past, {slot.id}, true);
slot.n_past += 1;
}
// llama_set_causal_attn(ctx, false);
printf("slot.n_past == %d\n", slot.n_past);
return true;
}

// for multiple images processing
bool ingest_images(server_slot &slot, int n_batch)
{
@@ -1551,6 +1696,15 @@ struct llama_server_context
}

bool update_slots() {
/* gpt_params params;
params.model = "/Users/joshyan/Projects/PaliGemma/paligemma-3b-pt-224-text-model-f16.gguf";
llama_model_params model_params = llama_model_params_from_gpt_params(params);

llama_model *model = llama_load_model_from_file(params.model.c_str(), model_params);
llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
llama_context *ctx_llama = llama_new_context_with_model(model, ctx_params);
ctx = ctx_llama; */

if (system_need_update)
{
LOG_DEBUG("updating system prompt", {});
@@ -1811,9 +1965,15 @@ struct llama_server_context
const bool has_images = process_images(slot);

// process the prefix of first image
std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, add_bos_token) : prompt_tokens;
std::vector<llama_token> prefix_tokens = has_images ? tokenize(slot.images[0].prefix_prompt, false) : prompt_tokens;
printf("\nprinting prefix tokens\n");
for (int i = 0; i < prefix_tokens.size(); i++)
{
printf("prefix token[%d]: %d\n", i, prefix_tokens[i]);
}

int32_t slot_npast = slot.n_past_se > 0 ? slot.n_past_se : slot.n_past;
printf("slot_npast = %d\n", slot_npast);

int32_t ga_i = slot.ga_i;
int32_t ga_n = slot.ga_n;
@@ -1833,7 +1993,12 @@ struct llama_server_context
slot_npast++;
}

if (has_images && !ingest_images(slot, n_batch))
LOG_ERROR("checking has images", {
{"has images", has_images},
{"task_id", slot.task_id},
});
// if (has_images && !ingest_images(slot, n_batch))
if (has_images && !prepare_pali(slot, n_batch))
{
LOG_ERROR("failed processing images", {
{"slot_id", slot.id},
@@ -1844,7 +2009,9 @@ struct llama_server_context
// no one at the moment is checking the return value
return false;
}
print_causal(ctx);

printf("batch.n_tokens here for setting logits: %d\n", batch.n_tokens);
// extract the logits only for the last token
if (batch.n_tokens > 0)
{
@@ -1859,18 +2026,58 @@ struct llama_server_context

if (batch.n_tokens == 0)
{
/* completion_token_output result;
const llama_token id = llama_sampling_sample(slots[0].ctx_sampling, ctx, NULL, slots[0].i_batch);

llama_sampling_accept(slots[0].ctx_sampling, ctx, id, true);

slots[0].n_decoded += 1;
if (slots[0].n_decoded == 1)
{
slots[0].t_start_genereration = ggml_time_us();
slots[0].t_prompt_processing = (slots[0].t_start_genereration - slots[0].t_start_process_prompt) / 1e3;
metrics.on_prompt_eval(slots[0]);
}

llama_token_data_array cur_p = {slots[0].ctx_sampling->cur.data(), slots[0].ctx_sampling->cur.size(), false};
result.tok = id;

const int32_t n_probs = slots[0].sparams.n_probs;
if (slots[0].sparams.temp <= 0 && n_probs > 0)
{
// for llama_sample_token_greedy we need to sort candidates
llama_sample_softmax(ctx, &cur_p);
}

for (size_t i = 0; i < std::min(cur_p.size, (size_t)n_probs); ++i)
{
result.probs.push_back({cur_p.data[i].id, cur_p.data[i].p});
}

if (!process_token(result, slots[0]))
{
slots[0].release();
slots[0].print_timings();
send_final_response(slots[0]);
metrics.on_prediction(slots[0]);
}

slots[0].i_batch = -1; */
all_slots_are_idle = true;
return true;
}

printf("batch.n_tokens = %d\n", batch.n_tokens);
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch)
{
printf("i = %d\n", i);
const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

for (auto & slot : slots)
{
if (slot.ga_n != 1)
{
printf("slot.ga_n = %d\n", slot.ga_n);
// context extension via Self-Extend
while (slot.n_past_se >= slot.ga_i + slot.ga_w)
{
@@ -1897,6 +2104,8 @@ struct llama_server_context
}
}

printf("batching\n");

llama_batch batch_view =
{
n_tokens,
@@ -1908,9 +2117,17 @@ struct llama_server_context
batch.logits + i,
0, 0, 0, // unused
};

// llama_batch batch_view = prepare_pali(slots[0], n_batch);
printf("%s: viewing batch: n_tokens = %d, batch.token %d, batch.pos = %d, batch.logits = %d\n", __func__, n_tokens, batch.token + i, batch.pos + i, batch.logits + i);
for (int j = 0; j < n_tokens; j++)
{
printf("new batch view token [%d]: %d\n", j, (batch.token[i + j]));
}
printf("current state of causal attn: ");
print_causal(ctx);
const int ret = llama_decode(ctx, batch_view);

llama_set_causal_attn(ctx, true);
print_causal(ctx);
if (ret != 0)
{
if (n_batch == 1 || ret < 0)
@@ -1930,6 +2147,7 @@ struct llama_server_context

for (auto & slot : slots)
{
printf("there are currently n slots\n");
if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens))
{
continue;
@@ -1938,6 +2156,7 @@ struct llama_server_context
// prompt evaluated for embedding
if (slot.embedding)
{
printf("slot.embedding is true\n");
send_embedding(slot, batch_view);
slot.release();
slot.i_batch = -1;
@@ -1945,8 +2164,10 @@ struct llama_server_context
}

completion_token_output result;
printf("sampling for the ith token: %d\n", slot.i_batch - i);
// batch.logits[263] = true;
const llama_token id = llama_sampling_sample(slot.ctx_sampling, ctx, NULL, slot.i_batch - i);

printf("got back this token: %d\n", id);
llama_sampling_accept(slot.ctx_sampling, ctx, id, true);

slot.n_decoded += 1;
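For orientation, here is a minimal standalone sketch (not part of the diff) of the token layout that prepare_pali assembles above: 256 copies of the image placeholder token, then <bos>, then the tokenized text prompt, then a trailing newline token. The ids 257152, 2, and 108 are taken directly from the diff; tokenize_prompt is a hypothetical stand-in for the real ::llama_tokenize call.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for ::llama_tokenize(ctx, prompt, false, true); returns placeholder ids.
    static std::vector<int> tokenize_prompt(const std::string &prompt) {
        return std::vector<int>(prompt.size() / 4 + 1, 0); // placeholder token ids
    }

    int main() {
        const int image_token    = 257152; // <image> id used in the diff
        const int bos_token      = 2;      // <bos> id used in the diff
        const int newline_token  = 108;    // trailing "\n" id used in the diff
        const int n_image_tokens = 256;    // assumption: one per vision patch (224/14 = 16, 16 * 16 = 256)

        std::vector<int> tokens;
        for (int i = 0; i < n_image_tokens; i++) tokens.push_back(image_token);
        tokens.push_back(bos_token);
        for (int t : tokenize_prompt("How much ketchup is in this image?")) tokens.push_back(t);
        tokens.push_back(newline_token);

        // 256 image slots + 1 bos + prompt tokens + 1 newline, mirroring the batch size math in the diff
        printf("prefix length: %zu tokens\n", tokens.size());
        return 0;
    }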
@@ -9,8 +9,8 @@ set -o pipefail
echo "Starting darwin generate script"
source $(dirname $0)/gen_common.sh
init_vars
git_module_setup
apply_patches
#git_module_setup
#apply_patches

sign() {
if [ -n "$APPLE_IDENTITY" ]; then
@@ -97,5 +97,5 @@ case "${GOARCH}" in
;;
esac

cleanup
#cleanup
echo "go generate completed. LLM runners: $(cd ${BUILD_DIR}/..; echo *)"
llm/patches/12-paligemma.diff (new file): 311 lines
@@ -0,0 +1,311 @@
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 54aa822c..45d03982 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -765,9 +765,12 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);

- embeddings = ggml_gelu(ctx0, embeddings);
- embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
- embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+ // paligemma missing second linear layer
+ if (model.mm_2_w) {
+ embeddings = ggml_gelu(ctx0, embeddings);
+ embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings);
+ embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
+ }

} else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
@@ -2542,7 +2545,10 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
return ctx->vision_model.mm_model_peg_0_b->ne[0];
}
if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
- return ctx->vision_model.mm_2_b->ne[0];
+ // paligemma missing second linear layer
+ if (ctx->vision_model.mm_2_b == nullptr) {
+ return ctx->vision_model.mm_0_b->ne[0];
+ }
}
if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
return ctx->vision_model.mm_3_b->ne[0];
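As an aside, a small sketch (not code from the patch, widths illustrative) of the projector fallback this clip.cpp change encodes: a PaliGemma-style multimodal projector has only the first linear layer, so when the mm_2_* tensors are absent the GELU plus second matmul are skipped and the reported embedding width comes from mm_0_b instead.

    #include <cstdio>

    // Toy stand-ins for the relevant clip_vision_model tensors (first dimension only).
    struct Tensor { int ne0; };

    struct Projector {
        Tensor  mm_0_b;  // bias of the first, always-present linear layer
        Tensor *mm_2_b;  // second layer bias; null for a single-linear (PaliGemma-style) projector
    };

    // Mirrors the intent of the patched clip_n_mmproj_embd fallback for PROJECTOR_TYPE_MLP.
    static int mmproj_embd(const Projector &p) {
        if (p.mm_2_b == nullptr) {
            return p.mm_0_b.ne0; // single-linear projector: report the first layer's output width
        }
        return p.mm_2_b->ne0;    // regular two-layer MLP projector
    }

    int main() {
        Projector paligemma{{2048}, nullptr};      // 2048 matches the width used throughout the diff
        Tensor llava_out{4096};                    // illustrative width for a two-layer projector
        Projector llava{{4096}, &llava_out};
        printf("single-linear width: %d, two-layer width: %d\n",
               mmproj_embd(paligemma), mmproj_embd(llava));
        return 0;
    }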
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index 8c7dd2ae..38eeb305 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -18,7 +18,10 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
if (n_eval > n_batch) {
n_eval = n_batch;
}
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+
+ llama_batch my_batch = llama_batch_get_one(&tokens[i], n_eval, *n_past, 0);
+ if (llama_decode(ctx_llama, my_batch))
+ {
LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
return false;
}
@@ -36,6 +39,11 @@ static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
std::string str2 = str;
std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
+ embd_inp.push_back(108);
+ for (int i = 0; i < embd_inp.size(); i++)
+ {
+ printf("token[%d]: %d\n", i, embd_inp[i]);
+ }
eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
return true;
}
@@ -183,9 +191,17 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
}
}

- eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
- llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
- eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
+ // build user prompt with 256 image tokens
+ user_prompt = "What is in this image?";
+ std::string image_token_prefix = "";
+ for (int i = 0; i < 256; i++) {
+ image_token_prefix += "<image>";
+ }
+ std::string user_prompt_with_images = image_token_prefix + "<bos>" + user_prompt;
+
+ llama_set_causal_attn(ctx_llava->ctx_llama, true);
+ eval_string(ctx_llava->ctx_llama, user_prompt_with_images.c_str(), params->n_batch, &n_past, false);
+ // llama_set_causal_attn(ctx_llava->ctx_llama, true);

// generate the response

@@ -324,6 +340,19 @@ int main(int argc, char ** argv) {
return 1;
}

+ if (!image_embed || !image_embed->embed) {
+ std::cerr << "Error: image_embed or image_embed->embed is null." << std::endl;
+ return 1;
+ }
+
+ // image feature scaling
+ float *data = image_embed->embed;
+ for (int i = 0; i < 2048 * 256; i++) {
+ data[i] = data[i] / sqrt(2048);
+ }
+
+ set_image_embeds(ctx_llava->ctx_llama, image_embed->embed);
+
// process the prompt
process_prompt(ctx_llava, image_embed, &params, params.prompt);
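The scaling loop in the hunk above divides every image-embedding value by sqrt(2048) before handing the buffer to set_image_embeds. Here is a minimal standalone sketch of that arithmetic, assuming 256 patch positions and a 2048-wide embedding (matching the 2048 * 256 bound in the diff); the ggml_scale(inpL, sqrtf(n_embd)) applied later in the llama.cpp patch appears to restore the original magnitude for these rows.

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const int n_positions = 256;  // image patches (assumption matching the diff)
        const int n_embd      = 2048; // text-model embedding width assumed by the diff

        std::vector<float> embeds(n_positions * n_embd, 1.0f); // placeholder features

        // Pre-scale exactly as the patch does: data[i] /= sqrt(2048).
        for (size_t i = 0; i < embeds.size(); i++) {
            embeds[i] /= std::sqrt((float)n_embd);
        }

        // After the model later applies ggml_scale(inpL, sqrtf(n_embd)), these rows are
        // back at their original magnitude: (1 / sqrt(2048)) * sqrt(2048) = 1.
        printf("scaled value: %f (times sqrt(%d) = %f)\n",
               embeds[0], n_embd, embeds[0] * std::sqrt((float)n_embd));
        return 0;
    }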
diff --git a/include/llama.h b/include/llama.h
index ce07f4fa..c3465d68 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -444,6 +444,13 @@ extern "C" {
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);

+ // save image embeddings
+ LLAMA_API void set_image_embeds(struct llama_context *ctx, float *data);
+
+ LLAMA_API void print_embeds(struct llama_context *ctx);
+
+ LLAMA_API void print_causal(struct llama_context *ctx);
+
LLAMA_API int64_t llama_time_us(void);

LLAMA_API size_t llama_max_devices(void);
diff --git a/src/llama.cpp b/src/llama.cpp
index 7f2f0003..d5926202 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2677,6 +2677,7 @@ struct llama_context {

const struct llama_model & model;

+ float *image_embeds;
struct llama_cparams cparams;
struct llama_sampling sampling;
struct llama_kv_cache kv_self;
@@ -2760,6 +2761,33 @@ struct llama_context {
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
};

+void set_image_embeds(llama_context *ctx, float *data) {
+ ctx->image_embeds = data;
+}
+
+void print_embeds(struct llama_context *ctx)
+{
+ if (ctx->image_embeds)
+ {
+ for (int i = 0; i < 256; i++)
+ {
+ LLAMA_LOG_INFO("%f ", ctx->image_embeds[i]);
+ }
+ }
+}
+
+void print_causal(llama_context *ctx)
+{
+ if (ctx->cparams.causal_attn)
+ {
+ LLAMA_LOG_INFO("causal attn is true\n");
+ }
+ else
+ {
+ LLAMA_LOG_INFO("causal attn is false\n");
+ }
+}
+
struct llama_lora_weight {
struct ggml_tensor * a = nullptr;
struct ggml_tensor * b = nullptr;
@@ -3021,6 +3049,96 @@ static bool llama_kv_cache_init(
return true;
}

+void llama_log_tensor(ggml_tensor *tensor, char *filename)
+{
+ if (tensor == NULL)
+ {
+ fprintf(stderr, "Tensor is NULL\n");
+ return;
+ }
+
+ FILE *fp = fopen(filename, "wb");
+ if (fp == NULL)
+ {
+ fprintf(stderr, "Failed to open file '%s'\n", filename);
+ return;
+ }
+
+ LLAMA_LOG_INFO("Tensor name: %s\n", tensor->name);
+ LLAMA_LOG_INFO("Tensor type: ");
+ switch (tensor->type)
+ {
+ case GGML_TYPE_F32:
+ LLAMA_LOG_INFO("GGML_TYPE_F32\n");
+ break;
+ case GGML_TYPE_F16:
+ printf("GGML_TYPE_F16\n");
+ break;
+ case GGML_TYPE_Q4_0:
+ printf("GGML_TYPE_Q4_0\n");
+ break;
+ case GGML_TYPE_Q4_1:
+ printf("GGML_TYPE_Q4_1\n");
+ break;
+ default:
+ printf("Unknown\n");
+ }
+
+ LLAMA_LOG_INFO("Tensor dimensions: ");
+ for (int i = 0; i < GGML_MAX_DIMS; i++)
+ {
+ if (tensor->ne[i] == 1)
+ break;
+ printf("%ld ", tensor->ne[i]);
+ }
+ printf("\n");
+
+ size_t num_elements = ggml_nelements(tensor);
+ LLAMA_LOG_INFO("num elements: %zu\n", num_elements);
+
+ LLAMA_LOG_INFO("Tensor data:\n");
+ switch (tensor->type)
+ {
+ case GGML_TYPE_F32:
+ {
+ float *data = (float *)tensor->data;
+ for (size_t i = 0; i < num_elements; i++)
+ {
+ fprintf(fp, "%f ", data[i]);
+ if (i % 2048 == 0 && i != 0)
+ {
+ fprintf(fp, "\n");
+ }
+ }
+ /* for (size_t i = 0; i < 25; i++)
+ {
+ LLAMA_LOG_INFO("%f ", data[i]);
+ if (i % 2048 == 0 && i != 0)
+ {
+ LLAMA_LOG_INFO("\n");
+ }
+ } */
+ }
+ break;
+ case GGML_TYPE_F16:
+ {
+ // Implement custom printing for fp16 data
+ fprintf(fp, "F16 data (not shown)\n");
+ }
+ break;
+ // For quantized types, you might need to implement custom printing logic
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ fprintf(fp, "Quantized data (not shown)\n");
+ break;
+ default:
+ fprintf(fp, "Unknown data type\n");
+ }
+ fprintf(fp, "\n");
+
+ fclose(fp);
+}
+
// find an empty slot of size "n_tokens" in the cache
// updates the cache head
// Note: On success, it's important that cache.head points
@@ -11660,6 +11778,18 @@ struct llm_build_context {

inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);

+ // set the image embeddings in the input tensor
+ if (lctx.image_embeds) {
+ struct ggml_tensor *image_embeds = ggml_dup_tensor(ctx0, inpL);
+ image_embeds->data = lctx.image_embeds;
+ image_embeds->ne[1] = 256;
+ print_embeds(&lctx);
+ // llama_log_tensor(image_embeds, "/Users/joshyan/ollama/tensordata");
+
+ inpL = ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0);
+ lctx.image_embeds = NULL;
+ }
+
inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
cb(inpL, "inp_scaled", -1);

@@ -14678,7 +14808,7 @@ static int llama_decode_internal(
}

// non-causal masks do not use the KV cache
- if (hparams.causal_attn) {
+ if (hparams.causal_attn || lctx.image_embeds) {
llama_kv_cache_update(&lctx);

// if we have enough unused cells before the current head ->
@@ -18565,6 +18695,12 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
if (ctx->logits == nullptr) {
throw std::runtime_error("no logits");
}
+ // LLAMA_LOG_INFO("CURRENTLY, I IS %d\n", i);
+ // printf("currently, i is: %d", i);
+ /* for (int i = 0; i < 263; i++)
+ {
+ printf("output_ids[%d]: %d\n", i, ctx->output_ids[i]);
+ } */

if (i < 0) {
j = ctx->n_outputs + i;
@@ -18577,6 +18713,7 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
j = ctx->output_ids[i];
}

+ j = 0;
if (j < 0) {
throw std::runtime_error(format("batch.logits[%d] != true", i));
}
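Conceptually, the llm_build_context hunk above overwrites the first 256 rows of the freshly built input-embedding tensor with the stored image embeddings (ggml_set_2d_inplace at offset 0) before the whole tensor is scaled by sqrt(n_embd). Below is a plain-array sketch of that row replacement, with illustrative sizes and no ggml dependency; it is an interpretation of the patch, not code from it.

    #include <cstdio>
    #include <vector>

    int main() {
        const int n_tokens = 260;  // e.g. 256 image slots plus a short text prompt (illustrative)
        const int n_embd   = 2048; // embedding width assumed throughout the diff
        const int n_image  = 256;  // rows supplied by the vision tower

        // inpL: token embeddings looked up from tok_embd, row-major [n_tokens][n_embd].
        std::vector<float> inpL(n_tokens * n_embd, 0.5f);
        // image_embeds: projected (and pre-scaled) vision features.
        std::vector<float> image_embeds(n_image * n_embd, 0.25f);

        // Conceptual equivalent of ggml_set_2d_inplace(ctx0, inpL, image_embeds, inpL->nb[1], 0):
        // copy the image rows over the first n_image rows of inpL.
        for (int row = 0; row < n_image; row++) {
            for (int col = 0; col < n_embd; col++) {
                inpL[row * n_embd + col] = image_embeds[row * n_embd + col];
            }
        }

        printf("row 0 now holds image features: %f, row 256 still text: %f\n",
               inpL[0], inpL[n_image * n_embd]);
        return 0;
    }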
@@ -179,7 +179,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
}
}
}

opts.NumGPU = 0
if len(servers) == 0 {
return nil, fmt.Errorf("no servers found for %v", gpus)
}
@@ -733,7 +733,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
"n_predict": req.Options.NumPredict,
"n_keep": req.Options.NumKeep,
"main_gpu": req.Options.MainGPU,
"temperature": req.Options.Temperature,
"temperature": 0,
"top_k": req.Options.TopK,
"top_p": req.Options.TopP,
"min_p": req.Options.MinP,