fix gemma2-2b conversion

parent b3554778bd
commit db8c944498
@@ -34,10 +34,20 @@ func (p *gemma2Model) KV(t *Tokenizer) llm.KV {
 }
 
 func (p *gemma2Model) Replacements() []string {
-	return append(
-		p.gemmaModel.Replacements(),
+	return []string{
+		"model.embed_tokens", "token_embd",
+		"model.norm", "output_norm",
+		"model.layers", "blk",
+		"input_layernorm", "attn_norm",
+		"self_attn.q_proj", "attn_q",
+		"self_attn.k_proj", "attn_k",
+		"self_attn.v_proj", "attn_v",
+		"self_attn.o_proj", "attn_output",
+		"mlp.gate_proj", "ffn_gate",
+		"mlp.down_proj", "ffn_down",
+		"mlp.up_proj", "ffn_up",
 		"post_attention_layernorm", "post_attention_norm",
 		"pre_feedforward_layernorm", "ffn_norm",
 		"post_feedforward_layernorm", "post_ffw_norm",
-	)
+	}
 }
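The slice above is consumed as old/new name pairs when tensor names are mapped from the safetensors layout to GGUF. The switch from append(p.gemmaModel.Replacements(), ...) to a standalone slice matters if the converter feeds these pairs to a strings.Replacer (an assumption about the convert package, not shown in this diff): strings.NewReplacer tries the old strings in argument order, so an inherited gemma-v1 pair for the same key would shadow the gemma2-specific one appended after it. A minimal sketch of that ordering behavior, using a hypothetical pair subset:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.NewReplacer takes old/new pairs; at each position in the
	// input it tries the old strings in argument order, so the first
	// matching pair wins. Appending gemma2-specific pairs after inherited
	// ones could therefore leave an inherited mapping in effect.
	r := strings.NewReplacer(
		"model.layers", "blk",
		"post_attention_layernorm", "post_attention_norm",
	)
	fmt.Println(r.Replace("model.layers.0.post_attention_layernorm.weight"))
	// Output: blk.0.post_attention_norm.weight
}

Defining the full pair list directly on gemma2Model makes the mapping self-contained and independent of whatever the embedded gemmaModel declares.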
@@ -96,6 +96,7 @@ func TestConvertModel(t *testing.T) {
 		"Mistral-7B-Instruct-v0.2",
 		"Mixtral-8x7B-Instruct-v0.1",
 		"gemma-2b-it",
+		"gemma-2-2b-it",
 		// microsoft/Phi-3-mini-128-instruct@d548c233192db00165d842bf8edff054bb3212f8
 		"Phi-3-mini-128k-instruct",
 		"all-MiniLM-L6-v2",
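The new "gemma-2-2b-it" entry extends the conversion regression suite to cover the fixed model. As a rough illustration only (the actual TestConvertModel harness is not part of this diff), such a name list typically drives table-driven subtests:

package convert

import "testing"

// Hypothetical sketch of a table-driven harness; the names and steps are
// assumptions, not the actual TestConvertModel implementation.
func TestConvertModelSketch(t *testing.T) {
	cases := []string{
		"gemma-2b-it",
		"gemma-2-2b-it", // coverage added by this commit
	}
	for _, name := range cases {
		name := name
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			// Load the safetensors fixture for name, run the converter,
			// and compare the resulting GGUF metadata against a golden file.
		})
	}
}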