7 changes: 6 additions & 1 deletion src/llama-graph.cpp
@@ -900,7 +900,8 @@ ggml_tensor * llm_graph_context::build_cvec(
 
 ggml_tensor * llm_graph_context::build_lora_mm(
         ggml_tensor * w,
-        ggml_tensor * cur) const {
+        ggml_tensor * cur,
+        ggml_tensor * w_s) const {
     ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
 
     for (const auto & lora : *loras) {
@@ -921,6 +922,10 @@ ggml_tensor * llm_graph_context::build_lora_mm(
         res = ggml_add(ctx0, res, ab_cur);
     }
 
+    if (w_s) {
+        res = ggml_mul(ctx0, res, w_s);
+    }
+
     return res;
 }
 
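For reference, a minimal sketch of the call-site pattern this enables, mirroring the model changes further down (Qcur, wq and wq_s are taken from those call sites):

    // new form: the optional per-tensor scale is passed through and applied inside build_lora_mm
    ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);

    // equivalent to the previous explicit form at each call site:
    // ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
    // if (model.layers[il].wq_s) {
    //     Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_s);
    // }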
5 changes: 3 additions & 2 deletions src/llama-graph.h
@@ -764,10 +764,11 @@ struct llm_graph_context {
               ggml_tensor * cur,
               int il) const;
 
-    // do mat_mul, while optionally apply lora
+    // do mat_mul, while optionally apply lora and per-tensor scale
     ggml_tensor * build_lora_mm(
               ggml_tensor * w,
-              ggml_tensor * cur) const;
+              ggml_tensor * cur,
+              ggml_tensor * w_s = nullptr) const;
Comment on lines 768 to +771
For a follow-up PR, we should move cur to the front, to increase the consistency of the interfaces:

    ggml_tensor * build_lora_mm(
              ggml_tensor * cur,
              ggml_tensor * w,
              ggml_tensor * w_s = nullptr) const;

It will touch a lot of lines though.


// do mat_mul_id, while optionally apply lora
ggml_tensor * build_lora_mm_id(
25 changes: 5 additions & 20 deletions src/models/bitnet.cpp
@@ -29,32 +29,23 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
         // self-attention
         {
             // compute Q and K and RoPE them
-            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
-            if (model.layers[il].wq_s) {
-                Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_s);
-            }
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
             cb(Qcur, "Qcur", il);
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
             }
 
             // B1.K
-            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-            if (model.layers[il].wk_s) {
-                Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_s);
-            }
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
             }
 
             // B1.V
-            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-            if (model.layers[il].wv_s) {
-                Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_s);
-            }
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
@@ -90,10 +81,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
                 LLM_NORM_RMS, il);
             cb(cur, "attn_sub_norm", il);
 
-            cur = build_lora_mm(model.layers[il].wo, cur);
-            if (model.layers[il].wo_s) {
-                cur = ggml_mul(ctx0, cur, model.layers[il].wo_s);
-            }
+            cur = build_lora_mm(model.layers[il].wo, cur, model.layers[il].wo_s);
             if (model.layers[il].bo) {
                 cur = ggml_add(ctx0, cur, model.layers[il].bo);
             }
@@ -127,10 +115,7 @@ llm_build_bitnet::llm_build_bitnet(const llama_model & model, const llm_graph_pa
                 LLM_NORM_RMS, il);
             cb(cur, "ffn_sub_norm", il);
 
-            cur = build_lora_mm(model.layers[il].ffn_down, cur);
-            if (model.layers[il].ffn_down_s) {
-                cur = ggml_mul(ctx0, cur, model.layers[il].ffn_down_s);
-            }
+            cur = build_lora_mm(model.layers[il].ffn_down, cur, model.layers[il].ffn_down_s);
             cb(cur, "ffn_down", il);
 
             cur = ggml_add(ctx0, cur, ffn_inp);
15 changes: 3 additions & 12 deletions src/models/llama.cpp
@@ -43,28 +43,19 @@ llm_build_llama<embed>::llm_build_llama(const llama_model & model, const llm_gra
             ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
 
             // compute Q and K and RoPE them
-            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
-            if (model.layers[il].wq_s) {
-                Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_s);
-            }
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
             cb(Qcur, "Qcur", il);
             if (model.layers[il].bq) {
                 Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                 cb(Qcur, "Qcur", il);
             }
-            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-            if (model.layers[il].wk_s) {
-                Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_s);
-            }
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
             cb(Kcur, "Kcur", il);
             if (model.layers[il].bk) {
                 Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                 cb(Kcur, "Kcur", il);
             }
-            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-            if (model.layers[il].wv_s) {
-                Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_s);
-            }
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
             cb(Vcur, "Vcur", il);
             if (model.layers[il].bv) {
                 Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
15 changes: 3 additions & 12 deletions src/models/qwen3.cpp
@@ -30,22 +30,13 @@ llm_build_qwen3::llm_build_qwen3(const llama_model & model, const llm_graph_para
         // self-attention
         {
             // compute Q and K and RoPE them
-            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
-            if (model.layers[il].wq_s) {
-                Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_s);
-            }
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
             cb(Qcur, "Qcur", il);
 
-            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-            if (model.layers[il].wk_s) {
-                Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_s);
-            }
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
             cb(Kcur, "Kcur", il);
 
-            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-            if (model.layers[il].wv_s) {
-                Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_s);
-            }
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
             cb(Vcur, "Vcur", il);
 
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
15 changes: 3 additions & 12 deletions src/models/qwen3moe.cpp
@@ -30,22 +30,13 @@ llm_build_qwen3moe::llm_build_qwen3moe(const llama_model & model, const llm_grap
         // self_attention
         {
             // compute Q and K and RoPE them
-            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
-            if (model.layers[il].wq_s) {
-                Qcur = ggml_mul(ctx0, Qcur, model.layers[il].wq_s);
-            }
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s);
             cb(Qcur, "Qcur", il);
 
-            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-            if (model.layers[il].wk_s) {
-                Kcur = ggml_mul(ctx0, Kcur, model.layers[il].wk_s);
-            }
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
             cb(Kcur, "Kcur", il);
 
-            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-            if (model.layers[il].wv_s) {
-                Vcur = ggml_mul(ctx0, Vcur, model.layers[il].wv_s);
-            }
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
             cb(Vcur, "Vcur", il);
 
             Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);