Skip to content

Commit

Permalink
Merge pull request #179 from MollySophia/fix-linux-cuda
Browse files Browse the repository at this point in the history
Fix build with CUDA under Linux
  • Loading branch information
LaylBongers authored Aug 7, 2024
2 parents d622368 + 15ce960 commit 84fea22
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 5 deletions.
1 change: 0 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,6 @@ elseif()
set(GGML_STANDALONE ON)
endif()

set(BUILD_SHARED_LIBS OFF)
if (NOT TARGET ggml)
add_subdirectory(ggml)
# ... otherwise assume ggml is added by a parent CMakeLists.txt
Expand Down
6 changes: 3 additions & 3 deletions rwkv_graph.inc
Original file line number Diff line number Diff line change
Expand Up @@ -548,7 +548,7 @@ static struct ggml_tensor * rwkv_att_v6(
ggml_reshape_1d(ctx, layer.att_time_decay, n_embed)
);

w = rwkv_exp(ctx, ggml_neg_inplace(ctx, rwkv_exp(ctx, w)));
w = rwkv_exp(ctx, ggml_neg(ctx, rwkv_exp(ctx, w)));
w = ggml_reshape_4d(ctx, w, 1, head_size, head_count, sequence_length);

// dup is not strictly required; doing it just in case.
Expand Down Expand Up @@ -576,9 +576,9 @@ static struct ggml_tensor * rwkv_att_v6(
x = rwkv_group_norm_eps_64e_minus5(ctx, x, head_count);
// Convert back to a regular vector.
x = ggml_reshape_2d(ctx, x, n_embed, sequence_length);
x = ggml_add_inplace(
x = ggml_add(
ctx,
ggml_mul_inplace(
ggml_mul(
ctx,
x,
layer.att_ln_x_weight
Expand Down
2 changes: 1 addition & 1 deletion rwkv_operators.inc
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ static void rwkv_groupnorm_impl(

// Element-wise exp(x)
struct ggml_tensor * rwkv_exp(struct ggml_context * ctx, struct ggml_tensor * x) {
return ggml_map_custom1_inplace(ctx, x, rwkv_exp_impl, 1, NULL);
return ggml_map_custom1(ctx, x, rwkv_exp_impl, 1, NULL);
}

// Element-wise 1 - x
Expand Down

0 comments on commit 84fea22

Please sign in to comment.