Support some ops for lightllm and lmdeploy.
yao-fengchen committed Jun 21, 2024
1 parent af6dbbe commit a640eeb
Showing 2 changed files with 10 additions and 78 deletions.
54 changes: 5 additions & 49 deletions csrc/extensions.cpp
@@ -341,38 +341,6 @@ void extTokenSoftmaxReduceVInference(const at::Tensor& logics,
b_start_loc, b_seq_len, max_input_len, other_kv_index);
}

- // void extTokenDecodeAttentionInference(const at::Tensor& q, const at::Tensor& k,
- //                                       const at::Tensor& v, at::Tensor& out,
- //                                       const at::Tensor& b_loc,
- //                                       const at::Tensor& b_start_loc,
- //                                       const at::Tensor& b_seq_len,
- //                                       int max_input_len, int other_kv_index) {
- //   callDiopi(diopiTokenDecodeAttentionInference, out, q, k, v, b_loc, b_start_loc,
- //             b_seq_len, max_input_len, other_kv_index);
- // }
-
- // void extTokenDecodeAttentionInferenceBatchOne(const at::Tensor& q, const at::Tensor& k,
- //                                               const at::Tensor& v, at::Tensor& out,
- //                                               const at::Tensor& b_loc,
- //                                               const at::Tensor& b_start_loc,
- //                                               const at::Tensor& b_seq_len,
- //                                               int max_input_len, int other_kv_index) {
- //   callDiopi(diopiTokenDecodeAttentionInferenceBatchOne, out, q, k, v, b_loc, b_start_loc,
- //             b_seq_len, max_input_len, other_kv_index);
- // }
-
- // void extIncreFlashAttention(const at::Tensor& q, const at::Tensor& k,
- //                             const at::Tensor& v, at::Tensor& out,
- //                             const int head, const char* layout,
- //                             const c10::optional<at::Tensor>& padding_mask = {},
- //                             const c10::optional<at::Tensor>& atten_mask = {},
- //                             const OptionalIntArray& actual_seq_lengths = {},
- //                             int64_t num_heads = 1, double scale_value = 1.0,
- //                             const std::string& input_layout = "BSH", int64_t num_key_value_heads = 0) {
- //   callDiopi(diopiIncreFlashAttention, out, q, k, v, padding_mask, atten_mask,
- //             actual_seq_lengths, num_heads, scale_value, input_layout.c_str(), num_key_value_heads);
- // }

void extPromptFlashAttention(at::Tensor& out, const at::Tensor& q,
const at::Tensor& k, const at::Tensor& v,
const at::Tensor& atten_mask,
@@ -412,11 +380,11 @@ void extApplyPenaltyV2(at::Tensor& logits, const at::Tensor& presence_penalty,
}

void extPagedAttention(at::Tensor& out, const at::Tensor& q, const at::Tensor& k, const at::Tensor& v,
-                      const at::IntArrayRef& actual_seq_lengths,
-                      int64_t numHeads, int64_t numKeyValueHeads, int64_t dim,
-                      const at::Tensor& block_table,
-                      int64_t block_size) {
-  callDiopi(diopiPagedAttention, out, q, k, v, actual_seq_lengths,
+                      const c10::optional<at::Tensor>& atten_mask = {},
+                      const at::IntArrayRef& actual_seq_lengths = {},
+                      int64_t numHeads = 1, int64_t numKeyValueHeads = 1, int64_t dim = 1,
+                      const c10::optional<at::Tensor>& block_table = {}, int64_t block_size = 1) {
+  callDiopi(diopiPagedAttention, out, q, k, v, atten_mask, actual_seq_lengths,
numHeads, numKeyValueHeads, dim,
block_table, block_size);
}
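For reference, a minimal Python-side sketch of how a caller lines up with the updated binding. The module handle `ext` and the argument order mirror the `ext.paged_attention` call added to patch_lightllm.py below; whether the C++ default arguments are also exposed as Python defaults depends on how the op is registered, so the sketch passes every argument explicitly. Illustrative only, not part of this commit.

# Illustrative wrapper: shows the positional order expected by the updated
# extPagedAttention binding, with atten_mask inserted before actual_seq_lengths.
def call_paged_attention(ext, out, q, k_cache, v_cache, b_seq_len,
                         q_head_num, kv_head_num, head_dim,
                         block_table, block_size, atten_mask=None):
    # Passing None for atten_mask keeps the previous mask-free decode behaviour.
    ext.paged_attention(out, q, k_cache, v_cache, atten_mask, b_seq_len,
                        q_head_num, kv_head_num, head_dim,
                        block_table, block_size)
    return out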
@@ -501,18 +469,6 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("token_softmax_reducev_inference", &extTokenSoftmaxReduceVInference,
"deeplink ext_token_softmax_reducev_inference");
}
-   // if (&diopiTokenDecodeAttentionInference != nullptr) {
-   //   m.def("token_decode_attention_inference", &extTokenDecodeAttentionInference,
-   //         "deeplink token_decode_attention_inference");
-   // }
-   // if (&diopiTokenDecodeAttentionInferenceBatchOne != nullptr) {
-   //   m.def("token_decode_attention_inference_batch_one", &extTokenDecodeAttentionInferenceBatchOne,
-   //         "deeplink token_decode_attention_inference");
-   // }
-   // if (&diopiIncreFlashAttention != nullptr) {
-   //   m.def("incre_flash_attention", &extIncreFlashAttention,
-   //         "deeplink incre_flash_attention");
-   // }
if (&diopiPromptFlashAttention != nullptr) {
m.def("prompt_flash_attention", &extPromptFlashAttention,
"deeplink ext_prompt_flash_attention");
34 changes: 5 additions & 29 deletions deeplink_ext/patch_lightllm.py
@@ -71,39 +71,15 @@ def flash_context_attention(q, k, v, out, b_start_loc, b_seq_len, max_input_len)
ext.prompt_flash_attention(single_out, single_q, single_k, single_v, None, mask, [], head, scale, 2147473647, 0, "BSH", numKeyValueHeads)
return out

-     # def fused_context_attention(out, q, k, v, mask, b_seq_len, max_input_len, head, numKeyValueHeads, dim):
-     #     batch = b_start_loc.shape[0]
-     #     scale = 1 / math.sqrt(dim)
-     #     mask_key_str = str(batch) + ":" + str(max_input_len)
-     #     if mask_key_str not in mask_cache:
-     #         mask = torch.tril(torch.ones(max_input_len, max_input_len, dtype=torch.bool), diagonal=0).cuda()
-     #         mask = mask.repeat(batch, 1, 1)
-     #         mask = torch.logical_not(mask)
-     #         mask_cache[mask_key_str] = mask
-     #         print(f"cache mask in context attention, batch:seqLen={mask_key_str}")
-
-     #     mask = mask_cache[mask_key_str]
-     #     ext.prompt_flash_attention(out, q, k, v,
-     #                                mask, b_seq_len, max_input_len, head, numKeyValueHeads, dim)
-     #     return out
-
-     # context_attention_pack.context_attention_fwd = (
-     #     # flash_context_attention
-     #     fused_context_attention
-     # )
context_attention_pack.prompt_flash_attention = ext.prompt_flash_attention
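The mask passed per request by flash_context_attention above is the inverted causal mask that the removed fused_context_attention built and cached keyed by "batch:max_input_len". A minimal sketch of that construction, assuming only max_input_len, the batch size, and the target device are known:

import torch

def build_inverted_causal_mask(max_input_len, batch, device="cuda"):
    # Lower-triangular True marks visible positions; logical_not flips it so
    # True marks the future positions the attention kernel must mask out,
    # matching the removed fused path.
    mask = torch.tril(torch.ones(max_input_len, max_input_len,
                                 dtype=torch.bool, device=device))
    mask = mask.repeat(batch, 1, 1)  # one mask per request in the batch
    return torch.logical_not(mask)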

def patch_paged_token_attention_inference():
-     # def paged_token_attention(q, k_cache, v_cache, out, q_head_num, kv_head_num, head_dim, b_seq_len, block_table:torch.Tensor, block_size):
-     #     ext.paged_attention(out, q, k_cache, v_cache, None, None,
-     #                         b_seq_len, block_table, q_head_num, kv_head_num,
-     #                         1.0 / math.sqrt(head_dim), "BSH", block_size, 0,
-     #                         None, None, None, None, None, None, None, None
-     #                         )
-     #     return out
+     def paged_token_attention(out, q, k_cache, v_cache, b_seq_len, q_head_num,
+                               kv_head_num, head_dim, block_table, block_size):
+         ext.paged_attention(out, q, k_cache, v_cache, None, b_seq_len, q_head_num,
+                             kv_head_num, head_dim, block_table, block_size)

-     token_attention_pack.paged_token_attention = ext.paged_attention

+     token_attention_pack.paged_token_attention = paged_token_attention
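The C++ side only registers ops whose DIOPI symbols resolve (the `!= nullptr` guards in the PYBIND11_MODULE hunk above). A Python-side counterpart — a sketch, not part of this commit — could guard the monkey-patch on the binding actually being exported, where `wrapper` is the paged_token_attention defined above:

def patch_paged_token_attention_if_available(token_attention_pack, ext, wrapper):
    # Mirrors the nullptr guards in extensions.cpp: only patch lightllm when
    # the extension module actually exports paged_attention.
    if hasattr(ext, "paged_attention"):
        token_attention_pack.paged_token_attention = wrapper
        return True
    return False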

def patch_token_attention_inference():
token_attention_pack.token_att_fwd = ext.token_attention_inference
