diff --git a/flash_attn/flash_attn_interface.py b/flash_attn/flash_attn_interface.py
index 1e041e4538d..efed9c43e59 100644
--- a/flash_attn/flash_attn_interface.py
+++ b/flash_attn/flash_attn_interface.py
@@ -10,7 +10,7 @@
 # We need to import the CUDA kernels after importing torch
 USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
 if USE_TRITON_ROCM:
-    from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
+    from .flash_attn_triton_amd import flash_attn_2 as flash_attn_gpu
 else:
     import flash_attn_2_cuda as flash_attn_gpu
@@ -127,7 +127,10 @@ def _flash_attn_forward_fake(
     softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
     p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
     if return_softmax:
-        p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
+        if torch.cuda.is_available() and torch.version.hip:
+            p = torch.empty((batch_size, num_heads, seqlen_q, seqlen_k), dtype=q.dtype, device=q.device, layout=q.layout)
+        else:
+            p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
     rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
     return out, softmax_lse, p, rng_state
@@ -220,10 +223,11 @@ def _flash_attn_varlen_forward_fake(
     out = torch.empty_like(q)
     softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
     p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
-    seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
-    seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
     if return_softmax:
-        p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
+        if torch.cuda.is_available() and torch.version.hip:
+            p = torch.empty((batch_size, num_heads, max_seqlen_q, max_seqlen_k), dtype=q.dtype, device=q.device, layout=q.layout)
+        else:
+            p = torch.empty((batch_size, num_heads, round_multiple(max_seqlen_q, 128), round_multiple(max_seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
     rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
     return out, softmax_lse, p, rng_state
@@ -315,7 +319,10 @@ def _flash_attn_backward_fake(
     if dv is None:
         dv = torch.empty_like(v)
     batch_size, seqlen_q, num_heads, _ = q.shape
-    softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
+    if torch.cuda.is_available() and torch.version.hip:
+        softmax_d = torch.empty((batch_size, num_heads, seqlen_q), device=q.device, dtype=torch.float32)
+    else:
+        softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
     return softmax_d
@@ -426,7 +433,10 @@ def _flash_attn_varlen_backward_fake(
         dk = torch.empty_like(k)
     if dv is None:
         dv = torch.empty_like(v)
-    softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
+    if torch.cuda.is_available() and torch.version.hip:
+        softmax_d = torch.empty((num_heads, total_q), device=q.device, dtype=torch.float32)
+    else:
+        softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
     return softmax_d
diff --git a/flash_attn/flash_attn_triton_amd/__init__.py b/flash_attn/flash_attn_triton_amd/__init__.py
index
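The fake ("meta") functions patched above are the shape-only implementations used when the ops are traced (e.g. under torch.compile), so their output shapes must match what the real backend allocates: the Triton AMD path returns the attention-probability and softmax_d buffers at the exact sequence lengths, while the CUDA path pads them to multiples of 128. A minimal sketch of that shape difference (illustrative only; round_multiple is assumed to be the usual round-up helper, and the sizes are made up):

    # Not part of the patch: sketch of the HIP-vs-CUDA fake-tensor shapes.
    def round_multiple(x, m):
        # assumed definition of the helper used by flash_attn_interface.py
        return (x + m - 1) // m * m

    batch_size, num_heads, seqlen_q, seqlen_k = 2, 8, 300, 500

    # ROCm/Triton backend (torch.version.hip is set): exact sequence lengths
    p_shape_hip = (batch_size, num_heads, seqlen_q, seqlen_k)            # (2, 8, 300, 500)

    # CUDA backend: padded up to multiples of 128
    p_shape_cuda = (batch_size, num_heads,
                    round_multiple(seqlen_q, 128),
                    round_multiple(seqlen_k, 128))                       # (2, 8, 384, 512)

The backend itself is selected at import time, as in the first hunk: setting FLASH_ATTENTION_TRITON_AMD_ENABLE=TRUE before importing flash_attn makes flash_attn_gpu resolve to flash_attn_triton_amd.flash_attn_2 (the new interface_v2 module) instead of flash_attn_2_cuda.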
e69de29bb2d..78f85fb268f 100644 --- a/flash_attn/flash_attn_triton_amd/__init__.py +++ b/flash_attn/flash_attn_triton_amd/__init__.py @@ -0,0 +1,4 @@ +from . import interface_v2 as flash_attn_2 +from . import interface_v3 as flash_attn_3 + +__all__ = ["flash_attn_2", "flash_attn_3"] diff --git a/flash_attn/flash_attn_triton_amd/bench.py b/flash_attn/flash_attn_triton_amd/bench.py deleted file mode 100755 index e19de575c8c..00000000000 --- a/flash_attn/flash_attn_triton_amd/bench.py +++ /dev/null @@ -1,1391 +0,0 @@ -import os -import sys -import torch -import triton -import time -import argparse -import itertools -import logging -import warnings -import datetime -import pandas as pd -from logging import warning -from typing import Dict, List, Literal, Optional, Tuple -from dataclasses import dataclass -from functools import lru_cache -from utils import get_arch, input_helper - -DEBUG = False - -ENV_FLAGS = ["FLASH_ATTENTION_TRITON_AMD_ENABLE", "FLASH_ATTENTION_TRITON_AMD_AUTOTUNE", "FLASH_ATTENTION_TRITON_AMD_DEBUG"] - -FUNCTIONS = [ - "flash_attn_func", - "flash_attn_fp8_func", - "flash_attn_kvpacked_func", - "flash_attn_qkvpacked_func", - "flash_attn_qkvpacked_fp8_func", - "flash_attn_varlen_func", - "flash_attn_varlen_fp8_func", - "flash_attn_varlen_kvpacked_func", - "flash_attn_varlen_qkvpacked_func", - "flash_attn_varlen_qkvpacked_fp8_func", - "flash_attn_with_kvcache", -] - -SUPPORTED_DTYPES = { - "flash_attn_func": [torch.float16], - "flash_attn_fp8_func": [torch.float8_e4m3fnuz], - "flash_attn_kvpacked_func": [torch.float16], - "flash_attn_qkvpacked_func": [torch.float16], - "flash_attn_qkvpacked_fp8_func": [torch.float16], - "flash_attn_varlen_func": [torch.float16], - "flash_attn_varlen_fp8_func": [torch.float8_e4m3fnuz], - "flash_attn_varlen_kvpacked_func": [torch.float16], - "flash_attn_varlen_qkvpacked_func": [torch.float16], - "flash_attn_varlen_qkvpacked_fp8_func": [torch.float16], - "flash_attn_with_kvcache": [torch.float16], -} - -SUPPORTED_BACKENDS = { - "flash_attn_func": ["ck", "triton"], - "flash_attn_fp8_func": ["triton"], - "flash_attn_kvpacked_func": ["ck", "triton"], - "flash_attn_qkvpacked_func": ["ck", "triton"], - "flash_attn_qkvpacked_fp8_func": ["triton"], - "flash_attn_varlen_func": ["ck", "triton"], - "flash_attn_varlen_fp8_func": ["triton"], - "flash_attn_varlen_kvpacked_func": ["ck", "triton"], - "flash_attn_varlen_qkvpacked_func": ["ck", "triton"], - "flash_attn_varlen_qkvpacked_fp8_func": ["triton"], - "flash_attn_with_kvcache": ["ck", "triton"], -} - -VALID_MODES = ['fwd', 'bwd', 'full'] -SUPPORTED_MODES = { - "flash_attn_func": ["fwd", "bwd", "full"], - "flash_attn_fp8_func": ["fwd", "bwd", "full"], - "flash_attn_kvpacked_func": ["fwd", "bwd", "full"], - "flash_attn_qkvpacked_func": ["fwd", "bwd", "full"], - "flash_attn_qkvpacked_fp8_func": ["fwd", "bwd", "full"], - "flash_attn_varlen_func": ["fwd", "bwd", "full"], - "flash_attn_varlen_fp8_func": ["fwd", "bwd", "full"], - "flash_attn_varlen_kvpacked_func": ["fwd", "bwd", "full"], - "flash_attn_varlen_qkvpacked_func": ["fwd", "bwd", "full"], - "flash_attn_varlen_qkvpacked_fp8_func": ["fwd", "bwd", "full"], - "flash_attn_with_kvcache": ["fwd"], -} - - -# Add a global variable for verbose mode -VERBOSE = False - -@dataclass -class EnvVariableConfig: - key: str - values: List[str] - backend: Optional[Literal["triton", "ck"]] = None - -ENV_VARIABLE_CONFIGS : List[EnvVariableConfig] = [ - # EnvVariableConfig(key="BWD_MODE", values=["split", "fused_atomics", "fused_no_atomics"], backend="triton"), -] - -class 
FunctionConfig: - def __init__(self, fn_name: str, mode: Literal["fwd", "bwd", "full"], dtype, backend: Literal["triton", "ck"], env_config: Dict): - self.fn_name = fn_name - self.mode: Literal["fwd", "bwd", "full"] = mode - self.dtype = dtype - self.backend: Literal["triton", "ck"] = backend - self.arch = get_arch() - self.env_configs = env_config - - def __str__(self): - # extract base dtype name if it's a torch dtype - dtype_str = str(self.dtype) - if "torch." in dtype_str: - dtype_str = dtype_str.split(".")[-1] - - if len(self.env_configs) > 0: - env_str = "" - for env_key, env_value in self.env_configs.items(): - env_str += f"{env_key}={env_value}" - return f"{self.fn_name}_{self.mode}_{dtype_str}_{self.backend}_{self.arch}_{env_str}" - else: - return f"{self.fn_name}_{self.mode}_{dtype_str}_{self.backend}_{self.arch}" - - def column_name(self): - return f"{self}_ms" -def generate_fn_inputs( - fn_name: str, - BATCH: int, - HQ: int, - HK: int, - N_CTX_Q: int, - N_CTX_K: int, - D_HEAD: int, - CAUSAL: bool, - DROPOUT_P: float, - dtype: torch.dtype, - device: Literal["cpu", "cuda"] - ): - if fn_name == "flash_attn_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", device=device) - elif fn_name == "flash_attn_kvpacked_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", packing="kv", device=device) - elif fn_name == "flash_attn_qkvpacked_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", packing="qkv", device=device) - elif fn_name == "flash_attn_varlen_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="thd", device=device) - elif fn_name == "flash_attn_varlen_kvpacked_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="thd", packing="kv", device=device) - elif fn_name == "flash_attn_varlen_qkvpacked_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="thd", packing="qkv", device=device) - elif fn_name == "flash_attn_with_kvcache": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", device=device) - elif fn_name == "flash_attn_fp8_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", device=device) - elif fn_name == "flash_attn_qkvpacked_fp8_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="bshd", packing="qkv", device=device) - elif fn_name == "flash_attn_varlen_fp8_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="thd", device=device) - elif fn_name == "flash_attn_varlen_qkvpacked_fp8_func": - return input_helper(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT_P, dtype, layout="thd", packing="qkv", device=device) - else: - valid_fn_names = ", ".join(FUNCTIONS) - raise ValueError(f"{fn_name} should be one of the following functions. 
{valid_fn_names}") - -def estimate_memory(config): - batch, hq, hk, sq, sk, d_head, causal, dropout = config - memory_estimate = batch * (hq * sq + hk * sk) * d_head * 4 # bytes - return memory_estimate - -def generate_benchmark_configs(is_varlen: bool, packing: Optional[Literal["kv", "qkv"]]): - """ - generates a small number of configs that cover the parameter space well - """ - - # define all parameter options as lists - batch_sizes = [1, 64] - if packing == "qkv": - hq_values = hk_values = [2, 8] - sq_values = sk_values = [256, 8192] - else: - if is_varlen: # make sure the seqlen is greater than the batchsize so that subsequences are greater than 0 - hq_values = [16, 32] # test mqa/gqa - hk_values = [8, 16] - sq_values = [128, 512] - sk_values = [512, 2024] - else: - hq_values = [64, 128] # test mqa/gqa - hk_values = [16, 64] - sq_values = [4, 4096] - sk_values = [4096, 16384] # test large k values for inference perf - d_head_values = [64, 128] - causal_values = [True, False] # most models usual causal True - dropout_values = [0.0, 0.1] - - # generate all fn_configs without inputs - input_configs = [] - - # one big loop to generate configs - for batch in batch_sizes: - for hq in hq_values: - for hk in hk_values: - for sq in sq_values: - for sk in sk_values: - for d_head in d_head_values: - for causal in causal_values: - for dropout in dropout_values: - # filter configs - input_config = (batch, hq, hk, sq, sk, d_head, causal, dropout) - - # skip if memory usage would be too high - if estimate_memory(input_config) > 8 * 1024 * 1024 * 1024: # 8 GB limit - continue - - # we need hq to be a multiple of hk - if hq % hk != 0: - continue - - # for qkvpacked functions, q and k must have same dimensions - if packing == "qkv" and (sq != sk or hq != hk): - continue - - input_configs.append(input_config) - - return input_configs - -def create_benchmark_fn( - flash_attn, - fn_name, - fn_input, - mode: Literal["fwd", "bwd", "full"] -): - if DEBUG: - print("create_benchmark_fn") - print("flash_attn:", flash_attn) - print("fn_name:", fn_name) - print("fn_input:", len(fn_input)) - print("mode:", mode) - - if fn_name == "flash_attn_func": - q, k, v, do, metadata = fn_input - if mode == "fwd": - def flash_attn_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out - elif mode == "bwd": - out, lse, S_dmask = flash_attn.flash_attn_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_bench_fn(): - dq, dk, dv = torch.autograd.grad(out, (q, k, v), do, retain_graph=True) - return dq, dk, dv - elif mode == "full": - def flash_attn_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq, dk, dv = torch.autograd.grad(out, (q, k, v), do, retain_graph=True) - return dq, dk, dv - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_bench_fn - - elif fn_name == "flash_attn_kvpacked_func": - q, kv, do, metadata = fn_input - if mode == "fwd": - def flash_attn_kvpacked_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_kvpacked_func( - q, - kv, - 
metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out - elif mode == "bwd": - out, lse, S_dmask = flash_attn.flash_attn_kvpacked_func( - q, - kv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_kvpacked_bench_fn(): - dq, dkv = torch.autograd.grad(out, (q, kv), do, retain_graph=True) - return dq, dkv - elif mode == "full": - def flash_attn_kvpacked_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_kvpacked_func( - q, - kv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq, dkv = torch.autograd.grad(out, (q, kv), do, retain_graph=True) - return dq, dkv - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_kvpacked_bench_fn - elif fn_name == "flash_attn_qkvpacked_func": - qkv, do, metadata = fn_input - if mode == "fwd": - def flash_attn_qkvpacked_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out - elif mode == "bwd": - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_qkvpacked_bench_fn(): - dqkv = torch.autograd.grad(out, (qkv), do, retain_graph=True) - return dqkv - elif mode == "full": - def flash_attn_qkvpacked_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dqkv = torch.autograd.grad(out, (qkv), do, retain_graph=True) - return dqkv - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_qkvpacked_bench_fn - elif fn_name == "flash_attn_varlen_func": - q_unpad, k_unpad, v_unpad, do_unpad, metadata = fn_input - if mode == "fwd": - def flash_attn_varlen_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out_unpad - elif mode == "bwd": - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_varlen_bench_fn(): - dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(out_unpad, (q_unpad, k_unpad, v_unpad), do_unpad, retain_graph=True) - return dq_unpad, dk_unpad, dv_unpad - elif mode == "full": - def flash_attn_varlen_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, 
- metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(out_unpad, (q_unpad, k_unpad, v_unpad), do_unpad, retain_graph=True) - return dq_unpad, dk_unpad, dv_unpad - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_varlen_bench_fn - elif fn_name == "flash_attn_varlen_kvpacked_func": - q_unpad, kv_unpad, do_unpad, metadata = fn_input - if mode == "fwd": - def flash_attn_varlen_kvpacked_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_kvpacked_func( - q_unpad, - kv_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out_unpad - elif mode == "bwd": - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_kvpacked_func( - q_unpad, - kv_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_varlen_kvpacked_bench_fn(): - dq_unpad, dkv_unpad = torch.autograd.grad(out_unpad, (q_unpad, kv_unpad), do_unpad, retain_graph=True) - return dq_unpad, dkv_unpad - elif mode == "full": - def flash_attn_varlen_kvpacked_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_kvpacked_func( - q_unpad, - kv_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq_unpad, dkv_unpad = torch.autograd.grad(out_unpad, (q_unpad, kv_unpad), do_unpad, retain_graph=True) - return dq_unpad, dkv_unpad - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_varlen_kvpacked_bench_fn - elif fn_name == "flash_attn_varlen_qkvpacked_func": - qkv_unpad, do_unpad, metadata = fn_input - if mode == "fwd": - def flash_attn_varlen_qkvpacked_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out_unpad - elif mode == "bwd": - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_varlen_qkvpacked_bench_fn(): - dqkv_unpad = torch.autograd.grad(out_unpad, (qkv_unpad), do_unpad, retain_graph=True) - return dqkv_unpad - elif mode == "full": - def flash_attn_varlen_qkvpacked_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - 
alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dqkv_unpad = torch.autograd.grad(out_unpad, (qkv_unpad), do_unpad, retain_graph=True) - return dqkv_unpad - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_varlen_qkvpacked_bench_fn - elif fn_name == "flash_attn_with_kvcache": - q, k_cache, v_cache, _, metadata = fn_input - if mode == "fwd": - def flash_attn_with_kvcache_bench_fn(): - out = flash_attn.flash_attn_with_kvcache( - q, - k_cache, - v_cache, - None, - None, - rotary_cos=None, - rotary_sin=None, - cache_seqlens=None, - cache_batch_idx=None, - cache_leftpad=None, - block_table=None, - causal=metadata.causal, - window_size=(-1, -1), - rotary_interleaved=False, - alibi_slopes=None, - num_splits=0, - ) - return out - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_with_kvcache_bench_fn - elif fn_name == "flash_attn_fp8_func": - (q, descale_q), (k, descale_k), (v, descale_v), (do, descale_do), metadata = fn_input - if mode == "fwd": - def flash_attn_f8_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_fp8_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out - elif mode == "bwd": - out, lse, S_dmask = flash_attn.flash_attn_fp8_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_f8_bench_fn(): - dq, dk, dv = torch.autograd.grad(out, (q, k, v), do, retain_graph=True) - return dq, dk, dv - elif mode == "full": - def flash_attn_f8_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_fp8_func( - q, - k, - v, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq, dk, dv = torch.autograd.grad(out, (q, k, v), do, retain_graph=True) - return dq, dk, dv - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_f8_bench_fn - elif fn_name == "flash_attn_qkvpacked_fp8_func": - qkv, do, metadata = fn_input - if mode == "fwd": - def flash_attn_qkvpacked_fp8_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_fp8_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out - elif mode == "bwd": - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_fp8_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_qkvpacked_fp8_bench_fn(): - dqkv = torch.autograd.grad(out, (qkv), do, retain_graph=True) - return dqkv - elif mode == "full": - def flash_attn_qkvpacked_fp8_bench_fn(): - out, lse, S_dmask = flash_attn.flash_attn_qkvpacked_fp8_func( - qkv, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0, - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dqkv = torch.autograd.grad(out, (qkv), do, retain_graph=True) - return dqkv - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_qkvpacked_fp8_bench_fn - elif fn_name == "flash_attn_varlen_fp8_func": - (q_unpad, descale_q), (k_unpad, descale_k), (v_unpad, 
descale_v), (do_unpad, descale_do), metadata = fn_input - if mode == "fwd": - def flash_attn_varlen_fp8_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_fp8_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out_unpad - elif mode == "bwd": - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_fp8_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_varlen_fp8_bench_fn(): - dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(out_unpad, (q_unpad, k_unpad, v_unpad), do_unpad, retain_graph=True) - return dq_unpad, dk_unpad, dv_unpad - elif mode == "full": - def flash_attn_varlen_fp8_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_fp8_func( - q_unpad, - k_unpad, - v_unpad, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(out_unpad, (q_unpad, k_unpad, v_unpad), do_unpad, retain_graph=True) - return dq_unpad, dk_unpad, dv_unpad - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_varlen_fp8_bench_fn - elif fn_name == "flash_attn_varlen_qkvpacked_fp8_func": - qkv_unpad, do_unpad, metadata = fn_input - if mode == "fwd": - def flash_attn_varlen_qkvpacked_fp8_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_fp8_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - return out_unpad - elif mode == "bwd": - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_fp8_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - def flash_attn_varlen_qkvpacked_fp8_bench_fn(): - dqkv_unpad = torch.autograd.grad(out_unpad, (qkv_unpad), do_unpad, retain_graph=True) - return dqkv_unpad - elif mode == "full": - def flash_attn_varlen_qkvpacked_fp8_bench_fn(): - out_unpad, lse, S_dmask = flash_attn.flash_attn_varlen_qkvpacked_fp8_func( - qkv_unpad, - metadata.cu_seqlens_q, - metadata.max_seqlens_q, - metadata.dropout_p, - causal=metadata.causal, - window_size=(-1, -1), - softcap=0.0 , - alibi_slopes=None, - deterministic=False, - return_attn_probs=True, - ) - dqkv_unpad = torch.autograd.grad(out_unpad, (qkv_unpad), do_unpad, retain_graph=True) - return dqkv_unpad - else: - raise ValueError(f"Unsupported benchmarking mode: {mode}") - - return flash_attn_varlen_qkvpacked_fp8_bench_fn - else: - valid_fn_names = ", ".join(FUNCTIONS) - raise ValueError(f"{fn_name} should be one of the following functions. 
{valid_fn_names}") - -def get_packing_type(fn_name: str) -> Optional[Literal["kv", "qkv"]]: - if "_kvpacked" in fn_name: - packing = "kv" - elif "_qkvpacked" in fn_name: - packing = "qkv" - else: - packing = None - - return packing - -def load_flash_attn_module(backend: Literal["triton", "ck"], env_configs: Dict = {}): - """ - Load the flash_attn module with the specified backend configuration - """ - global VERBOSE - - # remove any existing env variables first - for key in ENV_FLAGS: - if key in os.environ: - del os.environ[key] - - # set environment variable for the desired backend - if backend == "triton": - os.environ["FLASH_ATTENTION_TRITON_AMD_ENABLE"] = "TRUE" - os.environ["FLASH_ATTENTION_TRITON_AMD_DEBUG"] = "0" - os.environ["FLASH_ATTENTION_TRITON_AMD_AUTOTUNE"] = "1" - elif backend == "ck": - os.environ["FLASH_ATTENTION_TRITON_AMD_ENABLE"] = "FALSE" - else: - raise ValueError(f"Unknown backend {backend}") - - # add custom env configs - add_env_configs(env_configs) - - if VERBOSE: # Only print if both local and global verbose are True - print(f"Loading flash_attn module with {backend} backend.") - - # Remove any existing flash_attn modules from sys.modules - for module_name in list(sys.modules.keys()): - if module_name.startswith('flash_attn'): - del sys.modules[module_name] - - # Clear CUDA cache - torch.cuda.empty_cache() - - # Import and return the module - import flash_attn - - # disable triton printing from autotuning - if not VERBOSE: - os.environ["TRITON_PRINT_AUTOTUNING"] = "0" - - return flash_attn - -def add_env_configs(env_config: Dict): - for env_key, env_value in env_config.items(): - if env_key in os.environ: - del os.environ[env_key] # remove previous version so that env key is the latest key added - os.environ[env_key] = env_value - -def run_benchmark(func_config: FunctionConfig, input_configs): - """ - Runs the benchmark for the provided function configuration with the given input configurations. 
- """ - global VERBOSE - - # extract function configuration parameters - fn_name = func_config.fn_name - mode = func_config.mode - dtype = func_config.dtype - backend = func_config.backend - - # load flash attention module - flash_attn_module = load_flash_attn_module(backend, func_config.env_configs) - - # start timing the benchmark - start_time = time.time() - if VERBOSE: - print(f"Benchmarking {func_config} ...") - else: - print(f"Running {fn_name} ({mode}, {backend})...", end='', flush=True) - - # Setup benchmark configurations - bench_configs = [ - triton.testing.Benchmark( - x_names=["BATCH", "HQ", "HK", "N_CTX_Q", "N_CTX_K", "D_HEAD", "CAUSAL", "DROPOUT"], - x_vals=list(input_configs.keys()), - line_arg="provider", - line_vals=["triton"], - line_names=["Time (ms)"], - styles=[("red", "-")], - ylabel="ms", - plot_name=f"benchmark-{func_config}", - args={ - }, - ) - ] - - @triton.testing.perf_report(bench_configs) - def bench_function( - BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT, provider, device="cuda" - ): - if DEBUG: - print("BATCH:", BATCH) - print("HQ:", HQ) - print("HK:", HK) - print("N_CTX_Q:", N_CTX_Q) - print("N_CTX_Q:", N_CTX_Q) - print("D_HEAD:", D_HEAD) - print("CAUSAL:", CAUSAL) - print("DROPOUT:", DROPOUT) - print("mode:", mode) - print("provider:", provider) - print("device:", device) - fn_input = input_configs[(BATCH, HQ, HK, N_CTX_Q, N_CTX_K, D_HEAD, CAUSAL, DROPOUT)] - benchmark_fn = create_benchmark_fn(flash_attn_module, fn_name, fn_input, mode) - - # run the benchmark - ms = triton.testing.do_bench(benchmark_fn, warmup=25, rep=100) - return ms - - df = bench_function.run(return_df=True)[0] - - # set the column name to reflect the function configuration - df = df.rename(columns={"Time (ms)": func_config.column_name()}) - - # calculate and print elapsed time - elapsed_time = time.time() - start_time - - return df, elapsed_time - -def filter_modes(requested_modes, fn_name, supported_modes_for_fn): - modes_to_run = [] - if requested_modes: - for mode in requested_modes: - if mode in supported_modes_for_fn: - modes_to_run.append(mode) - else: - warning(f"Mode '{mode}' requested but not supported by function '{fn_name}'. 
Skipping this mode for this function.") - else: - modes_to_run = ["full" if "full" in supported_modes_for_fn else "fwd"] - return modes_to_run - -def get_env_value_combinations(current_backend: Optional[Literal["triton", "ck"]]) -> List[Dict[str, str]]: - # filter environment variations applicable to the current backend - applicable_variations = [ - var_config for var_config in ENV_VARIABLE_CONFIGS - if var_config.backend is None or var_config.backend == current_backend - ] - - if not applicable_variations: - # no applicable variations, return list with empty dict - return [{}] - - # prepare keys and value lists - variation_keys = [v.key for v in applicable_variations] - variation_value_lists = [v.values for v in applicable_variations] - - # generate all combinations as dictionaries directly - env_configs = [] - for value_combination in itertools.product(*variation_value_lists): - env_configs.append(dict(zip(variation_keys, value_combination))) - - return env_configs - -def get_input_config_set(config_type): - if config_type == "llama": - # batch, hq, hk, sq, sk, d_head, causal, dropout - input_configs = [ - # LLaMA 3 8B - (4, 32, 8, 8192, 8192, 128, True, 0.0), - # LLaMA 3 70B - (4, 64, 8, 8192, 8192, 128, True, 0.0), - ] - else: - raise ValueError(f"Unknown input config: {config_type}") - - return input_configs - -def available_backends(): - """Check which backends are available by trying to load them.""" - available = [] - - for backend in ["triton", "ck"]: - try: - # try loading the module with this backend - load_flash_attn_module(backend) - available.append(backend) - except Exception as e: - # backend not available, just continue - if DEBUG: - print(f"Backend {backend} not available: {e}") - - if not available: - raise ValueError("No backends are available. Please check your flash_attn installation.") - - return available - -# 2. Simplify get_fn_params to remove the backend filtering logic here -@lru_cache() -def get_fn_params(fn_name): - # get params for fn - packing = get_packing_type(fn_name) - is_varlen = True if "varlen" in fn_name else False - is_fp8 = True if "fp8" in fn_name else False - supported_dtypes = SUPPORTED_DTYPES.get(fn_name, [torch.float16]) - supported_backends = SUPPORTED_BACKENDS.get(fn_name, ["triton"]) # just get what the function supports - supports_backward = False if fn_name in ["flash_attn_with_kvcache"] else True - supported_modes = SUPPORTED_MODES.get(fn_name, ["fwd"]) - device = "cuda" - - # get supported env configs for each backend - supported_env_configs = {} - for backend in supported_backends: - supported_env_configs[backend] = get_env_value_combinations(backend) - - # check backward pass support - if not supports_backward: - warning(f"{fn_name} does not have a backward pass so benching forward pass only.") - - return is_varlen, is_fp8, packing, supported_dtypes, supported_backends, supported_modes, supported_env_configs, device - -# 3. Create a new simpler function to validate and filter backends -def validate_backends(requested_backends, supported_backends, fn_name): - """Validate that requested backends are available and supported.""" - # get actually available backends - available = available_backends() - - # determine which backends to use - if requested_backends: - # user specified backends - validate them - valid_backends = [] - for backend in requested_backends: - if backend not in available: - warning(f"Backend '{backend}' is not available on this system. 
Skipping.") - continue - if backend not in supported_backends: - warning(f"Backend '{backend}' is not supported by function '{fn_name}'. Skipping.") - continue - valid_backends.append(backend) - - if not valid_backends: - raise ValueError(f"None of the requested backends {requested_backends} are available and supported for {fn_name}") - - return valid_backends - else: - # no backends specified - use all available and supported - valid_backends = [b for b in supported_backends if b in available] - - if not valid_backends: - raise ValueError(f"No available backends found for {fn_name}. Function supports {supported_backends} but only {available} are available.") - - return valid_backends - -# 4. Update process_args to use the new validate_backends function -def process_args(): - """ - Parses command-line arguments and returns function configs and input configs. - """ - global VERBOSE - - # create parser - parser = argparse.ArgumentParser( - prog="Benchmark FlashAttention", - allow_abbrev=False, - ) - # functions - parser.add_argument( - "-benchmark_fn", - type=str, - nargs="*", - choices=FUNCTIONS, - required=True, - help=f"Function(s) to benchmark", - ) - parser.add_argument( - "--mode", - type=str, - nargs='*', - choices=VALID_MODES, - default=["fwd", "bwd"], - help=f"Benchmarking mode(s) to run. Default: fwd, bwd", - ) - parser.add_argument( - "--backend", - type=str, - nargs='*', - choices=["triton", "ck"], - default=["triton"], - help="Backend(s) to run. Default: triton", - ) - parser.add_argument( - "--output", - type=str, - choices=["ms", "tflops"], - default="tflops", - help="Output metric type: ms (milliseconds) or tflops (TFLOPS). Default: tflops", - ) - parser.add_argument( - "--format", - type=str, - choices=["csv", "markdown"], - default="csv", - help="Output file format: csv or markdown. 
Default: csv", - ) - parser.add_argument( - "--verbose", "-v", - action="store_true", - help="Enable verbose output (show autotuning details)", - ) - # config - parser.add_argument("-b", type=int, default=None, help="Batch size") - parser.add_argument("-hq", type=int, default=None, help="Q Number of heads") - parser.add_argument("-hk", type=int, default=None, help="K and V Number of heads") - parser.add_argument("-sq", type=int, default=None, help="Q Sequence Length") - parser.add_argument("-sk", type=int, default=None, help="K and V Sequence Length") - parser.add_argument("-d", type=int, default=None, help="Head Dimension") - parser.add_argument("-causal", action="store_true", default=None, help="Causal") - parser.add_argument("-dropout", type=float, default=None, help="Dropout") - - # parse args - args = parser.parse_args() - - # Set global verbose flag - VERBOSE = args.verbose - - # parse function args - benchmark_fns = args.benchmark_fn - requested_modes = args.mode - requested_backends = args.backend - output_type: Literal["ms", "tflops"] = args.output - output_format: Literal["csv", "markdown"] = args.format - - # generate function configurations and input configurations separately - all_function_configs = [] - all_input_configs = {} # Maps function config -> input configs - - for fn_name in benchmark_fns: - is_varlen, is_fp8, packing, supported_dtypes, supported_backends, supported_modes_for_fn, supported_env_configs, device = get_fn_params(fn_name) - - # Generate or use custom input configurations - if args.b or args.hq or args.hk or args.sq or args.sk or args.d: - assert args.b and args.hq and args.sq and args.d, ( - "if custom config is specified, please provide at least batch, number of Q heads, Q sequence length, and head size." - ) - - batch = args.b - hq = args.hq - hk = args.hk if args.hk is not None else args.hq - sq = args.sq - sk = args.sk if args.sk is not None else args.sq - d_head = args.d - causal = args.causal if args.causal is not None else False - dropout = args.dropout if args.dropout is not None else 0.0 - input_configs = [(batch, hq, hk, sq, sk, d_head, causal, dropout)] - else: - input_configs = get_input_config_set("llama") - - # filter by mode - modes_to_run = filter_modes(requested_modes, fn_name, supported_modes_for_fn) - if not modes_to_run: - warning(f"No valid modes to run for function '{fn_name}' based on request and function support. Skipping this function.") - continue - - # validate and filter backends - try: - backends_to_run = validate_backends(requested_backends, supported_backends, fn_name) - except ValueError as e: - warning(str(e)) - continue - - # create a function config for each backend and dtype combination - for backend in backends_to_run: - for dtype in supported_dtypes: - for mode in modes_to_run: - for env_config in supported_env_configs[backend]: - func_config = FunctionConfig(fn_name, mode, dtype, backend, env_config) - all_function_configs.append(func_config) - - # Generate inputs for this function configuration - fn_inputs = {} - for input_config in input_configs: - fn_inputs[input_config] = generate_fn_inputs(fn_name, *input_config, dtype, device) - - all_input_configs[func_config] = fn_inputs - - return all_function_configs, all_input_configs, output_type, output_format - -def check_environment_variables(): - for key in ENV_FLAGS: - if key in os.environ: - raise ValueError(f"Running with {key} environment variable is not recommended for the benching script. 
Use --help to see how to use the benching script.") - -def compute_flops(batch, hq, hk, sq, sk, d_head, causal): - # 2 FLOPs per multiply‑add - if causal: - valid_pairs = ((sk * (sk + 1)) // 2 if sq > sk else - sq * sk - (sq * (sq - 1)) // 2) - else: - valid_pairs = sq * sk - return 2 * batch * hq * valid_pairs * d_head - -# see ref, https://github.com/ROCm/aiter/blob/jukorhon/mha-bwd/op_benchmarks/triton/bench_mha.py -def _flops_single_row(row: pd.Series, mode: str) -> float: - b, hq, d_head = int(row["BATCH"]), int(row["HQ"]), int(row["D_HEAD"]) - sq, sk = int(row["N_CTX_Q"]), int(row["N_CTX_K"]) - causal = bool(row["CAUSAL"]) - - # -------- number of (query, key) products per head ---------------- - if not causal: - valid_pairs = sq * sk - else: # triangular mask - if sq > sk: - valid_pairs = sk * (sk + 1) // 2 + (sq - sk) * sk - else: # sq <= sk - valid_pairs = sq * (sq + 1) // 2 - - # one matmul FLOPs (mul + add) = 2 · m · n · k - flops_per_matmul = 2.0 * b * hq * valid_pairs * d_head - total_flops = 2.0 * flops_per_matmul # 2 matmuls in forward - - if mode == "fwd": - pass - elif mode == "bwd": - total_flops *= 2.5 # 2·bwd + 0.5·recompute - elif mode == "full": - total_flops *= 3.5 # fwd + bwd - else: - raise ValueError(f"unknown mode {mode}") - - return total_flops - -def add_tflops_columns(df: pd.DataFrame, func_cfg: FunctionConfig) -> pd.DataFrame: - ms_col = func_cfg.column_name() - tf_col = ms_col.replace("_ms", "_tflops") - flops = df.apply(_flops_single_row, axis=1, mode=func_cfg.mode) - df[tf_col] = flops / df[ms_col] * 1e-9 - return df - -def generate_output_filename(function_configs, output_type, output_format): - # create a timestamp - timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") - - # simple filename format - base_filename = f"benchmark_{timestamp}" - - if output_format == "csv": - return base_filename + ".csv" - else: # markdown - return base_filename + ".md" - -def main(): - """ - Main function to run benchmarks. 
- """ - global VERBOSE - - # check environment variables - check_environment_variables() - - # start timing the entire benchmarking process - total_start_time = time.time() - - # process args to get function configs and input configs - function_configs, all_input_configs, output_type, output_format = process_args() - - # Print summary of what will be benchmarked (always show this) - print(f"\nBenchmarking {len(function_configs)} configuration(s):") - unique_fns = set(fc.fn_name for fc in function_configs) - print(f" Functions: {', '.join(unique_fns)}") - unique_backends = set(fc.backend for fc in function_configs) - print(f" Backends: {', '.join(unique_backends)}") - unique_modes = set(fc.mode for fc in function_configs) - print(f" Modes: {', '.join(unique_modes)}") - print() - - # run benchmarks for each function configuration - combined_ms_df = None - combined_tf_df = None - input_cols = ["BATCH", "HQ", "HK", "N_CTX_Q", "N_CTX_K", "D_HEAD", "CAUSAL", "DROPOUT"] - - for i, func_config in enumerate(function_configs, 1): - # Progress indicator - if not VERBOSE: - print(f"[{i}/{len(function_configs)}] ", end='') - - # run benchmark with the input configs for this function config - input_configs = all_input_configs[func_config] - df, elapsed_time = run_benchmark(func_config, input_configs) - - if VERBOSE: - print(f"Total time for benchmarking {func_config.fn_name} in {func_config.mode} mode with {func_config.dtype}: {elapsed_time:.2f} seconds") - - # add to combined table - df = add_tflops_columns(df, func_config) - ms_cols = [c for c in df.columns if c.endswith('_ms')] - tf_cols = [c for c in df.columns if c.endswith('_tflops')] - - ms_df = df[input_cols + ms_cols] - tf_df = df[input_cols + tf_cols] - - if combined_ms_df is None: - combined_ms_df = ms_df - combined_tf_df = tf_df - else: - combined_ms_df = combined_ms_df.merge(ms_df, on=input_cols, how="outer") - combined_tf_df = combined_tf_df.merge(tf_df, on=input_cols, how="outer") - - # print new line to seperate the combined data information from the benchmark specific information - print() - - # print total time for all benchmarks - total_elapsed_time = time.time() - total_start_time - print(f"Total benchmark time: {total_elapsed_time:.1f} seconds") - - # save combined data and make comparisons if we have multiple function configs - has_multiple_func_configs = False # len(function_configs) > 1 - if has_multiple_func_configs: - if len(function_configs) == 2: - func1 = function_configs[0] - func2 = function_configs[1] - - # construct column names for the timing results - col1 = func1.column_name() - col2 = func2.column_name() - - # Check if we're comparing triton vs ck (in either order) - is_triton_vs_ck = ( - (func1.backend == "triton" and func2.backend == "ck") or - (func1.backend == "ck" and func2.backend == "triton") - ) - - # For triton vs ck comparisons - if is_triton_vs_ck: - # For triton vs ck comparisons, always make triton the baseline - if func1.backend == "triton" and func2.backend == "ck": - triton_col = col1 - ck_col = col2 - ratio_col = f"ck_to_triton_ratio" - else: - triton_col = col2 - ck_col = col1 - ratio_col = f"ck_to_triton_ratio" - - # Calculate ratio: ck_time / triton_time (values > 1 mean triton is faster) - combined_ms_df[ratio_col] = combined_ms_df[ck_col] / combined_ms_df[triton_col] - - # print explanation - print(f"Comparison Results (triton vs ck):") - print(f"Ratio values: values > 1 mean triton is faster (by that factor), values < 1 mean ck is faster") - - # output based on selected metric - if output_type 
== "ms": - if combined_ms_df is not None: - filename = generate_output_filename(function_configs, "ms", output_format) - print(f"\nCombined wall-time (ms) table:") - print(combined_ms_df) - - if output_format == "csv": - combined_ms_df.to_csv(filename, index=False) - print(f"Results saved to: {filename}") - else: # markdown - with open(filename, 'w') as f: - f.write(combined_ms_df.to_markdown(index=False, floatfmt=".2f")) - print(f"Results saved to: {filename}") - else: # output_type == "tflops" - if combined_tf_df is not None: - filename = generate_output_filename(function_configs, "tflops", output_format) - print(f"\nCombined throughput (TFLOPs) table:") - print(combined_tf_df) - - if output_format == "csv": - combined_tf_df.to_csv(filename, index=False) - print(f"Results saved to: {filename}") - else: # markdown - with open(filename, 'w') as f: - f.write(combined_tf_df.to_markdown(index=False, floatfmt=".2f")) - print(f"Results saved to: {filename}") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/flash_attn/flash_attn_triton_amd/bwd.py b/flash_attn/flash_attn_triton_amd/bwd.py new file mode 100755 index 00000000000..085232cedc5 --- /dev/null +++ b/flash_attn/flash_attn_triton_amd/bwd.py @@ -0,0 +1,4997 @@ +import os +import torch +import triton # type: ignore +import triton.language as tl # type: ignore +from typing import Literal, Optional +from .utils import ( + DEBUG, + DROPOUT_USE_PYTORCH, + DROPOUT_DUMP, + compute_fp8_scaling_factors, + create_dropout_mask, + create_dropout_mask_varlen, + is_cdna, + is_fp8, +) + +# NOTE: triton fails to import tl.constexprs so create them here for the file +tl_DROPOUT_USE_PYTORCH: tl.constexpr = triton.language.constexpr(DROPOUT_USE_PYTORCH) +tl_DROPOUT_DUMP: tl.constexpr = triton.language.constexpr(DROPOUT_DUMP) + + +def get_autotune_configs(): + if False: + if is_cdna(): + # shared meta-parameters + NUM_STAGES = 1 + NUM_WARPS = 4 + WAVES_PER_EU = 2 + MATRIX_INSTR_NONKDIM = 16 + + preprocess_autotune_configs = [ + triton.Config( + { + "PRE_BLOCK": 128, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), # og config + triton.Config( + { + "PRE_BLOCK": 64, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "PRE_BLOCK": 32, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "PRE_BLOCK": 16, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + preprocess_autotune_keys = [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_HEAD_DIM_QK", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + "HQ", + "HK", + ] + causal_autotune_configs = [ + triton.Config( + { + "BLOCK_M1": 32, + "BLOCK_N1": 128, + "BLOCK_M2": 128, + "BLOCK_N2": 32, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), # og config + triton.Config( + { + "BLOCK_M1": 16, + "BLOCK_N1": 128, + "BLOCK_M2": 128, + "BLOCK_N2": 16, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "BLOCK_M1": 16, + "BLOCK_N1": 64, + 
"BLOCK_M2": 64, + "BLOCK_N2": 16, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "BLOCK_M1": 32, + "BLOCK_N1": 64, + "BLOCK_M2": 64, + "BLOCK_N2": 32, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + causal_autotune_keys = [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_HEAD_DIM_QK", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + "HQ", + "HK", + ] + noncausal_autotune_configs = [ + triton.Config( + { + "BLOCK_M1": 32, + "BLOCK_N1": 128, + "BLOCK_M2": 128, + "BLOCK_N2": 32, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), # og config + triton.Config( + { + "BLOCK_M1": 16, + "BLOCK_N1": 128, + "BLOCK_M2": 128, + "BLOCK_N2": 16, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "BLOCK_M1": 16, + "BLOCK_N1": 64, + "BLOCK_M2": 64, + "BLOCK_N2": 16, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + triton.Config( + { + "BLOCK_M1": 32, + "BLOCK_N1": 64, + "BLOCK_M2": 64, + "BLOCK_N2": 32, + "BLK_SLICE_FACTOR": 2, + "waves_per_eu": WAVES_PER_EU, + "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + noncausal_autotune_keys = [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_HEAD_DIM_QK", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + "HQ", + "HK", + ] + + return ( + (preprocess_autotune_configs, preprocess_autotune_keys), + (causal_autotune_configs, causal_autotune_keys), + (noncausal_autotune_configs, noncausal_autotune_keys), + ) + else: + raise ValueError("Unknown Device Type") + else: + # meta-parameters + # TODO: fix num_stages later + NUM_WARPS, NUM_STAGES = 4, 1 + WAVES_PER_EU = 1 + PRE_BLOCK = 128 + BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32 + BLK_SLICE_FACTOR = 2 + + assert BLOCK_N1 == BLOCK_M2 + + # configs for the kernels + preprocess_autotune_configs = [ + triton.Config( + {"PRE_BLOCK": PRE_BLOCK, "waves_per_eu": WAVES_PER_EU}, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + preprocess_autotune_keys = [ + "max_seqlen_q", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + ] + causal_autotune_configs = [ + triton.Config( + { + "BLOCK_M1": BLOCK_M1, + "BLOCK_N1": BLOCK_N1, + "BLOCK_M2": BLOCK_M2, + "BLOCK_N2": BLOCK_N2, + "BLK_SLICE_FACTOR": BLK_SLICE_FACTOR, + "waves_per_eu": WAVES_PER_EU, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + causal_autotune_keys = [ + "dropout_p", + "max_seqlen_q", + "max_seqlen_k", + "ACTUAL_HEAD_DIM_QK", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + "HQ", + "HK", + ] + noncausal_autotune_configs = [ + triton.Config( + { + "BLOCK_M1": BLOCK_M1, + "BLOCK_N1": BLOCK_N1, + "BLOCK_M2": BLOCK_M2, + "BLOCK_N2": BLOCK_N2, + "BLK_SLICE_FACTOR": BLK_SLICE_FACTOR, + "waves_per_eu": WAVES_PER_EU, + }, + num_stages=NUM_STAGES, + num_warps=NUM_WARPS, + ), + ] + noncausal_autotune_keys = [ + "dropout_p", + "max_seqlen_q", + "max_seqlen_k", + "ACTUAL_HEAD_DIM_QK", + "ACTUAL_HEAD_DIM_V", + "IS_VARLEN", + "HQ", + "HK", + ] + return ( + 
(preprocess_autotune_configs, preprocess_autotune_keys), + (causal_autotune_configs, causal_autotune_keys), + (noncausal_autotune_configs, noncausal_autotune_keys), + ) + + +( + (preprocess_autotune_configs, preprocess_autotune_keys), + (causal_autotune_configs, causal_autotune_keys), + (noncausal_autotune_configs, noncausal_autotune_keys), +) = get_autotune_configs() + + +# This function computes delta given output Out and gradient DO +# Here is the I/O shape: +# Out: (batch, nhead_q, max_seqlens_q, headDim) +# DO: (batch, nhead_q, max_seqlens_q, headDim) +# Delta: (batch, nheads_q, max_seqlens_q), same as softmax_lse defined at +@triton.jit +def _bwd_fused_atomics_preprocess( + o_ptr, + do_ptr, # noqa: E741 + delta_ptr, + stride_o_b, + stride_o_h, + stride_o_m, + stride_o_k, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_descale_do_z, + cu_seqlens_q, + max_seqlen_q, + descale_do_ptr, + BLOCK_M: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, +): + pid_m = tl.program_id(0) # seqlen + bid = tl.program_id(1) # batch + hid = tl.program_id(2) # head + + # Handle varlen + q_start = 0 + seqlen_q = max_seqlen_q + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + seqlen_q = q_end - q_start + else: + q_start = 0 + seqlen_q = max_seqlen_q + + # Compute offsets + offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + + # Offset O/DO by batch, head and q_start + offs = ( + bid * stride_o_b + + hid * stride_o_h + + q_start * stride_o_m + + offs_m[:, None] * stride_o_m + + offs_k[None, :] * stride_o_k + ) + + # create masks + mask_m = offs_m < seqlen_q + mask = mask_m[:, None] + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask &= offs_k[None, :] < BLOCK_D_MODEL + + # load [BLOCK_M, BLOCK_D_MODEL_POW2] + o = tl.load(o_ptr + offs, mask=mask, other=0.0) + do = tl.load(do_ptr + offs, mask=mask, other=0.0) + + # compute and write-back to delta + if IS_FP8: + descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hid) + + # NOTE: do is in the fp8 range and o is not in fp8 + delta = tl.sum(o.to(tl.float32) * (do.to(tl.float32) * descale_do), axis=1) + else: + delta = tl.sum(o.to(tl.float32) * do.to(tl.float32), axis=1) + + offs_delta = ( + bid * stride_delta_b + + hid * stride_delta_h + + q_start * stride_delta_m + + offs_m * stride_delta_m + ) + tl.store(delta_ptr + offs_delta, delta, mask=mask_m) + + +@triton.jit +def _bwd_fused_atomics_dq_inner( + dq, + q, + K, + V, + do, + m, + Delta, + sm_scale, + stride_qm, + stride_qk, + stride_kn, + stride_kk, + stride_vn, + stride_vk, + stride_dropout_m, + stride_dropout_n, + stride_deltam, + seqlen_q, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + MASK: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + RCP_LN2: tl.constexpr = 1.4426950408889634 + + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + delta_qk = seqlen_q - seqlen_k + offs_m = start_m + tl.arange(0, BLOCK_M) + offs_n = start_n + tl.arange(0, BLOCK_N) + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + + # mask to make sure not OOB of seqlen_q + mask_m = offs_m < 
seqlen_q + + kT_ptrs = K + offs_n[None, :] * stride_kn + offs_k[:, None] * stride_kk + vT_ptrs = V + offs_n[None, :] * stride_vn + offs_k[:, None] * stride_vk + + # D (= delta) is pre-divided by ds_scale. + Di = tl.load(Delta + offs_m * stride_deltam, mask=mask_m, other=0.0) + + curr_n = start_n + step_n = BLOCK_N + curr_philox_offset = batch_philox_offset + curr_dropout_offset = dropout_offset + for blk_idx in range(num_steps): + offs_n = curr_n + tl.arange(0, BLOCK_N) + # end_n is needed because the end of causal True might not be perfectly + # aligned with the end of the block + mask_n = offs_n < end_n + mask_kT = mask_n[None, :] + mask_mn = mask_m[:, None] & (offs_n[None, :] < end_n) + if PADDED_HEAD: + mask_kT &= offs_k[:, None] < BLOCK_D_MODEL + + kT = tl.load(kT_ptrs, mask=mask_kT, other=0.0) + vT = tl.load(vT_ptrs, mask=mask_kT, other=0.0) + + # dropout + if ENABLE_DROPOUT: + philox_offs = ( + curr_philox_offset + + offs_m[:, None] * stride_dropout_m + + offs_n[None, :] * stride_dropout_n + ) + rand_vals = tl.rand(philox_seed, philox_offs) + dropout_mask = rand_vals > dropout_p + dropout_scale = 1 / (1 - dropout_p) + + # qk + if IS_FP8: + qk = tl.dot(q, kT) * descale_q * descale_k + else: + qk = tl.dot(q, kT) + p = tl.math.exp2(qk * sm_scale * RCP_LN2 - m * RCP_LN2) + + if MASK: + causal_mask = (offs_m[:, None] - delta_qk) >= offs_n[None, :] + mask = causal_mask * mask_mn + p = tl.where(mask, p, 0.0) + + # dp + if IS_FP8: + dp = tl.dot(do, vT) * descale_do * descale_v + else: + dp = tl.dot(do, vT) + + if ENABLE_DROPOUT: + dp = tl.where(dropout_mask, dp, 0.0) * dropout_scale + + # ds + delta_i = Di[:, None] + ds = p * (dp - delta_i) + + # dq + # NOTE: We need to de-scale dq in the end, because kT was pre-scaled. + if IS_FP8: + scale_ds, descale_ds = compute_fp8_scaling_factors(ds, FP8_MAX) + dq += ( + tl.dot((ds * scale_ds).to(kT.type.element_ty), tl.trans(kT)) + * descale_ds + * descale_k + ) + else: + dq += tl.dot(ds.to(kT.type.element_ty), tl.trans(kT)) + + curr_n += step_n + kT_ptrs += step_n * stride_kn + vT_ptrs += step_n * stride_vn + return dq + + +@triton.jit +def _bwd_fused_atomics_dkdv_inner( + dk, + dv, + Q, + k, + v, + DO, + M, + D, + sm_scale, + stride_q_m, + stride_q_k, + stride_do_m, + stride_do_k, + stride_dropout_m, + stride_dropout_n, + stride_deltam, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + seqlen_q, + seqlen_k, + start_n, + start_m, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + MASK: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + delta_qk = seqlen_q - seqlen_k + offs_m = start_m + tl.arange(0, BLOCK_M) + offs_n = start_n + tl.arange(0, BLOCK_N) + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + + # mask to make sure not OOB of seqlen_q + mask_n = offs_n < seqlen_k + qT_ptrs = ( + Q + offs_m[None, :] * stride_q_m + offs_k[:, None] * stride_q_k + ) # [BLOCK_D_MODEL_POW2, BLOCK_M] + do_ptrs = DO + offs_m[:, None] * stride_do_m + offs_k[None, :] * stride_do_k + curr_m = start_m + step_m = BLOCK_M + curr_philox_offset = batch_philox_offset + curr_dropout_offset = dropout_offset + RCP_LN2: tl.constexpr = 1.4426950408889634 + + # Iterate over blocks(BLOCK_M size) of Q while calculating + # a fixed block(BLOCK_N) of dk and dv. Note, during backward + # pass P has to be recomputed. 
However, this kernel computes + # dV and dK, so we compute we need P^T and S^T. See backward pass + # equations + # + # From Flash Attention Paper: + # ForwardPass: S = QkT, P=softmax(S), O=PV + # + # BackwardPass equations + # dV = P^TdO + # dP = dOV^T + # dS = dsoftmax(dP) + # dQ = dSK + # dK = QdS^T + for blk_idx in range(num_steps): + offs_m = curr_m + tl.arange(0, BLOCK_M) + mask_m = offs_m < seqlen_q + mask_qT = mask_m[None, :] + mask_do = mask_m[:, None] + mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) + if PADDED_HEAD: + mask_qT &= offs_k[:, None] < BLOCK_D_MODEL + mask_do &= offs_k[None, :] < BLOCK_D_MODEL + + # load qT + qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) + + # dropout + if ENABLE_DROPOUT: + # NOTE: dropout is transposed because it is used to mask pT + philox_offs = ( + curr_philox_offset + + offs_m[None, :] * stride_dropout_m + + offs_n[:, None] * stride_dropout_n + ) + rand_vals = tl.rand(philox_seed, philox_offs) + dropout_mask = rand_vals > dropout_p + dropout_scale = 1.0 / (1 - dropout_p) + + # Load M + m = tl.load(M + offs_m * stride_deltam, mask=mask_m, other=0.0) + + # Compute qkT + if IS_FP8: + qkT = tl.dot(k, qT) * descale_q * descale_k + else: + qkT = tl.dot(k, qT) + + # Compute pT(use m and also apply sm_scale) + pT = tl.math.exp(qkT * sm_scale - m[None, :]) + + if MASK: + causal_mask = (offs_m[None, :] - delta_qk) >= offs_n[:, None] + mask = causal_mask & mask_nm + pT = tl.where(mask, pT, 0.0) + + # load DO + do = tl.load(do_ptrs, mask=mask_do, other=0.0) + + # dV + if ENABLE_DROPOUT: + pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale + if IS_FP8: + scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors( + pT_dropout, FP8_MAX + ) + dv += ( + tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do) + * descale_p_dropout + * descale_do + ) + else: + dv += tl.dot(pT_dropout.to(do.type.element_ty), do) + else: + if IS_FP8: + scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) + dv += ( + tl.dot((pT * scale_pT).to(do.type.element_ty), do) + * descale_pT + * descale_do + ) + else: + dv += tl.dot(pT.to(do.type.element_ty), do) + + # Load delta + Di = tl.load(D + offs_m * stride_deltam, mask=mask_m) + + # Compute dP and dS + if IS_FP8: + dpT = tl.dot(v, tl.trans(do)) * descale_v * descale_do + else: + dpT = tl.dot(v, tl.trans(do)) + + if ENABLE_DROPOUT: + dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale + + delta_i = Di[None, :] + dsT = pT * (dpT - delta_i) + + # compute dk + if IS_FP8: + scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) + dk += ( + tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) + * descale_dsT + * descale_q + ) + else: + dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) + + # increment pointers + curr_m += step_m + qT_ptrs += step_m * stride_q_m + do_ptrs += step_m * stride_do_m + + return dk, dv + + +@triton.jit +def _bwd_fused_atomics_dkdvdq_inner( + dk, + dv, + Q, + k, + v, + DO, + DQ, + M, + D, + sm_scale, + stride_q_m, + stride_q_k, + stride_dq_m, + stride_dq_k, + stride_do_m, + stride_do_k, + stride_dropout_m, + stride_dropout_n, + stride_deltam, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + seqlen_q, + seqlen_k, + start_n, + start_m, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + MASK: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + 
workgroup_id: tl.int32, +): + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + delta_qk = seqlen_q - seqlen_k + offs_m = start_m + tl.arange(0, BLOCK_M) + offs_n = start_n + tl.arange(0, BLOCK_N) + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + + # mask to make sure not OOB of seqlen_q + mask_n = offs_n < seqlen_k + + qT_ptrs_start = ( + Q + offs_m[None, :] * stride_q_m + offs_k[:, None] * stride_q_k + ) # [BLOCK_D_MODEL_POW2, BLOCK_M] + dq_ptrs_start = ( + DQ + offs_m[:, None] * stride_dq_m + offs_k[None, :] * stride_dq_k + ) # [BLOCK_M, BLOCK_D_MODEL_POW2] + + do_ptrs_start = DO + offs_m[:, None] * stride_do_m + offs_k[None, :] * stride_do_k + curr_m = start_m + step_m = BLOCK_M + curr_philox_offset = batch_philox_offset + curr_dropout_offset = dropout_offset + RCP_LN2: tl.constexpr = 1.4426950408889634 + + # Iterate over blocks(BLOCK_M size) of Q while calculating + # a fixed block(BLOCK_N) of dk and dv. Note, during backward + # pass P has to be recomputed. However, this kernel computes + # dV and dK, so we compute we need P^T and S^T. See backward pass + # equations + # + # From Flash Attention Paper: + # ForwardPass: S = QkT, P=softmax(S), O=PV + # + # BackwardPass equations + # dV = P^TdO + # dP = dOV^T + # dS = dsoftmax(dP) + # dQ = dSK + # dK = QdS^T + + # Compute a starting index and step based on workgroup_id + # Use a simple hash-like function to spread out the starting points + start_idx = ( + workgroup_id * 17 + ) % num_steps # 17 is an arbitrary prime to spread indices + # Ensure step is coprime with num_steps to visit all indices exactly once + step = 1 # 3 if num_steps > 1 or num_steps==3 else 1 # coprime with num_steps + + for iter in range(num_steps): + # Compute the permuted block index + blk_idx = (start_idx + iter * step) % num_steps + + curr_m = start_m + blk_idx * step_m + qT_ptrs = qT_ptrs_start + blk_idx * step_m * stride_q_m + dq_ptrs = dq_ptrs_start + blk_idx * step_m * stride_dq_m + do_ptrs = do_ptrs_start + blk_idx * step_m * stride_do_m + + offs_m = curr_m + tl.arange(0, BLOCK_M) + mask_m = offs_m < seqlen_q + mask_qT = mask_m[None, :] + mask_do = mask_m[:, None] + mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) + + if PADDED_HEAD: + mask_qT &= offs_k[:, None] < BLOCK_D_MODEL + mask_do &= offs_k[None, :] < BLOCK_D_MODEL + + # load qT + qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) + + # dropout + if ENABLE_DROPOUT: + # NOTE: dropout is transposed because it is used to mask pT + philox_offs = ( + curr_philox_offset + + offs_m[None, :] * stride_dropout_m + + offs_n[:, None] * stride_dropout_n + ) + rand_vals = tl.rand(philox_seed, philox_offs) + dropout_mask = rand_vals > dropout_p + dropout_scale = 1.0 / (1 - dropout_p) + + # Load M + m = tl.load(M + offs_m * stride_deltam, mask=mask_m, other=0.0) + + # Compute qkT + if IS_FP8: + qkT = tl.dot(k, qT) * descale_q * descale_k + else: + qkT = tl.dot(k, qT) + + # Compute pT(use m and also apply sm_scale) + pT = tl.math.exp(qkT * sm_scale - m[None, :]) + + if MASK: + causal_mask = (offs_m[None, :] - delta_qk) >= (offs_n[:, None]) + mask = causal_mask & mask_nm + pT = tl.where(mask, pT, 0.0) + + # load DO + do = tl.load(do_ptrs, mask=mask_do, other=0.0) + + # dV + if ENABLE_DROPOUT: + pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale + if IS_FP8: + scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors( + pT_dropout, FP8_MAX + ) + dv += ( + tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do) + * descale_p_dropout + * descale_do + ) + else: + dv += 
tl.dot(pT_dropout.to(do.type.element_ty), do) + else: + if IS_FP8: + scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) + dv += ( + tl.dot((pT * scale_pT).to(do.type.element_ty), do) + * descale_pT + * descale_do + ) + else: + dv += tl.dot(pT.to(do.type.element_ty), do) + + # Load delta + Di = tl.load(D + offs_m * stride_deltam, mask=mask_m) + + # Compute dP and dS + if IS_FP8: + dpT = tl.dot(v, tl.trans(do)) * descale_v * descale_do + else: + dpT = tl.dot(v, tl.trans(do)) + + if ENABLE_DROPOUT: + dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale + + delta_i = Di[None, :] + dsT = pT * (dpT - delta_i) + + # compute dk + if IS_FP8: + scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) + dk += ( + tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) + * descale_dsT + * descale_q + ) + else: + dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) + + # We can compute the dq_partial here and do a atomic add to the correct memory location + # NOTE: Possible problems with the atomic add: contention, is inside a loop which has achieved bad perf before + # (BLOCK_M, BLOCK_N) x (BLOCK_N, D) + if IS_FP8: + dq_partial = ( + tl.dot((dsT * scale_dsT).to(k.dtype).T, k) * descale_dsT * descale_k + ) + else: + dq_partial = tl.dot(dsT.to(k.dtype).T, k) + tl.atomic_add( + dq_ptrs, + dq_partial * sm_scale, + mask=mask_m[:, None], + sem="relaxed", + ) + + return dk, dv + + +@triton.jit +def _bwd_kernel_fused_atomics_dkdvdq_causal( + q_ptr, + k_ptr, + v_ptr, + sm_scale, + do_ptr, + dk_ptr, + dv_ptr, + dq_ptr, + m_ptr, + delta_ptr, + stride_q_b, + stride_q_h, + stride_q_m, + stride_q_k, + stride_k_b, + stride_k_h, + stride_k_n, + stride_k_k, + stride_v_b, + stride_v_h, + stride_v_n, + stride_v_k, + stride_dk_b, + stride_dk_h, + stride_dk_n, + stride_dk_k, + stride_dq_b, + stride_dq_h, + stride_dq_m, + stride_dq_k, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_do_b, + stride_do_h, + stride_do_m, + stride_do_k, + stride_dropout_b, + stride_dropout_h, + stride_dropout_m, + stride_dropout_n, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BATCH, + NUM_K_PIDS, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + wid = tl.program_id(0) # workgoup id: 0, ..., NUM_K_PIDS * BATCH * NUM_K_HEADS - 1 + + # workgroups get launched first along batch dim, then in head_k dim, and then in seq k block dim + batch_idx = wid % BATCH + head_k_idx = wid // BATCH % NUM_K_HEADS + seq_k_blk_idx = wid // (BATCH * NUM_K_HEADS) % NUM_K_PIDS + + # Determine q and k start along with seqlen_q and seqlen_k + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + batch_idx) + q_end = tl.load(cu_seqlens_q + batch_idx + 1) + k_start = tl.load(cu_seqlens_k + batch_idx) + k_end = tl.load(cu_seqlens_k + batch_idx + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + + # 
Figure out causal starting block since we have seqlen_q >=< seqlen_k. + # Unlike forward pass where we tile on M dim and iterate on N dim, so that + # we can skip some M blocks, in backward pass, we tile on the N dim for kv + # and iterate over the M. In this way, we cannot skip N blocks, but only to + # determine the starting M blocks to skip some initial blocks masked by + # causal. + delta_qk = seqlen_q - seqlen_k + + # q > k: diretcly skip all the way until the start of causal block + start_delta_q_gt_k = delta_qk + + # q < k: some blocks will have no Masked block, other needs to re-calc + # starting position + # delta_qk is negative so flip it, only multiple of BLOCK_N can skip the + # masked op + num_blocks_skip = -delta_qk // BLOCK_N + delta_aligned = (num_blocks_skip + 1) * BLOCK_N + delta_qk + start_delta_q_lt_k = delta_aligned // BLOCK_M * BLOCK_M + if delta_qk >= 0: + start_delta = delta_qk + else: + start_delta = start_delta_q_lt_k + + start_n = seq_k_blk_idx * BLOCK_N + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_n = start_n + tl.arange(0, BLOCK_N) + # Mask for loading K and V + mask_kv = offs_n[:, None] < seqlen_k + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_k = offs_k < BLOCK_D_MODEL + mask_kv &= mask_k[None, :] + + GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + adj_k = ( + batch_idx * stride_k_b + + head_k_idx * stride_k_h + + k_start * stride_k_n + + offs_n[:, None] * stride_k_n + + offs_k[None, :] * stride_k_k + ) + adj_v = ( + batch_idx * stride_v_b + + head_k_idx * stride_v_h + + k_start * stride_v_n + + offs_n[:, None] * stride_v_n + + offs_k[None, :] * stride_v_k + ) + # load K and V: they stay in SRAM throughout the inner loop. + k = tl.load(k_ptr + adj_k, mask=mask_kv, other=0.0) + v = tl.load(v_ptr + adj_v, mask=mask_kv, other=0.0) + + # If MQA / GQA, set the K and V head offsets appropriately. 
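+    # GQA/MQA grouping: each K/V head (head_k_idx) is shared by GROUP_SIZE
+    # consecutive query heads, so the K/V tile loaded above stays in registers
+    # and is reused for every head_q_idx in the loop below. Illustrative
+    # numbers (not from this diff): NUM_Q_HEADS=32, NUM_K_HEADS=8 gives
+    # GROUP_SIZE=4, so head_k_idx=2 serves head_q_idx 8..11.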
+ for head_q_idx in range( + head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE + ): + if delta_qk >= 0: + start_m = start_n + start_delta + len_m = BLOCK_N + else: + start_m = max(start_n + delta_qk, 0) + start_m = (start_m // BLOCK_M) * BLOCK_M + # because we might shift the masked blocks up, we are deeper into + # the masked out region, so we would potentially increase the total + # steps with masked operation to get out of it + residue_m = max(start_n + delta_qk - start_m, 0) + len_m = BLOCK_N + residue_m + + # offset input and output tensor by batch and Q/K heads + adj_q = batch_idx * stride_q_b + head_q_idx * stride_q_h + q_start * stride_q_m + adj_dq = ( + batch_idx * stride_dq_b + head_q_idx * stride_dq_h + q_start * stride_dq_m + ) + + q_ptr_adj = q_ptr + adj_q + dq_ptr_adj = dq_ptr + adj_dq + + adj_do = ( + batch_idx * stride_do_b + head_q_idx * stride_do_h + q_start * stride_do_m + ) + do_ptr_adj = do_ptr + adj_do + adj_delta = ( + batch_idx * stride_delta_b + + head_q_idx * stride_delta_h + + q_start * stride_delta_m + ) + m_ptr_adj = m_ptr + adj_delta + delta_ptr_adj = delta_ptr + adj_delta + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + dropout_offset = ( + dropout_mask + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + + MASK_BLOCK_M: tl.constexpr = BLOCK_M // BLK_SLICE_FACTOR + # bound the masked operation to q len so it does not have to wast cycles + len_m = min(len_m, seqlen_q) + num_steps = tl.cdiv(len_m, MASK_BLOCK_M) + + # when q < k, we may skip the initial masked op + # if seq_k_blk_idx < num_blocks_skip: + # num_steps = 0 + + if IS_FP8: + descale_q = tl.load( + descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx + ) + descale_k = tl.load( + descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx + ) + descale_v = tl.load( + descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx + ) + descale_do = tl.load( + descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx + ) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + # if unaligned start_m is negative, the current N-tile has no block on the + # diagonal of causal mask, so everything have no causal mask + dk, dv = _bwd_fused_atomics_dkdvdq_inner( + dk, + dv, # output tensors + q_ptr_adj, + k, + v, + do_ptr_adj, + dq_ptr_adj, + m_ptr_adj, + delta_ptr_adj, + sm_scale, # input tensors + stride_q_m, + stride_q_k, # strides for q + stride_dq_m, + stride_dq_k, # strides for q + stride_do_m, + stride_do_k, # strides for o + stride_dropout_m, + stride_dropout_n, # strides for dropout + stride_delta_m, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, # + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + MASK_BLOCK_M, + BLOCK_N, # block dim + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, # head dim + MASK=True, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + workgroup_id=seq_k_blk_idx, + ) + + start_m += num_steps * MASK_BLOCK_M + num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M) + end_m = start_m + num_steps * BLOCK_M + + dk, dv = _bwd_fused_atomics_dkdvdq_inner( + dk, + 
dv, # output tensors + q_ptr_adj, + k, + v, + do_ptr_adj, + dq_ptr_adj, + m_ptr_adj, + delta_ptr_adj, + sm_scale, # input tensors + stride_q_m, + stride_q_k, # strides for q + stride_dq_m, + stride_dq_k, # strides for dq + stride_do_m, + stride_do_k, # strides for o + stride_dropout_m, + stride_dropout_n, # strides for dropout + stride_delta_m, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, # + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + BLOCK_M, + BLOCK_N, # block dim + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, # head dim + MASK=False, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + workgroup_id=seq_k_blk_idx, + ) + + # Write back dV and dK. + offs_dkdv = ( + batch_idx * stride_dk_b + + head_k_idx * stride_dk_h + + k_start * stride_dk_n + + offs_n[:, None] * stride_dk_n + + offs_k[None, :] * stride_dk_k + ) + tl.store(dv_ptr + offs_dkdv, dv, mask=mask_kv) + dk *= sm_scale + tl.store(dk_ptr + offs_dkdv, dk, mask=mask_kv) + + +@triton.jit +def _bwd_kernel_fused_atomics_dkdv_causal( + q_ptr, + k_ptr, + v_ptr, + sm_scale, + do_ptr, + dk_ptr, + dv_ptr, + m_ptr, + delta_ptr, + stride_q_b, + stride_q_h, + stride_q_m, + stride_q_k, + stride_k_b, + stride_k_h, + stride_k_n, + stride_k_k, + stride_v_b, + stride_v_h, + stride_v_n, + stride_v_k, + stride_dk_b, + stride_dk_h, + stride_dk_n, + stride_dk_k, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_do_b, + stride_do_h, + stride_do_m, + stride_do_k, + stride_dropout_b, + stride_dropout_h, + stride_dropout_m, + stride_dropout_n, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + # seq block, batch, head_k + seq_k_blk_idx = tl.program_id(0) + batch_idx = tl.program_id(1) + head_k_idx = tl.program_id(2) + + # Determine q and k start along with seqlen_q and seqlen_k + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + batch_idx) + q_end = tl.load(cu_seqlens_q + batch_idx + 1) + k_start = tl.load(cu_seqlens_k + batch_idx) + k_end = tl.load(cu_seqlens_k + batch_idx + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + + # Figure out causal starting block since we have seqlen_q >=< seqlen_k. + # Unlike forward pass where we tile on M dim and iterate on N dim, so that + # we can skip some M blocks, in backward pass, we tile on the N dim for kv + # and iterate over the M. In this way, we cannot skip N blocks, but only to + # determine the starting M blocks to skip some initial blocks masked by + # causal. 
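+    # Worked example (illustrative values, not from this diff): with
+    # seqlen_q=1024 and seqlen_k=768, delta_qk=256. The causal mask is aligned
+    # to the bottom-right corner, so query m attends key n only when
+    # m >= n + delta_qk; the K/V tile starting at start_n therefore meets its
+    # first unmasked query row at start_m = start_n + delta_qk, which is what
+    # start_delta encodes in the delta_qk >= 0 case.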
+ delta_qk = seqlen_q - seqlen_k + + # q > k: diretcly skip all the way until the start of causal block + start_delta_q_gt_k = delta_qk + + # q < k: some blocks will have no Masked block, other needs to re-calc + # starting position + # delta_qk is negative so flip it, only multiple of BLOCK_N can skip the + # masked op + num_blocks_skip = -delta_qk // BLOCK_N + delta_aligned = (num_blocks_skip + 1) * BLOCK_N + delta_qk + start_delta_q_lt_k = delta_aligned // BLOCK_M * BLOCK_M + if delta_qk >= 0: + start_delta = delta_qk + else: + start_delta = start_delta_q_lt_k + + start_n = seq_k_blk_idx * BLOCK_N + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_n = start_n + tl.arange(0, BLOCK_N) + # Mask for loading K and V + mask_kv = offs_n[:, None] < seqlen_k + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_k = offs_k < BLOCK_D_MODEL + mask_kv &= mask_k[None, :] + + GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + adj_k = ( + batch_idx * stride_k_b + + head_k_idx * stride_k_h + + k_start * stride_k_n + + offs_n[:, None] * stride_k_n + + offs_k[None, :] * stride_k_k + ) + adj_v = ( + batch_idx * stride_v_b + + head_k_idx * stride_v_h + + k_start * stride_v_n + + offs_n[:, None] * stride_v_n + + offs_k[None, :] * stride_v_k + ) + # load K and V: they stay in SRAM throughout the inner loop. + k = tl.load(k_ptr + adj_k, mask=mask_kv, other=0.0) + v = tl.load(v_ptr + adj_v, mask=mask_kv, other=0.0) + + # If MQA / GQA, set the K and V head offsets appropriately. + for head_q_idx in range( + head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE + ): + if delta_qk >= 0: + start_m = start_n + start_delta + len_m = BLOCK_N + else: + start_m = max(start_n + delta_qk, 0) + start_m = start_m // BLOCK_M * BLOCK_M + # because we might shift the masked blocks up, we are deeper into + # the masked out region, so we would potentially increase the total + # steps with masked operation to get out of it + residue_m = max(start_n + delta_qk - start_m, 0) + len_m = BLOCK_N + residue_m + + # offset input and output tensor by batch and Q/K heads + adj_q = batch_idx * stride_q_b + head_q_idx * stride_q_h + q_start * stride_q_m + q_ptr_adj = q_ptr + adj_q + adj_do = ( + batch_idx * stride_do_b + head_q_idx * stride_do_h + q_start * stride_do_m + ) + do_ptr_adj = do_ptr + adj_do + adj_delta = ( + batch_idx * stride_delta_b + + head_q_idx * stride_delta_h + + q_start * stride_delta_m + ) + m_ptr_adj = m_ptr + adj_delta + delta_ptr_adj = delta_ptr + adj_delta + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + dropout_offset = ( + dropout_mask + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + + MASK_BLOCK_M: tl.constexpr = BLOCK_M // BLK_SLICE_FACTOR + # bound the masked operation to q len so it does not have to wast cycles + len_m = min(len_m, seqlen_q) + num_steps = tl.cdiv(len_m, MASK_BLOCK_M) + # when q < k, we may skip the initial masked op + if seq_k_blk_idx < num_blocks_skip: + num_steps = 0 + + if IS_FP8: + descale_q = tl.load( + descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx + ) + descale_k = tl.load( + descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx + ) + descale_v = tl.load( + descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx + ) + descale_do = tl.load( + 
descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx + ) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + # if start_m is negative, the current N-tile has no block on the + # diagonal of causal mask, so everything have no causal mask + dk, dv = _bwd_fused_atomics_dkdv_inner( + dk, + dv, # output tensors + q_ptr_adj, + k, + v, + do_ptr_adj, + m_ptr_adj, + delta_ptr_adj, + sm_scale, # input tensors + stride_q_m, + stride_q_k, # strides for q + stride_do_m, + stride_do_k, # strides for o + stride_dropout_m, + stride_dropout_n, # strides for dropout + stride_delta_m, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, # + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + MASK_BLOCK_M, + BLOCK_N, # block dim + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, # head dim + MASK=True, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + start_m += num_steps * MASK_BLOCK_M + num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M) + end_m = start_m + num_steps * BLOCK_M + + dk, dv = _bwd_fused_atomics_dkdv_inner( + dk, + dv, # output tensors + q_ptr_adj, + k, + v, + do_ptr_adj, + m_ptr_adj, + delta_ptr_adj, + sm_scale, # input tensors + stride_q_m, + stride_q_k, # strides for q + stride_do_m, + stride_do_k, # strides for o + stride_dropout_m, + stride_dropout_n, # strides for dropout + stride_delta_m, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, # + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + BLOCK_M, + BLOCK_N, # block dim + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, # head dim + MASK=False, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + + # Write back dV and dK. 
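+    # sm_scale was not applied inside the inner loops (dsT = pT * (dpT - Di)),
+    # so dk is rescaled once here before the store; dv needs no such factor.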
+ offs_dkdv = ( + batch_idx * stride_dk_b + + head_k_idx * stride_dk_h + + k_start * stride_dk_n + + offs_n[:, None] * stride_dk_n + + offs_k[None, :] * stride_dk_k + ) + tl.store(dv_ptr + offs_dkdv, dv, mask=mask_kv) + dk *= sm_scale + tl.store(dk_ptr + offs_dkdv, dk, mask=mask_kv) + + +@triton.jit +def _bwd_kernel_fused_atomics_dq_causal( + q_ptr, + k_ptr, + v_ptr, + sm_scale, + do_ptr, + dq_ptr, + m_ptr, + delta_ptr, + stride_q_b, + stride_q_h, + stride_q_m, + stride_q_k, + stride_k_b, + stride_k_h, + stride_k_n, + stride_k_k, + stride_v_b, + stride_v_h, + stride_v_n, + stride_v_k, + stride_dq_b, + stride_dq_h, + stride_dq_m, + stride_dq_k, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_do_b, + stride_do_h, + stride_do_m, + stride_do_k, + stride_dropout_b, + stride_dropout_h, + stride_dropout_m, + stride_dropout_n, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + seq_q_blk_idx = tl.program_id(0) + batch_idx = tl.program_id(1) + head_k_idx = tl.program_id(2) + + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + batch_idx) + q_end = tl.load(cu_seqlens_q + batch_idx + 1) + k_start = tl.load(cu_seqlens_k + batch_idx) + k_end = tl.load(cu_seqlens_k + batch_idx + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + # Figure out causal starting block since we have seqlen_q <=> seqlen_k. + # Unlike forward pass where we tile on M dim and iterate on N dim, so that + # we can skip some M blocks, in backward pass, we tile on the N dim for kv + # and iterate over the M. In this way, we cannot skip N blocks, but only to + # determine the starting M blocks to skip some initial blocks masked by + # causal. + # DQ tiles on M dim and iterate on N dim, so we there could be some tiles we + # can simply skip and we need to adjust starting position. + start_m = seq_q_blk_idx * BLOCK_M + # seqlen_q > seqlen_k, no need to process these tile for dq + delta_qk = seqlen_q - seqlen_k + if start_m + BLOCK_M < delta_qk: + return + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_m = start_m + tl.arange(0, BLOCK_M) + # Mask for loading K and V + mask_q = offs_m[:, None] < seqlen_q + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_k = offs_k < BLOCK_D_MODEL + mask_q &= mask_k[None, :] + offs_q = offs_m[:, None] * stride_q_m + offs_k[None, :] * stride_q_k + offs_do = offs_m[:, None] * stride_do_m + offs_k[None, :] * stride_do_k + adj_k = batch_idx * stride_k_b + head_k_idx * stride_k_h + k_start * stride_k_n + adj_v = batch_idx * stride_v_b + head_k_idx * stride_v_h + k_start * stride_v_n + k_ptr_adj = k_ptr + v_ptr_adj = v_ptr + k_ptr_adj += adj_k + v_ptr_adj += adj_v + + # If MQA / GQA, set the K and V head offsets appropriately. 
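+    # The dq kernel tiles on the M (query) dimension and walks the N (key)
+    # dimension. With the bottom-right-aligned causal mask, the M-tile starting
+    # at start_m only attends keys n < start_m + BLOCK_M - delta_qk, which is
+    # how end_n is derived (then clamped to [0, seqlen_k]) inside the loop below.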
+ GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + for head_q_idx in range( + head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE + ): + # seqlen_q < seqlen_k: delta_qk more kv tokens are added at the front + # for every M-tile + end_n = start_m + BLOCK_M - delta_qk + # clamp end_n at [0, seqlen_k] + end_n = max(min(end_n, seqlen_k), 0) + + # offset input and output tensor by batch and Q/K heads + adj_q = batch_idx * stride_q_b + head_q_idx * stride_q_h + q_start * stride_q_m + adj_do = ( + batch_idx * stride_do_b + head_q_idx * stride_do_h + q_start * stride_do_m + ) + adj_delta = ( + batch_idx * stride_delta_b + + head_q_idx * stride_delta_h + + q_start * stride_delta_m + ) + delta_ptr_adj = delta_ptr + adj_delta + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + dropout_offset = ( + dropout_mask + + batch_idx * stride_dropout_b + + head_q_idx * stride_dropout_h + ) + + q = tl.load(q_ptr + adj_q + offs_q, mask=mask_q, other=0.0) + do = tl.load(do_ptr + adj_do + offs_do, mask=mask_q, other=0.0) + m = tl.load(m_ptr + adj_delta + offs_m * stride_delta_m, mask=offs_m < seqlen_q) + m = m[:, None] + + MASK_BLOCK_N: tl.constexpr = BLOCK_N // BLK_SLICE_FACTOR + # start can only be 0 at minimum + start_n = max(end_n - BLOCK_M, 0) + num_steps = tl.cdiv(end_n - start_n, MASK_BLOCK_N) + + if IS_FP8: + descale_q = tl.load( + descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx + ) + descale_k = tl.load( + descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx + ) + descale_v = tl.load( + descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx + ) + descale_do = tl.load( + descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx + ) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + dq = tl.zeros([BLOCK_M, BLOCK_D_MODEL_POW2], dtype=tl.float32) + # Compute dQ for masked (diagonal) blocks. + # NOTE: This code scans each row of QK^T backward (from right to left, + # but inside each call to _bwd_dq_inner, from left to right), but that's + # not due to anything important. I just wanted to reuse the loop + # structure for dK & dV above as much as possible. 
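+        # Two-pass structure: the first _bwd_fused_atomics_dq_inner call below
+        # handles the blocks touching the causal diagonal (MASK=True, narrower
+        # MASK_BLOCK_N tiles); the second call then sweeps the remaining fully
+        # unmasked blocks to the left with MASK=False and the full BLOCK_N.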
+ dq = _bwd_fused_atomics_dq_inner( + dq, + q, + k_ptr_adj, + v_ptr_adj, + do, + m, + delta_ptr_adj, + sm_scale, + stride_q_m, + stride_q_k, + stride_k_n, + stride_k_k, + stride_v_n, + stride_v_k, + stride_dropout_m, + stride_dropout_n, + stride_delta_m, + seqlen_q, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M, + MASK_BLOCK_N, + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, + MASK=True, + ENABLE_DROPOUT=ENABLE_DROPOUT, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + end_n -= num_steps * MASK_BLOCK_N + num_steps = tl.cdiv(end_n, BLOCK_N) + start_n = max(end_n - num_steps * BLOCK_N, 0) + dq = _bwd_fused_atomics_dq_inner( + dq, + q, + k_ptr_adj, + v_ptr_adj, + do, + m, + delta_ptr_adj, + sm_scale, + stride_q_m, + stride_q_k, + stride_k_n, + stride_k_k, + stride_v_n, + stride_v_k, + stride_dropout_m, + stride_dropout_n, + stride_delta_m, + seqlen_q, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M, + BLOCK_N, + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + # Write back dQ. + offs_dq = ( + batch_idx * stride_dq_b + + head_q_idx * stride_dq_h + + q_start * stride_dq_m + + offs_m[:, None] * stride_dq_m + + offs_k[None, :] * stride_dq_k + ) + dq *= sm_scale + tl.store(dq_ptr + offs_dq, dq, mask=mask_q) + + +@triton.jit +def _bwd_kernel_fused_atomics_dkdvdq_noncausal( + Q, + K, + V, + sm_scale, + DO, + DK, + DV, + DQ, + M, + Delta, + stride_qb, + stride_qh, + stride_qm, + stride_qk, + stride_kb, + stride_kh, + stride_kn, + stride_kk, + stride_vb, + stride_vh, + stride_vn, + stride_vk, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkk, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqk, + stride_deltab, + stride_deltah, + stride_deltam, + stride_dob, + stride_doh, + stride_dom, + stride_dok, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BATCH, + NUM_K_PIDS, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + # workgroup id + wid = tl.program_id(0) # 0, ..., NUM_K_PIDS * BATCH * NUM_K_HEADS - 1 + + # Workgroups get launched first along batch dim, then in head_k dim, and then in seq k block dim + # This is in order to avoid contention for the tl.atomic_add (inside _bwd_dkdvdq_inner) that happens between workgroups that share the same batch and head_k. 
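+    # Illustrative decomposition (values not from this diff): with BATCH=2 and
+    # NUM_K_HEADS=3, wid 0..5 map to (bid, hkid, pid) =
+    # (0,0,0), (1,0,0), (0,1,0), (1,1,0), (0,2,0), (1,2,0), and wid=6 -> (0,0,1).
+    # Adjacent workgroups thus differ in batch/head_k first and only then in the
+    # seq-k block, spreading the dq atomic_adds over different (batch, head_k).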
+ bid = wid % BATCH + hkid = wid // BATCH % NUM_K_HEADS + pid = wid // (BATCH * NUM_K_HEADS) % NUM_K_PIDS + + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + k_start = tl.load(cu_seqlens_k + bid) + k_end = tl.load(cu_seqlens_k + bid + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + + start_n = pid * BLOCK_N + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_n = start_n + tl.arange(0, BLOCK_N) + mask_kv = offs_n[:, None] < seqlen_k + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_kv &= offs_k < BLOCK_D_MODEL + + GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + adj_k = ( + bid * stride_kb + + hkid * stride_kh + + k_start * stride_kn + + offs_n[:, None] * stride_kn + + offs_k[None, :] * stride_kk + ) + adj_v = ( + bid * stride_vb + + hkid * stride_vh + + k_start * stride_vn + + offs_n[:, None] * stride_vn + + offs_k[None, :] * stride_vk + ) + + k = tl.load(K + adj_k, mask=mask_kv, other=0.0) + v = tl.load(V + adj_v, mask=mask_kv, other=0.0) + + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm + + Q_ptr = Q + adj_q + DQ_ptr = DQ + adj_dq + + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + DO_ptr = DO + adj_do + adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam + M_ptr = M + adj_delta + Delta_ptr = Delta + adj_delta + + # dropout + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + if IS_FP8: + descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) + descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) + descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) + descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + start_m = 0 + num_steps = tl.cdiv(seqlen_q, BLOCK_M) + + dk, dv = _bwd_fused_atomics_dkdvdq_inner( + dk, + dv, + Q_ptr, + k, + v, + DO_ptr, + DQ_ptr, + M_ptr, + Delta_ptr, + sm_scale, + stride_qm, + stride_qk, + stride_dqm, + stride_dqk, + stride_dom, + stride_dok, + stride_dropoutm, + stride_dropoutn, + stride_deltam, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + seqlen_q, + seqlen_k, + start_n, + start_m, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M, + BLOCK_N, + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + workgroup_id=pid, + ) + + adj_dkdv = ( + bid * stride_dkb + + hkid * stride_dkh + + k_start * stride_dkn + + offs_n[:, None] * stride_dkn + + offs_k[None, :] * stride_dkk + ) + tl.store(DV + adj_dkdv, dv, mask=mask_kv) + dk *= sm_scale + tl.store(DK + adj_dkdv, dk, mask=mask_kv) + + +@triton.jit +def _bwd_kernel_fused_atomics_dkdv_noncausal( + Q, + K, + V, + sm_scale, + DO, + DK, + DV, + M, + Delta, + stride_qb, + stride_qh, + stride_qm, + stride_qk, + stride_kb, + stride_kh, + stride_kn, + 
stride_kk, + stride_vb, + stride_vh, + stride_vn, + stride_vk, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkk, + stride_deltab, + stride_deltah, + stride_deltam, + stride_dob, + stride_doh, + stride_dom, + stride_dok, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + pid = tl.program_id(0) + bid = tl.program_id(1) + hkid = tl.program_id(2) + + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + k_start = tl.load(cu_seqlens_k + bid) + k_end = tl.load(cu_seqlens_k + bid + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) + + start_n = pid * BLOCK_N + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_n = start_n + tl.arange(0, BLOCK_N) + mask_kv = offs_n[:, None] < seqlen_k + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_kv &= offs_k < BLOCK_D_MODEL + + GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + adj_k = ( + bid * stride_kb + + hkid * stride_kh + + k_start * stride_kn + + offs_n[:, None] * stride_kn + + offs_k[None, :] * stride_kk + ) + adj_v = ( + bid * stride_vb + + hkid * stride_vh + + k_start * stride_vn + + offs_n[:, None] * stride_vn + + offs_k[None, :] * stride_vk + ) + + k = tl.load(K + adj_k, mask=mask_kv, other=0.0) + v = tl.load(V + adj_v, mask=mask_kv, other=0.0) + + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + Q_ptr = Q + adj_q + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + DO_ptr = DO + adj_do + adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam + M_ptr = M + adj_delta + Delta_ptr = Delta + adj_delta + + # dropout + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + if IS_FP8: + descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) + descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) + descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) + descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + start_m = 0 + num_steps = tl.cdiv(seqlen_q, BLOCK_M) + dk, dv = _bwd_fused_atomics_dkdv_inner( + dk, + dv, + Q_ptr, + k, + v, + DO_ptr, + M_ptr, + Delta_ptr, + sm_scale, + stride_qm, + stride_qk, + stride_dom, + stride_dok, + stride_dropoutm, + stride_dropoutn, + stride_deltam, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + seqlen_q, + 
seqlen_k, + start_n, + start_m, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M, + BLOCK_N, + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + + adj_dkdv = ( + bid * stride_dkb + + hkid * stride_dkh + + k_start * stride_dkn + + offs_n[:, None] * stride_dkn + + offs_k[None, :] * stride_dkk + ) + tl.store(DV + adj_dkdv, dv, mask=mask_kv) + dk *= sm_scale + tl.store(DK + adj_dkdv, dk, mask=mask_kv) + + +@triton.jit +def _bwd_kernel_fused_atomics_dq_noncausal( + Q, + K, + V, + sm_scale, + DO, + DQ, + M, + delta, + stride_qb, + stride_qh, + stride_qm, + stride_qk, + stride_kb, + stride_kh, + stride_kn, + stride_kk, + stride_vb, + stride_vh, + stride_vn, + stride_vk, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqk, + stride_deltab, + stride_deltah, + stride_deltam, + stride_dob, + stride_doh, + stride_dom, + stride_dok, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + descale_q_ptr, + descale_k_ptr, + descale_v_ptr, + descale_do_ptr, + NUM_Q_HEADS: tl.constexpr, + NUM_K_HEADS: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + BLOCK_D_MODEL: tl.constexpr, + BLOCK_D_MODEL_POW2: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, +): + pid = tl.program_id(0) # seqlen + bid = tl.program_id(1) # batch + hkid = tl.program_id(2) # head_k + + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + + if IS_VARLEN: + # Compute actual sequence lengths + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + k_start = tl.load(cu_seqlens_k + bid) + k_end = tl.load(cu_seqlens_k + bid + 1) + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + start_m = pid * BLOCK_M + + offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) + offs_m = start_m + tl.arange(0, BLOCK_M) + + # mask for loading K and V + mask_q = offs_m[:, None] < seqlen_q + PADDED_HEAD: tl.constexpr = BLOCK_D_MODEL != BLOCK_D_MODEL_POW2 + if PADDED_HEAD: + mask_k = offs_k < BLOCK_D_MODEL + mask_q &= mask_k[None, :] + offs_q = offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk + offs_do = offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok + adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn + adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn + K += adj_k + V += adj_v + + GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam + delta_ptr = delta + adj_delta + + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) + do = tl.load(DO + adj_do + offs_do, mask=mask_q, other=0.0) + m = tl.load(M + adj_delta + offs_m * stride_deltam, mask=offs_m < seqlen_q) + m = m[:, 
None] + + # FP8 + if IS_FP8: + descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) + descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) + descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) + descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + start_n = 0 + end_n = seqlen_k + num_steps = tl.cdiv(seqlen_k, BLOCK_N) + dq = tl.zeros([BLOCK_M, BLOCK_D_MODEL_POW2], dtype=tl.float32) + dq = _bwd_fused_atomics_dq_inner( + dq, + q, + K, + V, + do, + m, + delta_ptr, + sm_scale, + stride_qm, + stride_qk, + stride_kn, + stride_kk, + stride_vn, + stride_vk, + stride_dropoutm, + stride_dropoutn, + stride_deltam, + seqlen_q, + seqlen_k, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + BLOCK_M, + BLOCK_N, + BLOCK_D_MODEL, + BLOCK_D_MODEL_POW2, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + ) + + adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm + offs_dq = offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk + dq *= sm_scale + tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) + + +# This function computes delta given output Out and gradient DO +# Here is the I/O shape: +# Out: (batch, nhead_q, max_seqlens_q, headDim) +# DO: (batch, nhead_q, max_seqlens_q, headDim) +# Delta: (batch, nheads_q, max_seqlens_q) +@triton.autotune( + configs=preprocess_autotune_configs, + key=preprocess_autotune_keys, + use_cuda_graph=True, +) +@triton.jit +def _bwd_preprocess( + O, + DO, # noqa: E741 + Delta, + stride_ob, + stride_oh, + stride_om, + stride_od, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_descale_do_z, + cu_seqlens_q, + max_seqlen_q, + Descale_do, + PRE_BLOCK: tl.constexpr, + HEAD_DIM_V: tl.constexpr, + ACTUAL_HEAD_DIM_V: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_FP8: tl.constexpr, +): + pid_m = tl.program_id(0) + bid = tl.program_id(1) + hid = tl.program_id(2) + # Handle varlen + if IS_VARLEN: + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + seqlen_q = q_end - q_start + else: + q_start = 0 + seqlen_q = max_seqlen_q + + # Compute offsets + offs_m = pid_m * PRE_BLOCK + tl.arange(0, PRE_BLOCK) + offs_d = tl.arange(0, HEAD_DIM_V) + # pointer offsets for O & DO + off_o = ( + bid * stride_ob + + hid * stride_oh + + q_start * stride_om + + offs_m[:, None] * stride_om + + offs_d[None, :] * stride_od + ) # noqa: E741 + off_do = ( + bid * stride_dob + + hid * stride_doh + + q_start * stride_dom + + offs_m[:, None] * stride_dom + + offs_d[None, :] * stride_dod + ) + + # create masks + mask_m = offs_m < seqlen_q + mask_md = mask_m[:, None] + PADDED_HEAD_V: tl.constexpr = ACTUAL_HEAD_DIM_V != HEAD_DIM_V + if PADDED_HEAD_V: + mask_md &= offs_d[None, :] < ACTUAL_HEAD_DIM_V + # load + o = tl.load(O + off_o, mask=mask_md, other=0.0) + do = tl.load(DO + off_do, mask=mask_md, other=0.0) + # compute and write-back to delta + if IS_FP8: + off_descale_do = bid * stride_descale_do_z + hid + descale_do = tl.load(Descale_do + off_descale_do) + + # NOTE: do is in the fp8 range and o is not in fp8 + delta = tl.sum(o.to(tl.float32) * (do.to(tl.float32) * descale_do), axis=1) + else: + delta = tl.sum(o.to(tl.float32) * do.to(tl.float32), axis=1) + off_delta = ( + bid * stride_delta_b + + hid * 
stride_delta_h + + q_start * stride_delta_m + + offs_m * stride_delta_m + ) + tl.store(Delta + off_delta, delta, mask=mask_m) + + +# The main inner-loop logic for computing dK and dV. +@triton.jit +def _bwd_dkdv_inner( + dk, + dv, # output + Q, + k, + v, + DO, + M, + D, + sm_scale, # input tensor + stride_qm, + stride_qk, + stride_dom, + stride_dok, + stride_dropoutm, + stride_dropoutn, + stride_lse_m, + stride_delta_m, + BLOCK_M: tl.constexpr, # 16 + BLOCK_N: tl.constexpr, # 128 + HEAD_DIM_QK: tl.constexpr, # + HEAD_DIM_V: tl.constexpr, # + ACTUAL_HEAD_DIM_QK: tl.constexpr, # + ACTUAL_HEAD_DIM_V: tl.constexpr, # + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + seqlen_q, + seqlen_k, # max sequence length for q and k + # Filled in by the wrapper. + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + MASK: tl.constexpr, # causal masking, only apply to tiles on mask diagonal + ENABLE_DROPOUT: tl.constexpr, # activate dropout + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, # activate exp2 + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + DEBUG_TRITON: tl.constexpr, + DEBUG_TRITON_DETAIL: tl.constexpr, +): + # if HEAD_DIM is padded + PADDED_HEAD_QK: tl.constexpr = ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK + PADDED_HEAD_V: tl.constexpr = ACTUAL_HEAD_DIM_V != HEAD_DIM_V + delta_qk = seqlen_q - seqlen_k + offs_m = start_m + tl.arange(0, BLOCK_M) # start_m + (0, 15) + offs_n = start_n + tl.arange(0, BLOCK_N) # start_m + (0, 127) + offs_k_qk = tl.arange(0, HEAD_DIM_QK) + offs_k_v = tl.arange(0, HEAD_DIM_V) + # mask to make sure not OOB of seqlen_q + mask_n = offs_n < seqlen_k + # Q and DO are (seqlen_q, head_dim) + # qT_ptrs = (1, BLOCK_M) + (HEAD_DIM_QK, 1), transpose of q + qT_ptrs = Q + offs_m[None, :] * stride_qm + offs_k_qk[:, None] * stride_qk + # do_ptrs = (BLOCK_M, 1) + (1, HEAD_DIM_V), NOT transposed + do_ptrs = DO + offs_m[:, None] * stride_dom + offs_k_v[None, :] * stride_dok + # BLOCK_N must be a multiple of BLOCK_M, otherwise the code wouldn't work. + tl.static_assert(BLOCK_N % BLOCK_M == 0) + curr_m = start_m + step_m = BLOCK_M + curr_philox_offset = batch_philox_offset + curr_dropout_offset = dropout_offset + RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) + + for blk_idx in range(num_steps): + if DEBUG_TRITON: + print(f"iter {blk_idx}: curr_m = {curr_m}") # noqa: E701 + offs_m = curr_m + tl.arange(0, BLOCK_M) + # update the mask because offs_m advanced + mask_m = offs_m < seqlen_q + mask_qT = mask_m[None, :] + mask_do = mask_m[:, None] + mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) + if PADDED_HEAD_QK: + mask_qT &= offs_k_qk[:, None] < ACTUAL_HEAD_DIM_QK + if PADDED_HEAD_V: + mask_do &= offs_k_v[None, :] < ACTUAL_HEAD_DIM_V + qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) + # generate dropout mask + if ENABLE_DROPOUT: + # NOTE: dropout is transposed because it is used to mask pT + philox_offs = ( + curr_philox_offset + + offs_m[None, :] * stride_dropoutm + + offs_n[:, None] * stride_dropoutn + ) + if tl_DROPOUT_USE_PYTORCH: + dropout_offs = ( + offs_m[None, :] * stride_dropoutm + + offs_n[:, None] * stride_dropoutn + ) + dropout_mask = tl.load(curr_dropout_offset + dropout_offs, mask=mask_nm) + else: + rand_vals = tl.rand(philox_seed, philox_offs) + dropout_mask = rand_vals > dropout_p + dropout_scale = 1.0 / (1 - dropout_p) + # Load m before computing qk to reduce pipeline stall. 
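+        # m holds the per-row softmax log-sum-exp statistic saved by the
+        # forward pass, so pT = exp(S^T - lse) below reconstructs the softmax
+        # probabilities without a second normalization pass.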
+ m = tl.load(M + offs_m * stride_lse_m, mask=mask_m, other=0.0) + if IS_FP8: + qkT = tl.dot(k, qT) * descale_q * descale_k + else: + qkT = tl.dot(k, qT) + qkT_scaled = qkT * sm_scale + + if USE_ALIBI: + relative_pos_block = offs_n[:, None] + seqlen_q - seqlen_k - offs_m[None, :] + alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) + qkT_scaled += alibi_block + + if DEBUG_TRITON_DETAIL: + if start_n == 256: + print(f"qT: {qT.shape}\n", qT) + print(f"k: {k.shape}\n", k) + print(f"qkT scaled: {qkT.shape}\n", qkT_scaled) + # TODO: remove the scaling of m later when we removed re-scaling in fwd + if USE_EXP2: + pT = tl.math.exp2(qkT_scaled * RCP_LN2 - m[None, :] * RCP_LN2) + else: + pT = tl.math.exp(qkT_scaled - m[None, :]) + + # Autoregressive masking. + if MASK: + # offset offs_m with delta_qk since the causal mask starts at + # bottom right of the (seqlen_q, seqlen_k) matrix + causal_mask = (offs_m[None, :] - delta_qk) >= offs_n[:, None] + mask = causal_mask & mask_nm + if DEBUG_TRITON_DETAIL: + if start_n == 256: + print(f"causal_mask: {causal_mask.shape}\n", causal_mask) + print( + f"qkT after causal: {qkT.shape}\n", + tl.where(causal_mask, qkT * sm_scale, 0.0), + ) + pT = tl.where(mask, pT, 0.0) + do = tl.load(do_ptrs, mask=mask_do, other=0.0) + # Compute dV. + if ENABLE_DROPOUT: + pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale + if IS_FP8: + scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors( + pT_dropout, FP8_MAX + ) + dv += ( + tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do) + * descale_p_dropout + * descale_do + ) + else: + dv += tl.dot(pT_dropout.to(do.type.element_ty), do) + else: + if IS_FP8: + scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) + dv += ( + tl.dot((pT * scale_pT).to(do.type.element_ty), do) + * descale_pT + * descale_do + ) + else: + dv += tl.dot(pT.to(do.type.element_ty), do) + + if DEBUG_TRITON_DETAIL: + if start_n == 256: + print(f"pT: {pT.shape}\n", pT) + # D (= delta) is pre-divided by ds_scale. + Di = tl.load(D + offs_m * stride_delta_m, mask=mask_m) + # Compute dP and dS. + if IS_FP8: + dpT = tl.dot(v, tl.trans(do)) * descale_v * descale_do + else: + dpT = tl.dot(v, tl.trans(do)) + if ENABLE_DROPOUT: + dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale + delta_i = Di[None, :] + dsT = pT * (dpT - delta_i) + if IS_FP8: + scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) + dk += ( + tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) + * descale_dsT + * descale_q + ) + else: + dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) + # Increment pointers. + curr_m += step_m + qT_ptrs += step_m * stride_qm + do_ptrs += step_m * stride_dom + return dk, dv + + +# the main inner-loop logic for computing dQ +@triton.jit +def _bwd_dq_inner( + dq, # output + q, + K, + V, + do, + m, + Delta, + sm_scale, # input + # shared by Q/K/V. + stride_qm, + stride_qk, + stride_kn, + stride_kk, + stride_vn, + stride_vk, + stride_dropoutm, + stride_dropoutn, # stride for dropout + stride_lse_m, + stride_delta_m, + seqlen_q, + seqlen_k, # + BLOCK_M2: tl.constexpr, # + BLOCK_N2: tl.constexpr, # + HEAD_DIM_QK: tl.constexpr, + HEAD_DIM_V: tl.constexpr, + ACTUAL_HEAD_DIM_QK: tl.constexpr, + ACTUAL_HEAD_DIM_V: tl.constexpr, # + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + # Filled in by the wrapper. 
+ start_m, + start_n, + end_n, + num_steps, # + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + MASK: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + DEBUG_TRITON: tl.constexpr, + DEBUG_TRITON_DETAIL: tl.constexpr, +): + # if HEAD_DIM is padded + PADDED_HEAD_QK: tl.constexpr = ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK + PADDED_HEAD_V: tl.constexpr = ACTUAL_HEAD_DIM_V != HEAD_DIM_V + delta_qk = seqlen_q - seqlen_k + offs_m = start_m + tl.arange(0, BLOCK_M2) + offs_n = start_n + tl.arange(0, BLOCK_N2) + offs_k_qk = tl.arange(0, HEAD_DIM_QK) + offs_k_v = tl.arange(0, HEAD_DIM_V) + + # mask to make sure not OOB of seqlen_q + mask_m = offs_m < seqlen_q + + kT_ptrs = K + offs_n[None, :] * stride_kn + offs_k_qk[:, None] * stride_kk + vT_ptrs = V + offs_n[None, :] * stride_vn + offs_k_v[:, None] * stride_vk + # D (= delta) is pre-divided by ds_scale. + Di = tl.load(Delta + offs_m * stride_delta_m, mask=mask_m, other=0.0) + # BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work. + tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0) + curr_n = start_n + step_n = BLOCK_N2 + curr_philox_offset = batch_philox_offset + curr_dropout_offset = dropout_offset + RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) + for blk_idx in range(num_steps): + if DEBUG_TRITON: + print(f"iter {blk_idx}: curr_n = {curr_n}") # noqa: E701 + offs_n = curr_n + tl.arange(0, BLOCK_N2) + # end_n is needed because the end of causal True might not be perfectly + # aligned with the end of the block + mask_n = offs_n < end_n + if DEBUG_TRITON_DETAIL: + print( + f"start_n = {start_n}, end_n = {end_n}, offs_n: {offs_n.shape}\n{offs_n}" + ) # noqa: E701 + if DEBUG_TRITON_DETAIL: + print(f"mask_n: {mask_n.shape}\n{mask_n}") # noqa: E701 + mask_kT = mask_n[None, :] + mask_vT = mask_n[None, :] + mask_mn = mask_m[:, None] & (offs_n[None, :] < end_n) + if PADDED_HEAD_QK: + mask_kT &= offs_k_qk[:, None] < ACTUAL_HEAD_DIM_QK + if PADDED_HEAD_V: + mask_vT &= offs_k_v[:, None] < ACTUAL_HEAD_DIM_V + + kT = tl.load(kT_ptrs, mask=mask_kT, other=0.0) + vT = tl.load(vT_ptrs, mask=mask_vT, other=0.0) + + if ENABLE_DROPOUT: + # NOTE: dropout is transposed because it is used to mask pT + philox_offs = ( + curr_philox_offset + + offs_m[:, None] * stride_dropoutm + + offs_n[None, :] * stride_dropoutn + ) + if tl_DROPOUT_USE_PYTORCH: + dropout_offs = ( + offs_m[:, None] * stride_dropoutm + + offs_n[None, :] * stride_dropoutn + ) + dropout_mask = tl.load(curr_dropout_offset + dropout_offs, mask=mask_mn) + else: + rand_vals = tl.rand(philox_seed, philox_offs) + dropout_mask = rand_vals > dropout_p + dropout_scale = 1 / (1 - dropout_p) + + if IS_FP8: + qk = tl.dot(q, kT) * descale_q * descale_k + else: + qk = tl.dot(q, kT) + qk_scaled = qk * sm_scale + + if USE_ALIBI: + relative_pos_block = offs_m[:, None] + seqlen_k - seqlen_q - offs_n[None, :] + alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) + qk_scaled += alibi_block + + if DEBUG_TRITON_DETAIL: + print(f"qk scaled: {qk.shape}\n", qk_scaled) # noqa: E701 + if USE_EXP2: + p = tl.math.exp2(qk_scaled * RCP_LN2 - m * RCP_LN2) + else: + p = tl.math.exp(qk_scaled - m) + + # Autoregressive masking. + if MASK: + causal_mask = (offs_m[:, None] - delta_qk) >= offs_n[None, :] + mask = causal_mask & mask_mn + p = tl.where(mask, p, 0.0) + # Compute dP and dS. 
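# For orientation only: the tiled loops in _bwd_dq_inner and _bwd_dkdv_inner implement the
# standard attention backward identities. A naive single-head PyTorch reference (no causal
# mask, dropout, alibi or FP8) of the same math:
import torch

def naive_attention_backward(q, k, v, do, sm_scale):
    s = (q @ k.T) * sm_scale                   # (seqlen_q, seqlen_k) scores
    p = torch.softmax(s, dim=-1)               # probabilities P
    o = p @ v                                  # forward output
    dv = p.T @ do                              # dV = P^T dO
    dp = do @ v.T                              # dP = dO V^T
    delta = (do * o).sum(-1, keepdim=True)     # Delta = rowsum(dO * O) = rowsum(P * dP)
    ds = p * (dp - delta)                      # dS = P * (dP - Delta)
    dq = (ds @ k) * sm_scale                   # sm_scale deferred to the end, as in the kernels
    dk = (ds.T @ q) * sm_scale
    return dq, dk, dv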
+ if IS_FP8: + dp = tl.dot(do, vT) * descale_do * descale_v + else: + dp = tl.dot(do, vT) + if ENABLE_DROPOUT: + dp = tl.where(dropout_mask, dp, 0.0) * dropout_scale + delta_i = Di[:, None] + ds = p * (dp - delta_i) + # Compute dQ. + # NOTE: We need to de-scale dq in the end, because kT was pre-scaled. + if IS_FP8: + scale_ds, descale_ds = compute_fp8_scaling_factors(ds, FP8_MAX) + dq += ( + tl.dot((ds * scale_ds).to(kT.type.element_ty), tl.trans(kT)) + * descale_ds + * descale_k + ) + else: + dq += tl.dot(ds.to(kT.type.element_ty), tl.trans(kT)) + # Increment pointers. + curr_n += step_n + kT_ptrs += step_n * stride_kn + vT_ptrs += step_n * stride_vn + return dq + + +@triton.autotune( + configs=causal_autotune_configs, + key=causal_autotune_keys, + use_cuda_graph=True, +) +@triton.jit +def bwd_kernel_causal( # grid = (nheads_k, tl.cdiv(max_seqlen_q // BLOCK_M2), batch) + Q, + K, + V, + sm_scale, + DO, + DQ, + DK, + DV, + M, + Delta, + stride_qb, + stride_qh, + stride_qm, + stride_qd, + stride_kb, + stride_kh, + stride_kn, + stride_kd, + stride_vb, + stride_vh, + stride_vn, + stride_vd, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqd, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkd, + stride_dvb, + stride_dvh, + stride_dvn, + stride_dvd, + stride_lse_b, + stride_lse_h, + stride_lse_m, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + stride_az, + stride_ah, + HQ, + HK, + cu_seqlens_q, + cu_seqlens_k, + seqused_q, + seqused_k, # Add seqused parameters + max_seqlen_q, + max_seqlen_k, + Dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + Alibi_slopes, + Descale_q, + Descale_k, + Descale_v, + Descale_do, + BLOCK_M1: tl.constexpr, + BLOCK_N1: tl.constexpr, + BLOCK_M2: tl.constexpr, + BLOCK_N2: tl.constexpr, + BLK_SLICE_FACTOR: tl.constexpr, + HEAD_DIM_QK: tl.constexpr, + HEAD_DIM_V: tl.constexpr, + ACTUAL_HEAD_DIM_QK: tl.constexpr, + ACTUAL_HEAD_DIM_V: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + FP8_OUTPUT: tl.constexpr, + USE_SEQUSED: tl.constexpr, # Add flag for seqused + DEBUG_TRITON: tl.constexpr, + DEBUG_TRITON_DETAIL: tl.constexpr, +): + # program ids + hkid = tl.program_id(0) + pid = tl.program_id(1) + bid = tl.program_id(2) + if DEBUG_TRITON: + print(f"\npid: {pid}, bid: {bid}, hkid: {hkid}") # noqa: E701 + # figure out varlen start and end + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + if IS_VARLEN: + # Compute actual sequence lengths + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + k_start = tl.load(cu_seqlens_k + bid) + k_end = tl.load(cu_seqlens_k + bid + 1) + + # If seqused is provided, use it to limit the actual sequence length + if USE_SEQUSED: + actual_seqlen_q = ( + tl.load(seqused_q + bid) if seqused_q is not None else q_end - q_start + ) + seqlen_q = tl.minimum(actual_seqlen_q, q_end - q_start) + actual_seqlen_k = ( + tl.load(seqused_k + bid) if seqused_k is not None else k_end - k_start + ) + seqlen_k = tl.minimum(actual_seqlen_k, k_end - k_start) + else: + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + delta_qk = seqlen_q - seqlen_k + if DEBUG_TRITON: + print(f"delta_qk = {delta_qk}") # noqa: E701 + 
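# Index bookkeeping assumed by the varlen branch above, sketched for reference:
# cu_seqlens_q / cu_seqlens_k are exclusive prefix sums of the per-sequence lengths, so
# sequence `bid` occupies rows cu_seqlens[bid]:cu_seqlens[bid + 1] of the packed
# (total_tokens, nheads, head_dim) tensors, and seqused (when passed) can only shrink
# that range. The seqused values below are hypothetical.
import torch

seqlens = torch.tensor([3, 5, 2], dtype=torch.int32)
cu_seqlens = torch.zeros(len(seqlens) + 1, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0).to(torch.int32)
print(cu_seqlens.tolist())                     # [0, 3, 8, 10]

bid = 1
q_start, q_end = int(cu_seqlens[bid]), int(cu_seqlens[bid + 1])
seqused_q = torch.tensor([3, 4, 2], dtype=torch.int32)
seqlen_q = min(int(seqused_q[bid]), q_end - q_start)   # mirrors the USE_SEQUSED clamp
print(q_start, q_end, seqlen_q)                # 3 8 4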
PADDED_HEAD_QK: tl.constexpr = ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK + PADDED_HEAD_V: tl.constexpr = ACTUAL_HEAD_DIM_V != HEAD_DIM_V + offs_d_qk = tl.arange(0, HEAD_DIM_QK) + offs_d_v = tl.arange(0, HEAD_DIM_V) + GROUP_SIZE: tl.constexpr = HQ // HK + + # align the delta_qk + start_n = pid * BLOCK_N1 + if start_n < seqlen_k: + # This section does dk and dv + dk = tl.zeros([BLOCK_N1, HEAD_DIM_QK], dtype=tl.float32) + dv = tl.zeros([BLOCK_N1, HEAD_DIM_V], dtype=tl.float32) + + # q > k: diretcly skip all the way until the start of causal block + start_delta_q_gt_k = delta_qk + # q < k: some blocks will have no Masked block, other needs to re-calc + # starting position + # delta_qk is negative so flip it, only multiple of BLOCK_N can skip the + # masked op + num_blocks_skip = -delta_qk // BLOCK_N1 + delta_aligned = (num_blocks_skip + 1) * BLOCK_N1 + delta_qk + start_delta_q_lt_k = delta_aligned // BLOCK_M1 * BLOCK_M1 + if delta_qk >= 0: + start_delta = delta_qk + if DEBUG_TRITON: + print( + f"q >= k: start_delta = delta_qk aligned to BLOCK_M = {start_delta_q_gt_k}" + ) # noqa: E701 + else: + start_delta = start_delta_q_lt_k + if DEBUG_TRITON: + print( + f"q < k: start_delta = residue btw multiple BLOCK_N and delta_qk = {delta_aligned} = aligned to BLOCK_M = {start_delta_q_lt_k}" + ) # noqa: E701 + + offs_n = start_n + tl.arange(0, BLOCK_N1) + # Mask for loading K and V + mask_k = offs_n[:, None] < seqlen_k + mask_v = offs_n[:, None] < seqlen_k + if PADDED_HEAD_QK: + mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK + mask_k &= mask_d_qk[None, :] + if PADDED_HEAD_V: + mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V + mask_v &= mask_d_v[None, :] + + # K/V tensors not changed for the group + adj_k = ( + bid * stride_kb + + hkid * stride_kh + + k_start * stride_kn + + offs_n[:, None] * stride_kn + + offs_d_qk[None, :] * stride_kd + ) + adj_v = ( + bid * stride_vb + + hkid * stride_vh + + k_start * stride_vn + + offs_n[:, None] * stride_vn + + offs_d_v[None, :] * stride_vd + ) + # load K and V: they stay in SRAM throughout the inner loop. + k = tl.load(K + adj_k, mask=mask_k, other=0.0) + v = tl.load(V + adj_v, mask=mask_v, other=0.0) + # If MQA / GQA, set the K and V head offsets appropriately. 
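# Head mapping assumed by the GQA/MQA loop that follows: each of the HK key/value heads
# serves a contiguous group of GROUP_SIZE = HQ // HK query heads (the Python wrapper
# asserts HQ % HK == 0). Tiny sketch of that mapping:
HQ, HK = 8, 2
GROUP_SIZE = HQ // HK                  # 4 query heads share each K/V head
for hkid in range(HK):
    hq_ids = list(range(hkid * GROUP_SIZE, (hkid + 1) * GROUP_SIZE))
    print(hkid, hq_ids)                # 0 [0, 1, 2, 3]  /  1 [4, 5, 6, 7]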
+ # hqid = hkid + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + if delta_qk >= 0: + start_m = start_n + start_delta + len_m = BLOCK_N1 + else: + start_m = max(start_n + delta_qk, 0) + start_m = start_m // BLOCK_M1 * BLOCK_M1 + # because we might shift the masked blocks up, we are deeper into + # the masked out region, so we would potentially increase the total + # steps with masked operation to get out of it + residue_m = max(start_n + delta_qk - start_m, 0) + len_m = BLOCK_N1 + residue_m + if DEBUG_TRITON: + print(f"residue_m = {residue_m}") # noqa: E701 + + # offset input and output tensor by batch and Q/K heads + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + Q_ptr = Q + adj_q + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + DO_ptr = DO + adj_do + adj_delta = ( + bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m + ) + Delta_ptr = Delta + adj_delta + adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m + M_ptr = M + adj_m + + if USE_ALIBI: + alibi_offset = bid * stride_az + hqid * stride_ah + alibi_slope = tl.load(Alibi_slopes + alibi_offset) + else: + alibi_slope = None + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + if IS_FP8: + descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) + descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) + descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) + descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR + # bound the masked operation to q len so it does not have to wast cycles + len_m = min(len_m, seqlen_q) + num_steps = tl.cdiv(len_m, MASK_BLOCK_M1) + # when q < k, we may skip the initial masked op + if pid < num_blocks_skip: + num_steps = 0 + + # if start_m is negative, the current N-tile has no block on the + # diagonal of causal mask, so everything have no causal mask + if DEBUG_TRITON: + print( + f"Masked: start_n: {start_n}; start_m: {start_m}, num_steps: {num_steps}" + ) # noqa: E701 + dk, dv = _bwd_dkdv_inner( + dk, + dv, # output tensors + Q_ptr, + k, + v, + DO_ptr, + M_ptr, + Delta_ptr, + sm_scale, # input tensors + stride_qm, + stride_qd, # strides for q + stride_dom, + stride_dod, # strides for o + stride_dropoutm, + stride_dropoutn, # strides for dropout + stride_lse_m, + stride_delta_m, + MASK_BLOCK_M1, + BLOCK_N1, # block dim + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, # head dim + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, + MASK=True, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + start_m += num_steps * MASK_BLOCK_M1 + num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M1) + end_m = start_m + num_steps * 
BLOCK_M1 + + if DEBUG_TRITON: + print( + f"start_m after Masked step: {start_m}; num_steps: {num_steps}" + ) # noqa: E701 + if DEBUG_TRITON: + print( + f"unMasked: start_n: {start_n}, start_m: {start_m}, end_m: {end_m}, num_steps: {num_steps}" + ) # noqa: E701 + if DEBUG_TRITON: + print("unMasked") # noqa: E701 + dk, dv = _bwd_dkdv_inner( + dk, + dv, # output tensors + Q_ptr, + k, + v, + DO_ptr, + M_ptr, + Delta_ptr, + sm_scale, # input tensors + stride_qm, + stride_qd, # strides for q + stride_dom, + stride_dod, # strides for o + stride_dropoutm, + stride_dropoutn, # strides for dropout + stride_lse_m, + stride_delta_m, + BLOCK_M1, + BLOCK_N1, # block dim + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, # head dim + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, + MASK=False, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + # end of GQA/MQA of dkdv + # Write back dV + adj_dv = bid * stride_dvb + hkid * stride_dvh + k_start * stride_dvn + offs_dv = offs_n[:, None] * stride_dvn + offs_d_v[None, :] * stride_dvd + tl.store(DV + adj_dv + offs_dv, dv, mask=mask_v) + # write back dk + adj_dk = bid * stride_dkb + hkid * stride_dkh + k_start * stride_dkn + offs_dk = offs_n[:, None] * stride_dkn + offs_d_qk[None, :] * stride_dkd + dk *= sm_scale + tl.store(DK + adj_dk + offs_dk, dk, mask=mask_k) + + # This part does dq + start_m = pid * BLOCK_M2 + if start_m < seqlen_q: + # seqlen_q > seqlen_k, no need to process these tile for dq + if DEBUG_TRITON: + print( + f"end_n = start_m + BLOCK_M = {start_m} + {BLOCK_M2} = {start_m + BLOCK_M2}" + ) # noqa: E701 + if start_m + BLOCK_M2 < delta_qk: + if DEBUG_TRITON: + print( + f"start_m + BLOCK_M2 = {start_m} + {BLOCK_M2} = {start_m + BLOCK_M2} < delta_qk of {delta_qk}" + ) # noqa: E701 + return + + offs_m = start_m + tl.arange(0, BLOCK_M2) + # Mask for loading K and V + mask_q = offs_m[:, None] < seqlen_q + mask_do = offs_m[:, None] < seqlen_q + if PADDED_HEAD_QK: + mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK + mask_q &= mask_d_qk[None, :] + if PADDED_HEAD_V: + mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V + mask_do &= mask_d_v[None, :] + offs_q = offs_m[:, None] * stride_qm + offs_d_qk[None, :] * stride_qd + offs_do = offs_m[:, None] * stride_dom + offs_d_v[None, :] * stride_dod + # NOTE: don't assume that the strides for k and v are the same! + K += bid * stride_kb + hkid * stride_kh + k_start * stride_kn + V += bid * stride_vb + hkid * stride_vh + k_start * stride_vn + + # If MQA / GQA, set the K and V head offsets appropriately. 
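# Causal convention shared by the dk/dv and dq passes: the mask is aligned to the bottom
# right of the (seqlen_q, seqlen_k) score matrix, i.e. query i may attend key j iff
# (i - delta_qk) >= j with delta_qk = seqlen_q - seqlen_k, so the last query always sees
# every key. Sketch of that predicate:
import torch

def bottom_right_causal_mask(seqlen_q, seqlen_k):
    i = torch.arange(seqlen_q)[:, None]
    j = torch.arange(seqlen_k)[None, :]
    return (i - (seqlen_q - seqlen_k)) >= j

print(bottom_right_causal_mask(2, 4).int())
# tensor([[1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)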
+ for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + # seqlen_q < seqlen_k: delta_qk more kv tokens are added at the front + # for every M-tile + end_n = start_m + BLOCK_M2 - delta_qk + # clamp end_n at [0, seqlen_k] + end_n = max(min(end_n, seqlen_k), 0) + if DEBUG_TRITON: + print(f"delta_qk: {delta_qk}; end_n: {end_n}") # noqa: E701 + # offset input and output tensor by batch and Q/K heads + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + adj_delta = ( + bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m + ) + Delta_ptr = Delta + adj_delta + adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m + M_ptr = M + adj_m + + if USE_ALIBI: + alibi_offset = bid * stride_az + hqid * stride_ah + alibi_slope = tl.load(Alibi_slopes + alibi_offset) + else: + alibi_slope = None + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) + do = tl.load(DO + adj_do + offs_do, mask=mask_do, other=0.0) + m = tl.load(M + adj_m + offs_m * stride_lse_m, mask=offs_m < seqlen_q) + m = m[:, None] + + MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR + # start can only be 0 at minimum + start_n = max(end_n - BLOCK_M2, 0) + num_steps = tl.cdiv(end_n - start_n, MASK_BLOCK_N2) + + if IS_FP8: + descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) + descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) + descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) + descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + dq = tl.zeros([BLOCK_M2, HEAD_DIM_QK], dtype=tl.float32) + dq = _bwd_dq_inner( + dq, + q, + K, + V, + do, + m, + Delta_ptr, + sm_scale, + stride_qm, + stride_qd, + stride_kn, + stride_kd, + stride_vn, + stride_vd, + stride_dropoutm, + stride_dropoutn, + stride_lse_m, + stride_delta_m, + seqlen_q, + seqlen_k, + BLOCK_M2, + MASK_BLOCK_N2, + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + MASK=True, # + ENABLE_DROPOUT=ENABLE_DROPOUT, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + end_n -= num_steps * MASK_BLOCK_N2 + num_steps = tl.cdiv(end_n, BLOCK_N2) + start_n = max(end_n - num_steps * BLOCK_N2, 0) + if DEBUG_TRITON: + print( + f"unMasked: start_m: {start_m}, start_n: {start_n}, end_n: {end_n}, num_steps: {num_steps}" + ) # noqa: E701 + dq = _bwd_dq_inner( + dq, + q, + K, + V, + do, + m, + Delta_ptr, + sm_scale, + stride_qm, + stride_qd, + stride_kn, + stride_kd, + stride_vn, + stride_vd, + stride_dropoutm, + stride_dropoutn, + stride_lse_m, + stride_delta_m, + seqlen_q, + seqlen_k, + BLOCK_M2, + BLOCK_N2, + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + 
alibi_slope, + start_m, + start_n, + end_n, + num_steps, + descale_q, + descale_k, + descale_v, + descale_do, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + # Write back dQ. + adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm + offs_dq = offs_m[:, None] * stride_dqm + offs_d_qk[None, :] * stride_dqd + dq *= sm_scale + tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) + # end of GQA/MQA of dq + + +@triton.autotune( + configs=noncausal_autotune_configs, + key=noncausal_autotune_keys, + use_cuda_graph=True, +) +@triton.jit +def bwd_kernel_noncausal( + Q, + K, + V, + sm_scale, + DO, + DQ, + DK, + DV, + M, + Delta, + stride_qb, + stride_qh, + stride_qm, + stride_qd, + stride_kb, + stride_kh, + stride_kn, + stride_kd, + stride_vb, + stride_vh, + stride_vn, + stride_vd, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqd, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkd, + stride_dvb, + stride_dvh, + stride_dvn, + stride_dvd, + stride_lse_b, + stride_lse_h, + stride_lse_m, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + stride_az, + stride_ah, + HQ, + HK, + cu_seqlens_q, + cu_seqlens_k, + seqused_q, + seqused_k, # Add seqused parameters + max_seqlen_q, + max_seqlen_k, + Dropout_mask, + dropout_p, + philox_seed, + philox_offset_base, + Alibi_slopes, + Descale_q, + Descale_k, + Descale_v, + Descale_do, + BLOCK_M1: tl.constexpr, # 32 + BLOCK_N1: tl.constexpr, # 128 + BLOCK_M2: tl.constexpr, # 128 + BLOCK_N2: tl.constexpr, # 32 + BLK_SLICE_FACTOR: tl.constexpr, + HEAD_DIM_QK: tl.constexpr, + HEAD_DIM_V: tl.constexpr, + ACTUAL_HEAD_DIM_QK: tl.constexpr, + ACTUAL_HEAD_DIM_V: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + IS_VARLEN: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + FP8_OUTPUT: tl.constexpr, + USE_SEQUSED: tl.constexpr, # Add flag for seqused + DEBUG_TRITON: tl.constexpr, + DEBUG_TRITON_DETAIL: tl.constexpr, +): + # program ids + hkid = tl.program_id(0) + pid = tl.program_id(1) + bid = tl.program_id(2) + if DEBUG_TRITON: + print(f"\npid: {pid}, bid: {bid}, hkid: {hkid}") # noqa: E701 + # figure out varlen start and end + q_start = 0 + k_start = 0 + seqlen_q = max_seqlen_q + seqlen_k = max_seqlen_k + if IS_VARLEN: + # Compute actual sequence lengths + q_start = tl.load(cu_seqlens_q + bid) + q_end = tl.load(cu_seqlens_q + bid + 1) + k_start = tl.load(cu_seqlens_k + bid) + k_end = tl.load(cu_seqlens_k + bid + 1) + + # If seqused is provided, use it to limit the actual sequence length + if USE_SEQUSED: + actual_seqlen_q = ( + tl.load(seqused_q + bid) if seqused_q is not None else q_end - q_start + ) + seqlen_q = tl.minimum(actual_seqlen_q, q_end - q_start) + actual_seqlen_k = ( + tl.load(seqused_k + bid) if seqused_k is not None else k_end - k_start + ) + seqlen_k = tl.minimum(actual_seqlen_k, k_end - k_start) + else: + seqlen_q = q_end - q_start + seqlen_k = k_end - k_start + + PADDED_HEAD_QK: tl.constexpr = ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK + PADDED_HEAD_V: tl.constexpr = ACTUAL_HEAD_DIM_V != HEAD_DIM_V + offs_d_qk = tl.arange(0, HEAD_DIM_QK) + offs_d_v = tl.arange(0, HEAD_DIM_V) + GROUP_SIZE: tl.constexpr = HQ // 
HK + + start_n = pid * BLOCK_N1 + if start_n < seqlen_k: + dk = tl.zeros([BLOCK_N1, HEAD_DIM_QK], dtype=tl.float32) + dv = tl.zeros([BLOCK_N1, HEAD_DIM_V], dtype=tl.float32) + + offs_n = start_n + tl.arange(0, BLOCK_N1) + # Mask for loading K and V + mask_k = offs_n[:, None] < seqlen_k + mask_v = offs_n[:, None] < seqlen_k + if PADDED_HEAD_QK: + mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK + mask_k &= mask_d_qk[None, :] + if PADDED_HEAD_V: + mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V + mask_v &= mask_d_v[None, :] + # NOTE: don't assume that the strides for k and v are the same! + # K/V tensors not changed for the group + adj_k = ( + bid * stride_kb + + hkid * stride_kh + + k_start * stride_kn + + offs_n[:, None] * stride_kn + + offs_d_qk[None, :] * stride_kd + ) + adj_v = ( + bid * stride_vb + + hkid * stride_vh + + k_start * stride_vn + + offs_n[:, None] * stride_vn + + offs_d_v[None, :] * stride_vd + ) + # load K and V: they stay in SRAM throughout the inner loop. + k = tl.load(K + adj_k, mask=mask_k, other=0.0) + v = tl.load(V + adj_v, mask=mask_v, other=0.0) + # If MQA / GQA, set the K and V head offsets appropriately. + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + # offset input and output tensor by batch and Q/K heads + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + Q_ptr = Q + adj_q + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + DO_ptr = DO + adj_do + adj_delta = ( + bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m + ) + Delta_ptr = Delta + adj_delta + adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m + M_ptr = M + adj_m + + if USE_ALIBI: + alibi_offset = bid * stride_az + hqid * stride_ah + alibi_slope = tl.load(Alibi_slopes + alibi_offset) + else: + alibi_slope = None + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + if IS_FP8: + descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) + descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) + descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) + descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + # because there is no causal, we always start from the beginning + start_m = 0 + num_steps = tl.cdiv(seqlen_q, BLOCK_M1) + dk, dv = _bwd_dkdv_inner( + dk, + dv, # output tensors + Q_ptr, + k, + v, + DO_ptr, + M_ptr, + Delta_ptr, + sm_scale, # input tensors + stride_qm, + stride_qd, # strides for q + stride_dom, + stride_dod, # strides for o + stride_dropoutm, + stride_dropoutn, # strides for dropout + stride_lse_m, + stride_delta_m, + BLOCK_M1, + BLOCK_N1, # block dim + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, # head dim + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, # + alibi_slope, + seqlen_q, + seqlen_k, # max sequence length for q and k + start_n, + start_m, + num_steps, # iteration numbers + descale_q, + descale_k, + descale_v, + descale_do, # fp8 descale factors from user + MASK=False, # causal masking + ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + 
FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + + # Write back dV + adj_dv = bid * stride_dvb + hkid * stride_dvh + k_start * stride_dvn + offs_dv = offs_n[:, None] * stride_dvn + offs_d_v[None, :] * stride_dvd + tl.store(DV + adj_dv + offs_dv, dv, mask=mask_v) + # write back dk + adj_dk = bid * stride_dkb + hkid * stride_dkh + k_start * stride_dkn + offs_dk = offs_n[:, None] * stride_dkn + offs_d_qk[None, :] * stride_dkd + dk *= sm_scale + tl.store(DK + adj_dk + offs_dk, dk, mask=mask_k) + + # THIS PART DOES DQ + start_m = pid * BLOCK_M2 + if start_m < seqlen_q: + offs_m = start_m + tl.arange(0, BLOCK_M2) + # Mask for loading K and V + mask_q = offs_m[:, None] < seqlen_q + mask_do = offs_m[:, None] < seqlen_q + if PADDED_HEAD_QK: + mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK + mask_q &= mask_d_qk[None, :] + if PADDED_HEAD_V: + mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V + mask_do &= mask_d_v[None, :] + offs_q = offs_m[:, None] * stride_qm + offs_d_qk[None, :] * stride_qd + offs_do = offs_m[:, None] * stride_dom + offs_d_v[None, :] * stride_dod + K += bid * stride_kb + hkid * stride_kh + k_start * stride_kn + V += bid * stride_vb + hkid * stride_vh + k_start * stride_vn + # If MQA / GQA, set the K and V head offsets appropriately. + for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): + # offset input and output tensor by batch and Q/K heads + adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm + adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom + adj_delta = ( + bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m + ) + Delta_ptr = Delta + adj_delta + adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m + M_ptr = M + adj_m + + if USE_ALIBI: + alibi_offset = bid * stride_az + hqid * stride_ah + alibi_slope = tl.load(Alibi_slopes + alibi_offset) + else: + alibi_slope = None + + # batch_philox_offset is the ACTUALLY dropout offset + # dropout_offset is for debug purpose and will be removed later + batch_philox_offset = 0 + dropout_offset = 0 + if ENABLE_DROPOUT: + batch_philox_offset = ( + philox_offset_base + bid * stride_dropoutb + hqid * stride_dropouth + ) + dropout_offset = ( + Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth + ) + + q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) + do = tl.load(DO + adj_do + offs_do, mask=mask_do, other=0.0) + m = tl.load(M + adj_m + offs_m * stride_lse_m, mask=offs_m < seqlen_q) + m = m[:, None] + + if IS_FP8: + descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) + descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) + descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) + descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) + else: + descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 + + # start can only be 0 at minimum + start_n = 0 + end_n = seqlen_k + num_steps = tl.cdiv(seqlen_k, BLOCK_N2) + + dq = tl.zeros([BLOCK_M2, HEAD_DIM_QK], dtype=tl.float32) + dq = _bwd_dq_inner( + dq, + q, + K, + V, + do, + m, + Delta_ptr, + sm_scale, + stride_qm, + stride_qd, + stride_kn, + stride_kd, + stride_vn, + stride_vd, + stride_dropoutm, + stride_dropoutn, + stride_lse_m, + stride_delta_m, + seqlen_q, + seqlen_k, + BLOCK_M2, + BLOCK_N2, + HEAD_DIM_QK, + HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V, + dropout_p, + philox_seed, + batch_philox_offset, + dropout_offset, + alibi_slope, + start_m, + start_n, + end_n, + num_steps, + descale_q, + 
descale_k, + descale_v, + descale_do, + MASK=False, + ENABLE_DROPOUT=ENABLE_DROPOUT, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + # Write back dQ. + adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm + offs_dq = offs_m[:, None] * stride_dqm + offs_d_qk[None, :] * stride_dqd + dq *= sm_scale + tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) + + +def is_contiguous(x, name): + if x.is_contiguous(): + return x + else: + print(f"{name} is not contiguous") + return x.contiguous() + + +DEBUG_TRITON: bool = False +DEBUG_TRITON_DETAIL: bool = False + + +def attention_backward_triton_split_fused_no_atomics_impl( + do: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + o: torch.Tensor, + softmax_lse: torch.Tensor, + dq: torch.Tensor, + dk: torch.Tensor, + dv: torch.Tensor, + sm_scale: float, + alibi_slopes: Optional[torch.Tensor], + causal: bool, + layout: Literal["bshd", "bhsd", "thd"], + cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + max_seqlen_q: Optional[int], + max_seqlen_k: Optional[int], + dropout_p: float, + philox_seed: Optional[int], + philox_offset: Optional[int], + use_exp2: bool, + # fp8 + descale_q: Optional[torch.Tensor], + descale_k: Optional[torch.Tensor], + descale_v: Optional[torch.Tensor], + descale_o: Optional[torch.Tensor], + descale_do: Optional[torch.Tensor], + descale_dq: Optional[torch.Tensor], + descale_dk: Optional[torch.Tensor], + descale_dv: Optional[torch.Tensor], + # seqused for FA v3 + seqused_q: Optional[torch.Tensor] = None, + seqused_k: Optional[torch.Tensor] = None, +): + # get params, strides and shape + IS_VARLEN = layout == "thd" + use_dropout = dropout_p > 0.0 + + # common assertions + assert ( + 0.0 <= dropout_p <= 1.0 + ), f"dropout_p must be between 0 and 1, got {dropout_p}" + assert ( + q.device == k.device == v.device == o.device == do.device == softmax_lse.device + ), f"All tensors must be on the same device. 
Got: q={q.device}, k={k.device}, v={v.device}, o={o.device}, do={do.device}, softmax_lse={softmax_lse.device}" + assert ( + q.dtype == k.dtype == v.dtype == do.dtype + ), "q, k, v, do must have the same dtype" + current_device = torch.cuda.current_device() + assert ( + q.is_cuda and q.device.index == current_device + ), f"Device mismatch: Kernel will launch on cuda:{current_device}, but tensors are on {q.device}" + + # get shapes and strides + if IS_VARLEN: + # shape + total_seqlen_q, nheads_q, head_size_q = q.shape + total_seqlen_k, nheads_k, head_size_k = k.shape + total_seqlen_v, nheads_v, head_size_v = v.shape + nheads_lse, total_seqlen_lse = softmax_lse.shape + + # assert shapes + assert ( + total_seqlen_lse == total_seqlen_q + ), f"softmax_lse seqlen {total_seqlen_lse} != q seqlen {total_seqlen_q}" + assert ( + cu_seqlens_q is not None + ), "cu_seqlens_q must be provided for varlen layout" + assert ( + cu_seqlens_k is not None + ), "cu_seqlens_k must be provided for varlen layout" + assert ( + max_seqlen_q is not None + ), "max_seqlen_q must be provided for varlen layout" + assert ( + max_seqlen_k is not None + ), "max_seqlen_k must be provided for varlen layout" + + # assert head dimensions + assert ( + head_size_q == head_size_k + ), f"head sizes must match: q={head_size_q}, k={head_size_k}" + assert ( + nheads_k == nheads_v + ), f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" + assert ( + nheads_q % nheads_k == 0 + ), f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" + assert ( + nheads_lse == nheads_q + ), f"softmax_lse heads {nheads_lse} != q heads {nheads_q}" + + # assert output shapes + assert o.shape == ( + total_seqlen_q, + nheads_q, + head_size_v, + ), f"o shape {o.shape} != expected {(total_seqlen_q, nheads_q, head_size_v)}" + assert do.shape == o.shape, f"do shape {do.shape} != o shape {o.shape}" + assert dq.shape == q.shape, f"dq shape {dq.shape} != q shape {q.shape}" + assert dk.shape == k.shape, f"dk shape {dk.shape} != k shape {k.shape}" + assert dv.shape == v.shape, f"dv shape {dv.shape} != v shape {v.shape}" + + # assert cu_seqlens + assert ( + cu_seqlens_q.dtype == torch.int32 + ), f"cu_seqlens_q must be int32, got {cu_seqlens_q.dtype}" + assert ( + cu_seqlens_k.dtype == torch.int32 + ), f"cu_seqlens_k must be int32, got {cu_seqlens_k.dtype}" + assert cu_seqlens_q[0] == 0, "cu_seqlens_q must start with 0" + assert cu_seqlens_k[0] == 0, "cu_seqlens_k must start with 0" + assert ( + cu_seqlens_q[-1] == total_seqlen_q + ), f"cu_seqlens_q[-1] {cu_seqlens_q[-1]} != total_seqlen_q {total_seqlen_q}" + assert ( + cu_seqlens_k[-1] == total_seqlen_k + ), f"cu_seqlens_k[-1] {cu_seqlens_k[-1]} != total_seqlen_k {total_seqlen_k}" + + # set vars + batch = len(cu_seqlens_q) - 1 + head_size_qk = head_size_q + + # strides + stride_qb, stride_qm, stride_qh, stride_qd = ( + 0, + q.stride(0), + q.stride(1), + q.stride(2), + ) + stride_kb, stride_kn, stride_kh, stride_kd = ( + 0, + k.stride(0), + k.stride(1), + k.stride(2), + ) + stride_vb, stride_vn, stride_vh, stride_vd = ( + 0, + v.stride(0), + v.stride(1), + v.stride(2), + ) + stride_ob, stride_om, stride_oh, stride_od = ( + 0, + o.stride(0), + o.stride(1), + o.stride(2), + ) + stride_dqb, stride_dqm, stride_dqh, stride_dqd = ( + 0, + dq.stride(0), + dq.stride(1), + dq.stride(2), + ) + stride_dkb, stride_dkn, stride_dkh, stride_dkd = ( + 0, + dk.stride(0), + dk.stride(1), + dk.stride(2), + ) + stride_dvb, stride_dvn, stride_dvh, stride_dvd = ( + 0, + dv.stride(0), + dv.stride(1), + 
dv.stride(2), + ) + stride_dob, stride_dom, stride_doh, stride_dod = ( + 0, + do.stride(0), + do.stride(1), + do.stride(2), + ) + stride_lse_b, stride_lse_h, stride_lse_m = ( + 0, + softmax_lse.stride(0), + softmax_lse.stride(1), + ) + else: + # shapes + batch_q, seqlen_q, nheads_q, head_size_q = q.shape + batch_k, seqlen_k, nheads_k, head_size_k = k.shape + batch_v, seqlen_v, nheads_v, head_size_v = v.shape + batch_lse, nheads_lse, seqlen_lse = softmax_lse.shape + + # assert batch dimensions + assert ( + batch_q == batch_k == batch_v + ), f"batch sizes must match: q={batch_q}, k={batch_k}, v={batch_v}" + + # assert head dimensions + assert ( + head_size_q == head_size_k + ), f"head sizes must match: q={head_size_q}, k={head_size_k}" + assert ( + nheads_k == nheads_v + ), f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" + assert ( + nheads_q % nheads_k == 0 + ), f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" + + # assert sequence lengths + assert ( + seqlen_k == seqlen_v + ), f"k and v sequence lengths must match: k={seqlen_k}, v={seqlen_v}" + + # assert output shapes + assert o.shape == ( + batch_q, + seqlen_q, + nheads_q, + head_size_v, + ), f"o shape {o.shape} != expected" + assert do.shape == o.shape, f"do shape {do.shape} != o shape {o.shape}" + assert dq.shape == q.shape, f"dq shape {dq.shape} != q shape {q.shape}" + assert dk.shape == k.shape, f"dk shape {dk.shape} != k shape {k.shape}" + assert dv.shape == v.shape, f"dv shape {dv.shape} != v shape {v.shape}" + + # assert softmax_lse shape + assert softmax_lse.shape == ( + batch_q, + nheads_q, + seqlen_q, + ), f"softmax_lse shape {softmax_lse.shape} != expected" + + # set vars + batch = batch_q + head_size_qk = head_size_q + max_seqlen_q = seqlen_q + max_seqlen_k = seqlen_k + + # strides + stride_qb, stride_qm, stride_qh, stride_qd = q.stride() + stride_kb, stride_kn, stride_kh, stride_kd = k.stride() + stride_vb, stride_vn, stride_vh, stride_vd = v.stride() + stride_ob, stride_om, stride_oh, stride_od = o.stride() + stride_dqb, stride_dqm, stride_dqh, stride_dqd = dq.stride() + stride_dkb, stride_dkn, stride_dkh, stride_dkd = dk.stride() + stride_dvb, stride_dvn, stride_dvh, stride_dvd = dv.stride() + stride_dob, stride_dom, stride_doh, stride_dod = do.stride() + stride_lse_b, stride_lse_h, stride_lse_m = softmax_lse.stride() + + # fp8 setup - moved after all assertions + IS_FP8 = is_fp8(q) + if IS_FP8: + FP8_MAX = torch.finfo(q.dtype).max + # we already asserted that do, q, k, v all have the same dtype, so no need to check each one + if is_fp8(o): + FP8_OUTPUT = True + assert ( + descale_o is not None + ), f"descale_o is None. In fp8, you need to pass a tensor for descale_o along with a tensor o." + assert ( + descale_dq is not None + ), f"descale_dq is None. In fp8, you need to pass a tensor for descale_dq along with a tensor dq." + assert ( + descale_dk is not None + ), f"descale_dk is None. In fp8, you need to pass a tensor for descale_dk along with a tensor dk." + assert ( + descale_dv is not None + ), f"descale_dv is None. In fp8, you need to pass a tensor for descale_dv along with a tensor dv." 
+ else: + FP8_OUTPUT = False + + stride_descale_q_z = descale_q.stride(0) if descale_q is not None else None + stride_descale_k_z = descale_k.stride(0) if descale_k is not None else None + stride_descale_v_z = descale_v.stride(0) if descale_v is not None else None + stride_descale_o_z = descale_o.stride(0) if descale_o is not None else None + stride_descale_do_z = descale_do.stride(0) if descale_do is not None else None + + if DEBUG: + print(f"FP8 path triggered (FP8_OUTPUT={FP8_OUTPUT})") + else: + FP8_MAX = None + FP8_OUTPUT = False + stride_descale_q_z = stride_descale_k_z = stride_descale_v_z = ( + stride_descale_o_z + ) = stride_descale_do_z = None + + # alibi setup + use_alibi, (stride_az, stride_ah) = ( + (True, alibi_slopes.stride()) if alibi_slopes is not None else (False, (0, 0)) + ) + + # get closest power of 2 over or equal to 32. + padded_d_model_qk = 1 << (head_size_qk - 1).bit_length() + padded_d_model_qk = max(padded_d_model_qk, 32) + padded_d_model_v = 1 << (head_size_v - 1).bit_length() + padded_d_model_v = max(padded_d_model_v, 32) + HEAD_DIM_QK = padded_d_model_qk + HEAD_DIM_V = padded_d_model_v + ACTUAL_HEAD_DIM_QK = head_size_qk + ACTUAL_HEAD_DIM_V = head_size_v + + # init delta + if IS_VARLEN: + # Shape expected by interface varlen backward: (Hq, Total_Q) + total_q, _, _ = q.shape + delta = torch.zeros((nheads_q, total_q), device=q.device, dtype=torch.float32) + stride_delta_b, stride_delta_h, stride_delta_m = ( + 0, + delta.stride(0), + delta.stride(1), + ) + else: + # Shape expected by dense backward: (B, Hq, Sq) + seqlen_q = q.shape[1] + delta = torch.zeros( + (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32 + ) + stride_delta_b, stride_delta_h, stride_delta_m = delta.stride() + + pre_grid = lambda META: ( + triton.cdiv(max_seqlen_q, META["PRE_BLOCK"]), + batch, + nheads_q, + ) + _bwd_preprocess[pre_grid]( + o, + do, + delta, + stride_ob, + stride_oh, + stride_om, + stride_od, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_descale_do_z, + cu_seqlens_q, + max_seqlen_q, + descale_do, + HEAD_DIM_V=HEAD_DIM_V, + ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + ) + + if False: + print("delta:", delta, delta.shape) + + # dropout mask tensor for debugging. 
We dump the dropout mask created in + # the kernel for testing + dropout_mask = None + stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = (0, 0, 0, 0) + if use_dropout: + dropout_mask = torch.zeros( + (batch, nheads_q, max_seqlen_q, max_seqlen_k), + device=q.device, + dtype=torch.float32, + ) + + if DROPOUT_USE_PYTORCH: + if not IS_VARLEN: + dropout_mask = create_dropout_mask( + dropout_p, + (batch, nheads_q, max_seqlen_q, max_seqlen_k), + seed=philox_seed, + ) + else: + dropout_mask = create_dropout_mask_varlen( + dropout_p, batch, nheads_q, cu_seqlens_q, cu_seqlens_k, philox_seed + ) + stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = ( + dropout_mask.stride() + ) + + seqlen = max(max_seqlen_q, max_seqlen_k) + grid = lambda META: ( + nheads_k, + (seqlen + META["BLOCK_N1"] - 1) // META["BLOCK_N1"], + batch, + ) + if causal: + if DEBUG_TRITON: + print(f"bwd_kernel: grid = {grid}") # noqa: E701 + bwd_kernel_causal[grid]( + q, + k, + v, + sm_scale, + do, + dq, + dk, + dv, + softmax_lse, + delta, + stride_qb, + stride_qh, + stride_qm, + stride_qd, + stride_kb, + stride_kh, + stride_kn, + stride_kd, + stride_vb, + stride_vh, + stride_vn, + stride_vd, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqd, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkd, + stride_dvb, + stride_dvh, + stride_dvn, + stride_dvd, + stride_lse_b, + stride_lse_h, + stride_lse_m, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + stride_az, + stride_ah, + nheads_q, + nheads_k, + cu_seqlens_q, + cu_seqlens_k, + seqused_q, + seqused_k, # Pass seqused tensors + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + alibi_slopes, + descale_q, + descale_k, + descale_v, + descale_do, + HEAD_DIM_QK=HEAD_DIM_QK, + HEAD_DIM_V=HEAD_DIM_V, + ACTUAL_HEAD_DIM_QK=ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + USE_ALIBI=use_alibi, + USE_EXP2=use_exp2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + FP8_OUTPUT=FP8_OUTPUT, + USE_SEQUSED=( + seqused_q is not None or seqused_k is not None + ), # Add flag for seqused + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + else: + bwd_kernel_noncausal[grid]( + q, + k, + v, + sm_scale, + do, + dq, + dk, + dv, + softmax_lse, + delta, + stride_qb, + stride_qh, + stride_qm, + stride_qd, + stride_kb, + stride_kh, + stride_kn, + stride_kd, + stride_vb, + stride_vh, + stride_vn, + stride_vd, + stride_dqb, + stride_dqh, + stride_dqm, + stride_dqd, + stride_dkb, + stride_dkh, + stride_dkn, + stride_dkd, + stride_dvb, + stride_dvh, + stride_dvn, + stride_dvd, + stride_lse_b, + stride_lse_h, + stride_lse_m, + stride_delta_b, + stride_delta_h, + stride_delta_m, + stride_dob, + stride_doh, + stride_dom, + stride_dod, + stride_dropoutb, + stride_dropouth, + stride_dropoutm, + stride_dropoutn, + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + stride_az, + stride_ah, + nheads_q, + nheads_k, + cu_seqlens_q, + cu_seqlens_k, + seqused_q, + seqused_k, # Pass seqused tensors + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + alibi_slopes, + descale_q, + descale_k, + descale_v, + descale_do, + HEAD_DIM_QK=HEAD_DIM_QK, + HEAD_DIM_V=HEAD_DIM_V, + 
ACTUAL_HEAD_DIM_QK=ACTUAL_HEAD_DIM_QK, + ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + USE_ALIBI=use_alibi, + USE_EXP2=use_exp2, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + FP8_OUTPUT=FP8_OUTPUT, + USE_SEQUSED=( + seqused_q is not None or seqused_k is not None + ), # Add flag for seqused + DEBUG_TRITON=DEBUG_TRITON, + DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, + ) + + return delta + + +def attention_backward_triton_fused_atomics_impl( + do: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + o: torch.Tensor, + softmax_lse: torch.Tensor, + dq: torch.Tensor, + dk: torch.Tensor, + dv: torch.Tensor, + sm_scale: float, + alibi_slopes: Optional[torch.Tensor], + causal: bool, + cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + max_seqlen_q: int, + max_seqlen_k: int, + dropout_p: float, + philox_seed: Optional[int] = 0, + philox_offset: Optional[int] = 0, + descale_q: Optional[torch.Tensor] = None, + descale_k: Optional[torch.Tensor] = None, + descale_v: Optional[torch.Tensor] = None, + descale_do: Optional[torch.Tensor] = None, + fused: bool = False, + # seqused for FA v3 (currently ignored in this implementation) + seqused_q: Optional[torch.Tensor] = None, + seqused_k: Optional[torch.Tensor] = None, +): + IS_FP8 = is_fp8(q) + if IS_FP8: + FP8_MAX = torch.finfo(q.dtype).max + descale_strides = ( + descale_q.stride(0), + descale_k.stride(0), + descale_v.stride(0), + descale_do.stride(0), + ) + + if DEBUG: + print(f"FP8 path triggered") + else: + FP8_MAX = None + stride_descale_q_z = stride_descale_k_z = stride_descale_v_z = ( + stride_descale_do_z + ) = None + descale_strides = ( + stride_descale_q_z, + stride_descale_k_z, + stride_descale_v_z, + stride_descale_do_z, + ) + + IS_VARLEN = True if cu_seqlens_q is not None else False + + # get strides and shape + if IS_VARLEN: + # Layout for q,k,v is thd ie [total tokens, num_head, head_dim] + batch, seqlen_q, num_q_heads, head_sz = ( + len(cu_seqlens_q) - 1, + max_seqlen_q, + q.shape[1], + q.shape[2], + ) + seqlen_k, num_k_heads = max_seqlen_k, k.shape[1] + q_strides = (0, q.stride(1), q.stride(0), q.stride(2)) + q_strides = (0, q.stride(1), q.stride(0), q.stride(2)) + k_strides = (0, k.stride(1), k.stride(0), k.stride(2)) + v_strides = (0, v.stride(1), v.stride(0), v.stride(2)) + o_strides = (0, o.stride(1), o.stride(0), o.stride(2)) + dq_strides = (0, dq.stride(1), dq.stride(0), dq.stride(2)) + dk_strides = (0, dk.stride(1), dk.stride(0), dk.stride(2)) + dv_strides = (0, dv.stride(1), dv.stride(0), dv.stride(2)) + do_strides = (0, do.stride(1), do.stride(0), do.stride(2)) + else: + # Layout for q,k,v is bshd ie [batch, seq_len, num_head, head_dim] + batch, seqlen_q, num_q_heads, head_sz = q.shape + seqlen_k, num_k_heads = k.shape[1], k.shape[2] + q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3)) + k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3)) + v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3)) + o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3)) + dq_strides = (dq.stride(0), dq.stride(2), dq.stride(1), dq.stride(3)) + dk_strides = (dk.stride(0), dk.stride(2), dk.stride(1), dk.stride(3)) + dv_strides = (dv.stride(0), dv.stride(2), dv.stride(1), dv.stride(3)) + do_strides = (do.stride(0), do.stride(2), do.stride(1), do.stride(3)) + + # BLOCK_D_MODEL, BLOCK_D_MODEL_POW2 + # padding for head_dim. 
Power of 2 or 16 + BLOCK_D_MODEL_POW2 = triton.next_power_of_2(head_sz) + BLOCK_D_MODEL_POW2 = max(BLOCK_D_MODEL_POW2, 16) + + # Configs + # PRE_BLOCK, BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 + # BLK_SLICE_FACTOR + NUM_WARPS, NUM_STAGES = 4, 1 + WAVES_PER_EU = 1 + PRE_BLOCK = 128 + # BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32 + BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 64, 64, 64, 16 + BLK_SLICE_FACTOR = 2 + + # init delta + delta = torch.zeros_like(softmax_lse) + if IS_VARLEN: + # [total_tokens, num_q_heads, seqlen_q] + delta_strides = (0, delta.stride(1), delta.stride(0)) + else: + # [batch, num_q_heads, seqlen_q] + delta_strides = delta.stride() + + # preprocess + # compute D(delta) = rowsum(dO*O). Note, multiplication is element-wise. + pre_grid = (triton.cdiv(max_seqlen_q, PRE_BLOCK), batch, num_q_heads) + _bwd_fused_atomics_preprocess[pre_grid]( + o, + do, + delta, + *o_strides, + *delta_strides, + descale_strides[3], + cu_seqlens_q, + max_seqlen_q, + descale_do, + BLOCK_M=PRE_BLOCK, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + ) + + # dropout_mask + use_dropout = dropout_p > 0.0 + if use_dropout: + dropout_mask = torch.zeros( + (batch, num_q_heads, max_seqlen_q, max_seqlen_k), + device=q.device, + dtype=torch.float32, + ) + dropout_strides = dropout_mask.stride() + else: + dropout_mask = None + dropout_strides = (0, 0, 0, 0) + + grid_dkdv = ((max_seqlen_k + BLOCK_N1 - 1) // BLOCK_N1, batch, num_k_heads) + grid_dq = ((max_seqlen_q + BLOCK_M2 - 1) // BLOCK_M2, batch, num_k_heads) + + if ( + fused + ): # fuses dk, dv, dq computations into one kernel by computing the dq using atomic adds between workgroups + + BLOCK_N = ( + 128 if BLOCK_D_MODEL_POW2 < 160 else 64 + ) # larger head sizes lead to oom + config = { + "BLOCK_M": 32, + "BLOCK_N": BLOCK_N, + "num_warps": 4, + "num_stages": 1, + "waves_per_eu": 1, + "BLK_SLICE_FACTOR": 2, + } + + num_k_pids = (max_seqlen_k + BLOCK_N - 1) // BLOCK_N + grid_dkdvdq = (batch * num_k_heads * num_k_pids,) + + if causal: + _bwd_kernel_fused_atomics_dkdvdq_causal[grid_dkdvdq]( + q, + k, + v, + sm_scale, + do, + dk, + dv, + dq, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dk_strides, + *dq_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BATCH=batch, + NUM_K_PIDS=num_k_pids, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + **config, + ) + else: + _bwd_kernel_fused_atomics_dkdvdq_noncausal[grid_dkdvdq]( + q, + k, + v, + sm_scale, + do, + dk, + dv, + dq, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dk_strides, + *dq_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BATCH=batch, + NUM_K_PIDS=num_k_pids, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + **config, + ) + + return delta + + # split 
kernels solution: one kernel computes dk, dv and the other computes dq + + if causal: + _bwd_kernel_fused_atomics_dkdv_causal[grid_dkdv]( + q, + k, + v, + sm_scale, + do, + dk, + dv, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dk_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BLOCK_M=BLOCK_M1, + BLOCK_N=BLOCK_N1, + BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + num_warps=NUM_WARPS, + num_stages=NUM_STAGES, + waves_per_eu=WAVES_PER_EU, + ) + _bwd_kernel_fused_atomics_dq_causal[grid_dq]( + q, + k, + v, + sm_scale, + do, + dq, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dq_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BLOCK_M=BLOCK_M2, + BLOCK_N=BLOCK_N2, + BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + num_warps=NUM_WARPS, + num_stages=NUM_STAGES, + waves_per_eu=WAVES_PER_EU, + ) + else: + _bwd_kernel_fused_atomics_dkdv_noncausal[grid_dkdv]( + q, + k, + v, + sm_scale, + do, + dk, + dv, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dk_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BLOCK_M=BLOCK_M1, + BLOCK_N=BLOCK_N1, + BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + num_warps=NUM_WARPS, + num_stages=NUM_STAGES, + waves_per_eu=WAVES_PER_EU, + ) + + _bwd_kernel_fused_atomics_dq_noncausal[grid_dq]( + q, + k, + v, + sm_scale, + do, + dq, + softmax_lse, + delta, + *q_strides, + *k_strides, + *v_strides, + *dq_strides, + *delta_strides, + *do_strides, + *dropout_strides, + *descale_strides, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_mask, + dropout_p, + philox_seed, + philox_offset, + descale_q, + descale_k, + descale_v, + descale_do, + NUM_Q_HEADS=num_q_heads, + NUM_K_HEADS=num_k_heads, + BLOCK_M=BLOCK_M2, + BLOCK_N=BLOCK_N2, + BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, + BLOCK_D_MODEL=head_sz, + BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, + ENABLE_DROPOUT=use_dropout, + IS_VARLEN=IS_VARLEN, + IS_FP8=IS_FP8, + FP8_MAX=FP8_MAX, + num_warps=NUM_WARPS, + num_stages=NUM_STAGES, + waves_per_eu=WAVES_PER_EU, + ) + + return delta + + +def attention_backward_triton_impl( + *, + do: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + o: torch.Tensor, + softmax_lse: torch.Tensor, + dq: torch.Tensor, + dk: torch.Tensor, + dv: torch.Tensor, + sm_scale: float, + alibi_slopes: 
Optional[torch.Tensor], + causal: bool, + layout: str, + cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + max_seqlen_q: Optional[int], + max_seqlen_k: Optional[int], + seqused_q: Optional[torch.Tensor] = None, + seqused_k: Optional[torch.Tensor] = None, + dropout_p: float = 0.0, + philox_seed: Optional[int] = None, + philox_offset: Optional[int] = None, + use_exp2: bool = True, + mode: str = "fused_no_atomics", +) -> torch.Tensor: + """Unified backward interface dispatching to atomics or no-atomics implementation. + + Parameters mirror the superset of the two legacy interfaces. The public API should + call ONLY this function going forward. + mode: 'fused_atomics' or 'fused_no_atomics'; layout: 'bshd' or 'thd'; use_exp2 retained for parity. + """ + # Enforce supported dtypes (mirror Hopper behavior: FP8 forward-only) + supported_dtypes = {torch.float16, torch.bfloat16, torch.float32} + for name, t in {"q": q, "k": k, "v": v, "o": o, "do": do}.items(): + if t.dtype not in supported_dtypes: + raise TypeError( + f"Backward only supports fp16/bf16/fp32; tensor '{name}' has dtype {t.dtype}" + ) + + if mode == "fused_atomics": + # Atomics path ignores layout & use_exp2; pass varlen metadata directly. + return attention_backward_triton_fused_atomics_impl( + do, + q, + k, + v, + o, + softmax_lse, + dq, + dk, + dv, + sm_scale, + alibi_slopes, + causal, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q if max_seqlen_q is not None else q.shape[1], + max_seqlen_k if max_seqlen_k is not None else k.shape[1], + dropout_p, + philox_seed or 0, + philox_offset or 0, + None, + None, + None, + None, + True, # fused flag + None, + None, + ) + elif mode == "fused_no_atomics": + return attention_backward_triton_split_fused_no_atomics_impl( + do, + q, + k, + v, + o, + softmax_lse, + dq, + dk, + dv, + sm_scale, + alibi_slopes, + causal, + layout, # layout required here + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_p, + philox_seed, + philox_offset, + use_exp2, + None, + None, + None, + None, + None, + None, + None, + None, + seqused_q, + seqused_k, + ) + else: + raise ValueError( + f"Unknown backward mode '{mode}'. Expected 'fused_atomics' or 'fused_no_atomics'." 
+ ) diff --git a/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_atomics.py b/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_atomics.py deleted file mode 100755 index 51e53daedc2..00000000000 --- a/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_atomics.py +++ /dev/null @@ -1,1815 +0,0 @@ -import torch -import triton -import triton.language as tl -from flash_attn.flash_attn_triton_amd.utils import compute_fp8_scaling_factors, DEBUG, is_fp8 - -from typing import Optional, Tuple - -# This function computes delta given output Out and gradient DO -# Here is the I/O shape: -# Out: (batch, nhead_q, max_seqlens_q, headDim) -# DO: (batch, nhead_q, max_seqlens_q, headDim) -# Delta: (batch, nheads_q, max_seqlens_q), same as softmax_lse defined at -@triton.jit -def _bwd_preprocess( - o_ptr, do_ptr, # noqa: E741 - delta_ptr, - stride_o_b, stride_o_h, stride_o_m, stride_o_k, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_descale_do_z, - cu_seqlens_q, max_seqlen_q, - descale_do_ptr, - BLOCK_M: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr -): - pid_m = tl.program_id(0) #seqlen - bid = tl.program_id(1) #batch - hid = tl.program_id(2) #head - - # Handle varlen - q_start = 0 - seqlen_q = max_seqlen_q - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - seqlen_q = q_end - q_start - else: - q_start = 0 - seqlen_q = max_seqlen_q - - # Compute offsets - offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - - # Offset O/DO by batch, head and q_start - offs = (bid * stride_o_b + - hid * stride_o_h + - q_start * stride_o_m + offs_m[:, None] * stride_o_m + - offs_k[None, :] * stride_o_k) - - # create masks - mask_m = offs_m < seqlen_q - mask = mask_m[:, None] - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask &= offs_k[None, :] < BLOCK_D_MODEL - - # load [BLOCK_M, BLOCK_D_MODEL_POW2] - o = tl.load(o_ptr + offs, mask=mask, other=0.0) - do = tl.load(do_ptr + offs, mask=mask, other=0.0) - - # compute and write-back to delta - if IS_FP8: - descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hid) - - # NOTE: do is in the fp8 range and o is not in fp8 - delta = tl.sum(o.to(tl.float32) * (do.to(tl.float32) * descale_do), axis=1) - else: - delta = tl.sum(o.to(tl.float32) * do.to(tl.float32), axis=1) - - offs_delta = (bid * stride_delta_b + - hid * stride_delta_h + - q_start * stride_delta_m + offs_m * stride_delta_m) - tl.store(delta_ptr + offs_delta, delta, mask=mask_m) - -@triton.jit -def _bwd_dq_inner( - dq, - q, K, V, do, m, Delta, sm_scale, - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropout_m, stride_dropout_n, - stride_deltam, - seqlen_q, seqlen_k, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - MASK: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - RCP_LN2: tl.constexpr = 1.4426950408889634 - - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M) - offs_n = start_n + tl.arange(0, BLOCK_N) - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - - # mask to make sure not OOB of seqlen_q - 
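# NOTE (usage sketch): a minimal illustration of how the unified
# attention_backward_triton_impl dispatcher introduced above might be called for a
# fixed-length bshd batch. Sizes, dtype and the chosen mode are illustrative
# assumptions; o and softmax_lse would normally come from the forward pass and are
# placeholders here purely to show the expected shapes.
import math
import torch

batch, seqlen, heads, head_dim = 2, 128, 4, 64
q = torch.randn(batch, seqlen, heads, head_dim, device="cuda", dtype=torch.float16)
k, v, o, do = (torch.randn_like(q) for _ in range(4))
softmax_lse = torch.zeros(batch, heads, seqlen, device="cuda", dtype=torch.float32)
dq, dk, dv = torch.zeros_like(q), torch.zeros_like(k), torch.zeros_like(v)

delta = attention_backward_triton_impl(
    do=do, q=q, k=k, v=v, o=o, softmax_lse=softmax_lse,
    dq=dq, dk=dk, dv=dv,
    sm_scale=1.0 / math.sqrt(head_dim),
    alibi_slopes=None, causal=True, layout="bshd",
    cu_seqlens_q=None, cu_seqlens_k=None,
    max_seqlen_q=seqlen, max_seqlen_k=seqlen,
    mode="fused_no_atomics",
)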
mask_m = offs_m < seqlen_q - - kT_ptrs = K + offs_n[None, :] * stride_kn + offs_k[:, None] * stride_kk - vT_ptrs = V + offs_n[None, :] * stride_vn + offs_k[:, None] * stride_vk - - # D (= delta) is pre-divided by ds_scale. - Di = tl.load(Delta + offs_m * stride_deltam, mask=mask_m, other=0.0) - - curr_n = start_n - step_n = BLOCK_N - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - for blk_idx in range(num_steps): - offs_n = curr_n + tl.arange(0, BLOCK_N) - # end_n is needed because the end of causal True might not be perfectly - # aligned with the end of the block - mask_n = offs_n < end_n - mask_kT = mask_n[None, :] - mask_mn = mask_m[:, None] & (offs_n[None, :] < end_n) - if PADDED_HEAD: - mask_kT &= offs_k[:, None] < BLOCK_D_MODEL - - kT = tl.load(kT_ptrs, mask=mask_kT, other=0.0) - vT = tl.load(vT_ptrs, mask=mask_kT, other=0.0) - - #dropout - if ENABLE_DROPOUT: - philox_offs = (curr_philox_offset + - offs_m[:, None] * stride_dropout_m + - offs_n[None, :] * stride_dropout_n) - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1 / (1 - dropout_p) - - #qk - if IS_FP8: - qk = tl.dot(q, kT) * descale_q * descale_k - else: - qk = tl.dot(q, kT) - p = tl.math.exp2(qk * sm_scale * RCP_LN2 - m * RCP_LN2) - - if MASK: - causal_mask = (offs_m[:, None] - delta_qk) >= offs_n[None, :] - mask = causal_mask * mask_mn - p = tl.where(mask, p, 0.0) - - #dp - if IS_FP8: - dp = (tl.dot(do, vT) * descale_do * descale_v) - else: - dp = tl.dot(do, vT) - - if ENABLE_DROPOUT: - dp = tl.where(dropout_mask, dp, 0.0) * dropout_scale - - #ds - delta_i = Di[:, None] - ds = p * (dp - delta_i) - - #dq - # NOTE: We need to de-scale dq in the end, because kT was pre-scaled. - if IS_FP8: - scale_ds, descale_ds = compute_fp8_scaling_factors(ds, FP8_MAX) - dq += (tl.dot((ds*scale_ds).to(kT.type.element_ty), tl.trans(kT)) * descale_ds * descale_k) - else: - dq += tl.dot(ds.to(kT.type.element_ty), tl.trans(kT)) - - curr_n += step_n - kT_ptrs += step_n * stride_kn - vT_ptrs += step_n * stride_vn - return dq - - -@triton.jit -def _bwd_dkdv_inner( - dk, dv, - Q, k, v, DO, M, D, sm_scale, - stride_q_m, stride_q_k, - stride_do_m, stride_do_k, - stride_dropout_m, stride_dropout_n, - stride_deltam, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - seqlen_q, seqlen_k, - start_n, start_m, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - MASK: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M) - offs_n = start_n + tl.arange(0, BLOCK_N) - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - - # mask to make sure not OOB of seqlen_q - mask_n = offs_n < seqlen_k - qT_ptrs = Q + offs_m[None, :] * stride_q_m + offs_k[:, None] * stride_q_k #[BLOCK_D_MODEL_POW2, BLOCK_M] - do_ptrs = DO + offs_m[:, None] * stride_do_m + offs_k[None,: ] * stride_do_k - curr_m = start_m - step_m = BLOCK_M - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 - - #Iterate over blocks(BLOCK_M size) of Q while calculating - #a fixed block(BLOCK_N) of dk and dv. Note, during backward - #pass P has to be recomputed. 
However, this kernel computes - #dV and dK, so we compute we need P^T and S^T. See backward pass - #equations - # - #From Flash Attention Paper: - #ForwardPass: S = QkT, P=softmax(S), O=PV - # - #BackwardPass equations - #dV = P^TdO - #dP = dOV^T - #dS = dsoftmax(dP) - #dQ = dSK - #dK = QdS^T - for blk_idx in range(num_steps): - offs_m = curr_m + tl.arange(0, BLOCK_M) - mask_m = offs_m < seqlen_q - mask_qT = mask_m[None, :] - mask_do = mask_m[:, None] - mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) - if PADDED_HEAD: - mask_qT &= offs_k[:, None] < BLOCK_D_MODEL - mask_do &= offs_k[None, :] < BLOCK_D_MODEL - - #load qT - qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) - - #dropout - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = (curr_philox_offset + - offs_m[None, :] * stride_dropout_m + - offs_n[:, None] * stride_dropout_n) - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1.0 / (1 - dropout_p) - - #Load M - m = tl.load(M + offs_m * stride_deltam, mask=mask_m, other=0.0) - - #Compute qkT - if IS_FP8: - qkT = (tl.dot(k, qT) * descale_q * descale_k) - else: - qkT = tl.dot(k, qT) - - #Compute pT(use m and also apply sm_scale) - pT = tl.math.exp(qkT * sm_scale - m[None, :]) - - if MASK: - causal_mask = (offs_m[None, :] - delta_qk) >= offs_n[:, None] - mask = causal_mask & mask_nm - pT = tl.where(mask, pT, 0.0) - - #load DO - do = tl.load(do_ptrs, mask=mask_do, other=0.0) - - #dV - if ENABLE_DROPOUT: - pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale - if IS_FP8: - scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors(pT_dropout, FP8_MAX) - dv += (tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do) * descale_p_dropout * descale_do) - else: - dv += tl.dot(pT_dropout.to(do.type.element_ty), do) - else: - if IS_FP8: - scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) - dv += (tl.dot((pT * scale_pT).to(do.type.element_ty), do) * descale_pT * descale_do) - else: - dv += tl.dot(pT.to(do.type.element_ty), do) - - #Load delta - Di = tl.load(D + offs_m * stride_deltam, mask=mask_m) - - #Compute dP and dS - if IS_FP8: - dpT = tl.dot(v, tl.trans(do)) * descale_v * descale_do - else: - dpT = tl.dot(v, tl.trans(do)) - - if ENABLE_DROPOUT: - dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale - - delta_i = Di[None, :] - dsT = pT * (dpT - delta_i) - - #compute dk - if IS_FP8: - scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) - dk += (tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) * descale_dsT * descale_q) - else: - dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) - - #increment pointers - curr_m += step_m - qT_ptrs += step_m * stride_q_m - do_ptrs += step_m * stride_do_m - - return dk, dv - - -@triton.jit -def _bwd_dkdvdq_inner( - dk, dv, - Q, k, v, DO, DQ, M, D, sm_scale, - stride_q_m, stride_q_k, - stride_dq_m, stride_dq_k, - stride_do_m, stride_do_k, - stride_dropout_m, stride_dropout_n, - stride_deltam, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - seqlen_q, seqlen_k, - start_n, start_m, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - MASK: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - workgroup_id: tl.int32, -): - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - delta_qk = seqlen_q - 
seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M) - offs_n = start_n + tl.arange(0, BLOCK_N) - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - - # mask to make sure not OOB of seqlen_q - mask_n = offs_n < seqlen_k - - qT_ptrs_start = Q + offs_m[None, :] * stride_q_m + offs_k[:, None] * stride_q_k #[BLOCK_D_MODEL_POW2, BLOCK_M] - dq_ptrs_start = DQ + offs_m[:, None] * stride_dq_m + offs_k[None,:] * stride_dq_k #[BLOCK_M, BLOCK_D_MODEL_POW2] - - do_ptrs_start = DO + offs_m[:, None] * stride_do_m + offs_k[None,: ] * stride_do_k - curr_m = start_m - step_m = BLOCK_M - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 - - #Iterate over blocks(BLOCK_M size) of Q while calculating - #a fixed block(BLOCK_N) of dk and dv. Note, during backward - #pass P has to be recomputed. However, this kernel computes - #dV and dK, so we compute we need P^T and S^T. See backward pass - #equations - # - #From Flash Attention Paper: - #ForwardPass: S = QkT, P=softmax(S), O=PV - # - #BackwardPass equations - #dV = P^TdO - #dP = dOV^T - #dS = dsoftmax(dP) - #dQ = dSK - #dK = QdS^T - - # Compute a starting index and step based on workgroup_id - # Use a simple hash-like function to spread out the starting points - start_idx = (workgroup_id * 17) % num_steps # 17 is an arbitrary prime to spread indices - # Ensure step is coprime with num_steps to visit all indices exactly once - step = 1 # 3 if num_steps > 1 or num_steps==3 else 1 # coprime with num_steps - - - for iter in range(num_steps): - # Compute the permuted block index - blk_idx = (start_idx + iter * step) % num_steps - - curr_m = start_m + blk_idx * step_m - qT_ptrs = qT_ptrs_start + blk_idx * step_m * stride_q_m - dq_ptrs = dq_ptrs_start + blk_idx * step_m * stride_dq_m - do_ptrs = do_ptrs_start + blk_idx * step_m * stride_do_m - - offs_m = curr_m + tl.arange(0, BLOCK_M) - mask_m = offs_m < seqlen_q - mask_qT = mask_m[None, :] - mask_do = mask_m[:, None] - mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) - - if PADDED_HEAD: - mask_qT &= offs_k[:, None] < BLOCK_D_MODEL - mask_do &= offs_k[None, :] < BLOCK_D_MODEL - - #load qT - qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) - - #dropout - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = (curr_philox_offset + - offs_m[None, :] * stride_dropout_m + - offs_n[:, None] * stride_dropout_n) - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1.0 / (1 - dropout_p) - - #Load M - m = tl.load(M + offs_m * stride_deltam, mask=mask_m, other=0.0) - - #Compute qkT - if IS_FP8: - qkT = (tl.dot(k, qT) * descale_q * descale_k) - else: - qkT = tl.dot(k, qT) - - #Compute pT(use m and also apply sm_scale) - pT = tl.math.exp(qkT * sm_scale - m[None, :]) - - if MASK: - causal_mask = (offs_m[None, :] - delta_qk) >= (offs_n[:, None]) - mask = causal_mask & mask_nm - pT = tl.where(mask, pT, 0.0) - - #load DO - do = tl.load(do_ptrs, mask=mask_do, other=0.0) - - #dV - if ENABLE_DROPOUT: - pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale - if IS_FP8: - scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors(pT_dropout, FP8_MAX) - dv += (tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do) * descale_p_dropout * descale_do) - else: - dv += tl.dot(pT_dropout.to(do.type.element_ty), do) - else: - if IS_FP8: - scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) - dv += (tl.dot((pT * scale_pT).to(do.type.element_ty), do) 
* descale_pT * descale_do) - else: - dv += tl.dot(pT.to(do.type.element_ty), do) - - #Load delta - Di = tl.load(D + offs_m * stride_deltam, mask=mask_m) - - #Compute dP and dS - if IS_FP8: - dpT = tl.dot(v, tl.trans(do)) * descale_v * descale_do - else: - dpT = tl.dot(v, tl.trans(do)) - - if ENABLE_DROPOUT: - dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale - - delta_i = Di[None, :] - dsT = pT * (dpT - delta_i) - - #compute dk - if IS_FP8: - scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) - dk += (tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) * descale_dsT * descale_q) - else: - dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) - - - # We can compute the dq_partial here and do a atomic add to the correct memory location - # NOTE: Possible problems with the atomic add: contention, is inside a loop which has achieved bad perf before - # (BLOCK_M, BLOCK_N) x (BLOCK_N, D) - if IS_FP8: - dq_partial = tl.dot((dsT * scale_dsT).to(k.dtype).T, k) * descale_dsT * descale_k - else: - dq_partial = tl.dot(dsT.to(k.dtype).T, k) - tl.atomic_add( - dq_ptrs, - dq_partial * sm_scale, - mask=mask_m[:, None], - sem="relaxed", - ) - - return dk, dv - - -@triton.jit -def _bwd_kernel_dkdvdq_causal( - q_ptr, k_ptr, v_ptr, sm_scale, do_ptr, dk_ptr, dv_ptr, dq_ptr, - m_ptr, delta_ptr, - stride_q_b, stride_q_h, stride_q_m, stride_q_k, - stride_k_b, stride_k_h, stride_k_n, stride_k_k, - stride_v_b, stride_v_h, stride_v_n, stride_v_k, - stride_dk_b, stride_dk_h, stride_dk_n, stride_dk_k, - stride_dq_b, stride_dq_h, stride_dq_m, stride_dq_k, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_do_b, stride_do_h, stride_do_m, stride_do_k, - stride_dropout_b, stride_dropout_h, stride_dropout_m, stride_dropout_n, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset_base, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BATCH, - NUM_K_PIDS, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - wid = tl.program_id(0) # workgoup id: 0, ..., NUM_K_PIDS * BATCH * NUM_K_HEADS - 1 - - # workgroups get launched first along batch dim, then in head_k dim, and then in seq k block dim - batch_idx = wid % BATCH - head_k_idx = wid // BATCH % NUM_K_HEADS - seq_k_blk_idx = wid // (BATCH * NUM_K_HEADS) % NUM_K_PIDS - - #Determine q and k start along with seqlen_q and seqlen_k - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + batch_idx) - q_end = tl.load(cu_seqlens_q + batch_idx + 1) - k_start = tl.load(cu_seqlens_k + batch_idx) - k_end = tl.load(cu_seqlens_k + batch_idx + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - - # Figure out causal starting block since we have seqlen_q >=< seqlen_k. - # Unlike forward pass where we tile on M dim and iterate on N dim, so that - # we can skip some M blocks, in backward pass, we tile on the N dim for kv - # and iterate over the M. 
In this way, we cannot skip N blocks, but only to - # determine the starting M blocks to skip some initial blocks masked by - # causal. - delta_qk = seqlen_q - seqlen_k - - # q > k: diretcly skip all the way until the start of causal block - start_delta_q_gt_k = delta_qk - - # q < k: some blocks will have no Masked block, other needs to re-calc - # starting position - # delta_qk is negative so flip it, only multiple of BLOCK_N can skip the - # masked op - num_blocks_skip = -delta_qk // BLOCK_N - delta_aligned = (num_blocks_skip + 1) * BLOCK_N + delta_qk - start_delta_q_lt_k = delta_aligned // BLOCK_M * BLOCK_M - if delta_qk >= 0: - start_delta = delta_qk - else: - start_delta = start_delta_q_lt_k - - start_n = seq_k_blk_idx * BLOCK_N - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_n = start_n + tl.arange(0, BLOCK_N) - # Mask for loading K and V - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_k = offs_k < BLOCK_D_MODEL - mask_kv &= mask_k[None, :] - - GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - adj_k = (batch_idx * stride_k_b + - head_k_idx * stride_k_h + - k_start * stride_k_n + offs_n[:, None] * stride_k_n + - offs_k[None, :] * stride_k_k) - adj_v = (batch_idx * stride_v_b + - head_k_idx * stride_v_h + - k_start * stride_v_n + offs_n[:, None] * stride_v_n + - offs_k[None, :] * stride_v_k) - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(k_ptr + adj_k , mask=mask_kv, other=0.0) - v = tl.load(v_ptr + adj_v, mask=mask_kv, other=0.0) - - # If MQA / GQA, set the K and V head offsets appropriately. - for head_q_idx in range(head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE): - if delta_qk >= 0: - start_m = start_n + start_delta - len_m = BLOCK_N - else: - start_m = max(start_n + delta_qk, 0) - start_m = (start_m // BLOCK_M) * BLOCK_M - # because we might shift the masked blocks up, we are deeper into - # the masked out region, so we would potentially increase the total - # steps with masked operation to get out of it - residue_m = max(start_n + delta_qk - start_m, 0) - len_m = BLOCK_N + residue_m - - # offset input and output tensor by batch and Q/K heads - adj_q = batch_idx * stride_q_b + head_q_idx * stride_q_h + q_start * stride_q_m - adj_dq = batch_idx * stride_dq_b + head_q_idx * stride_dq_h + q_start * stride_dq_m - - q_ptr_adj = q_ptr + adj_q - dq_ptr_adj = dq_ptr + adj_dq - - adj_do = batch_idx * stride_do_b + head_q_idx * stride_do_h + q_start * stride_do_m - do_ptr_adj = do_ptr + adj_do - adj_delta = batch_idx * stride_delta_b + head_q_idx * stride_delta_h + q_start * stride_delta_m - m_ptr_adj = m_ptr + adj_delta - delta_ptr_adj = delta_ptr + adj_delta - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = (philox_offset_base + batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - dropout_offset = (dropout_mask + batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - - MASK_BLOCK_M: tl.constexpr = BLOCK_M // BLK_SLICE_FACTOR - # bound the masked operation to q len so it does not have to wast cycles - len_m = min(len_m, seqlen_q) - num_steps = tl.cdiv(len_m, MASK_BLOCK_M) - - - # when q < k, we may skip the initial masked op - # if seq_k_blk_idx < num_blocks_skip: - # num_steps = 0 - - if IS_FP8: - descale_q = tl.load(descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx) - 
descale_k = tl.load(descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx) - descale_v = tl.load(descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx) - descale_do = tl.load(descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # if unaligned start_m is negative, the current N-tile has no block on the - # diagonal of causal mask, so everything have no causal mask - dk, dv = _bwd_dkdvdq_inner( - dk, dv, # output tensors - q_ptr_adj, k, v, do_ptr_adj, dq_ptr_adj, m_ptr_adj, delta_ptr_adj, sm_scale, # input tensors - stride_q_m, stride_q_k, # strides for q - stride_dq_m, stride_dq_k, # strides for q - stride_do_m, stride_do_k, # strides for o - stride_dropout_m, stride_dropout_n, # strides for dropout - stride_delta_m, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK_BLOCK_M, BLOCK_N, # block dim - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, # head dim - MASK=True, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - workgroup_id=seq_k_blk_idx, - ) - - - start_m += num_steps * MASK_BLOCK_M - num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M) - end_m = start_m + num_steps * BLOCK_M - - - - dk, dv = _bwd_dkdvdq_inner( - dk, dv, # output tensors - q_ptr_adj, k, v, do_ptr_adj, dq_ptr_adj, m_ptr_adj, delta_ptr_adj, sm_scale, # input tensors - stride_q_m, stride_q_k, # strides for q - stride_dq_m, stride_dq_k, # strides for dq - stride_do_m, stride_do_k, # strides for o - stride_dropout_m, stride_dropout_n, # strides for dropout - stride_delta_m, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - BLOCK_M, BLOCK_N, # block dim - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, # head dim - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - workgroup_id=seq_k_blk_idx, - ) - - # Write back dV and dK. 
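# NOTE (reference sketch): before the dK/dV write-back, a dense PyTorch version of the
# backward equations that the inner routines above recompute tile by tile
# (dV = P^T dO, dP = dO V^T, dS = P * (dP - delta), dQ = dS K * scale, dK = dS^T Q * scale).
# Single head, fp32, no dropout and no causal mask -- intended only as a numerical
# cross-check, not as part of the kernel.
import torch

def naive_attention_backward(q, k, v, do, sm_scale):
    # q, k, v, do: [seqlen, head_dim] tensors for one (batch, head) slice
    s = (q @ k.T) * sm_scale
    p = torch.softmax(s, dim=-1)
    o = p @ v
    delta = (o * do).sum(dim=-1, keepdim=True)   # rowsum(dO * O), as in _bwd_preprocess
    dv = p.T @ do                                # dV = P^T dO
    dp = do @ v.T                                # dP = dO V^T
    ds = p * (dp - delta)                        # softmax backward
    dq = (ds @ k) * sm_scale                     # sm_scale applied at the end, like the kernels
    dk = (ds.T @ q) * sm_scale
    return dq, dk, dv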
- offs_dkdv = (batch_idx * stride_dk_b + - head_k_idx * stride_dk_h + - k_start * stride_dk_n + offs_n[:, None] * stride_dk_n + - offs_k[None, :] * stride_dk_k) - tl.store(dv_ptr + offs_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(dk_ptr + offs_dkdv, dk, mask=mask_kv) - - -@triton.jit -def _bwd_kernel_dkdv_causal( - q_ptr, k_ptr, v_ptr, sm_scale, do_ptr, dk_ptr, dv_ptr, - m_ptr, delta_ptr, - stride_q_b, stride_q_h, stride_q_m, stride_q_k, - stride_k_b, stride_k_h, stride_k_n, stride_k_k, - stride_v_b, stride_v_h, stride_v_n, stride_v_k, - stride_dk_b, stride_dk_h, stride_dk_n, stride_dk_k, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_do_b, stride_do_h, stride_do_m, stride_do_k, - stride_dropout_b, stride_dropout_h, stride_dropout_m, stride_dropout_n, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset_base, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - #seq block, batch, head_k - seq_k_blk_idx = tl.program_id(0) - batch_idx = tl.program_id(1) - head_k_idx = tl.program_id(2) - - #Determine q and k start along with seqlen_q and seqlen_k - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + batch_idx) - q_end = tl.load(cu_seqlens_q + batch_idx + 1) - k_start = tl.load(cu_seqlens_k + batch_idx) - k_end = tl.load(cu_seqlens_k + batch_idx + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - - # Figure out causal starting block since we have seqlen_q >=< seqlen_k. - # Unlike forward pass where we tile on M dim and iterate on N dim, so that - # we can skip some M blocks, in backward pass, we tile on the N dim for kv - # and iterate over the M. In this way, we cannot skip N blocks, but only to - # determine the starting M blocks to skip some initial blocks masked by - # causal. 
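# NOTE (worked example): a host-side mirror of the start_m / step planning described in
# the comment above, with illustrative block sizes (BLOCK_M = BLOCK_N = 64,
# BLK_SLICE_FACTOR = 2). It only reproduces the arithmetic; the kernel does the same
# per program with tl.cdiv.
def causal_dkdv_schedule(seqlen_q, seqlen_k, seq_k_blk_idx,
                         BLOCK_M=64, BLOCK_N=64, BLK_SLICE_FACTOR=2):
    cdiv = lambda a, b: (a + b - 1) // b
    delta_qk = seqlen_q - seqlen_k
    start_n = seq_k_blk_idx * BLOCK_N
    if delta_qk >= 0:
        # at least as many queries as keys: the diagonal sits delta_qk rows below start_n
        start_m, len_m = start_n + delta_qk, BLOCK_N
    else:
        start_m = (max(start_n + delta_qk, 0) // BLOCK_M) * BLOCK_M
        len_m = BLOCK_N + max(start_n + delta_qk - start_m, 0)
    mask_block_m = BLOCK_M // BLK_SLICE_FACTOR
    len_m = min(len_m, seqlen_q)
    masked_steps = cdiv(len_m, mask_block_m)              # small steps along the diagonal
    start_m_unmasked = start_m + masked_steps * mask_block_m
    unmasked_steps = max(cdiv(seqlen_q - start_m_unmasked, BLOCK_M), 0)
    return start_m, masked_steps, unmasked_steps

# seqlen_q = seqlen_k = 256, second K/V tile (start_n = 64):
# start at row 64, 2 masked steps of 32 rows, then 2 unmasked steps of 64 rows.
print(causal_dkdv_schedule(256, 256, 1))   # -> (64, 2, 2)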
- delta_qk = seqlen_q - seqlen_k - - # q > k: diretcly skip all the way until the start of causal block - start_delta_q_gt_k = delta_qk - - # q < k: some blocks will have no Masked block, other needs to re-calc - # starting position - # delta_qk is negative so flip it, only multiple of BLOCK_N can skip the - # masked op - num_blocks_skip = -delta_qk // BLOCK_N - delta_aligned = (num_blocks_skip + 1) * BLOCK_N + delta_qk - start_delta_q_lt_k = delta_aligned // BLOCK_M * BLOCK_M - if delta_qk >= 0: - start_delta = delta_qk - else: - start_delta = start_delta_q_lt_k - - start_n = seq_k_blk_idx *BLOCK_N - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_n = start_n + tl.arange(0, BLOCK_N) - # Mask for loading K and V - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_k = offs_k < BLOCK_D_MODEL - mask_kv &= mask_k[None, :] - - GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - adj_k = (batch_idx * stride_k_b + - head_k_idx * stride_k_h + - k_start * stride_k_n + offs_n[:, None] * stride_k_n + - offs_k[None, :] * stride_k_k) - adj_v = (batch_idx * stride_v_b + - head_k_idx * stride_v_h + - k_start * stride_v_n + offs_n[:, None] * stride_v_n + - offs_k[None, :] * stride_v_k) - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(k_ptr + adj_k , mask=mask_kv, other=0.0) - v = tl.load(v_ptr + adj_v, mask=mask_kv, other=0.0) - - # If MQA / GQA, set the K and V head offsets appropriately. - for head_q_idx in range(head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE): - if delta_qk >= 0: - start_m = start_n + start_delta - len_m = BLOCK_N - else: - start_m = max(start_n + delta_qk, 0) - start_m = start_m // BLOCK_M * BLOCK_M - # because we might shift the masked blocks up, we are deeper into - # the masked out region, so we would potentially increase the total - # steps with masked operation to get out of it - residue_m = max(start_n + delta_qk - start_m, 0) - len_m = BLOCK_N + residue_m - - # offset input and output tensor by batch and Q/K heads - adj_q = batch_idx * stride_q_b + head_q_idx * stride_q_h + q_start * stride_q_m - q_ptr_adj = q_ptr + adj_q - adj_do = batch_idx * stride_do_b + head_q_idx * stride_do_h + q_start * stride_do_m - do_ptr_adj = do_ptr + adj_do - adj_delta = batch_idx * stride_delta_b + head_q_idx * stride_delta_h + q_start * stride_delta_m - m_ptr_adj = m_ptr + adj_delta - delta_ptr_adj = delta_ptr + adj_delta - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = (philox_offset_base + batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - dropout_offset = (dropout_mask + batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - - MASK_BLOCK_M: tl.constexpr = BLOCK_M // BLK_SLICE_FACTOR - # bound the masked operation to q len so it does not have to wast cycles - len_m = min(len_m, seqlen_q) - num_steps = tl.cdiv(len_m, MASK_BLOCK_M) - # when q < k, we may skip the initial masked op - if seq_k_blk_idx < num_blocks_skip: - num_steps = 0 - - if IS_FP8: - descale_q = tl.load(descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx) - descale_k = tl.load(descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx) - descale_v = tl.load(descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx) - descale_do = tl.load(descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx) - else: - 
descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # if start_m is negative, the current N-tile has no block on the - # diagonal of causal mask, so everything have no causal mask - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - q_ptr_adj, k, v, do_ptr_adj, m_ptr_adj, delta_ptr_adj, sm_scale, # input tensors - stride_q_m, stride_q_k, # strides for q - stride_do_m, stride_do_k, # strides for o - stride_dropout_m, stride_dropout_n, # strides for dropout - stride_delta_m, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK_BLOCK_M, BLOCK_N, # block dim - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, # head dim - MASK=True, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - start_m += num_steps * MASK_BLOCK_M - num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M) - end_m = start_m + num_steps * BLOCK_M - - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - q_ptr_adj, k, v, do_ptr_adj, m_ptr_adj, delta_ptr_adj, sm_scale, # input tensors - stride_q_m, stride_q_k, # strides for q - stride_do_m, stride_do_k, # strides for o - stride_dropout_m, stride_dropout_n, # strides for dropout - stride_delta_m, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - BLOCK_M, BLOCK_N, # block dim - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, # head dim - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - - # Write back dV and dK. 
- offs_dkdv = (batch_idx * stride_dk_b + - head_k_idx * stride_dk_h + - k_start * stride_dk_n + offs_n[:, None] * stride_dk_n + - offs_k[None, :] * stride_dk_k) - tl.store(dv_ptr + offs_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(dk_ptr + offs_dkdv, dk, mask=mask_kv) - -@triton.jit -def _bwd_kernel_dq_causal( - q_ptr, k_ptr, v_ptr, sm_scale, do_ptr, dq_ptr, - m_ptr, delta_ptr, - stride_q_b, stride_q_h, stride_q_m, stride_q_k, - stride_k_b, stride_k_h, stride_k_n, stride_k_k, - stride_v_b, stride_v_h, stride_v_n, stride_v_k, - stride_dq_b, stride_dq_h, stride_dq_m, stride_dq_k, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_do_b, stride_do_h, stride_do_m, stride_do_k, - stride_dropout_b, stride_dropout_h, stride_dropout_m, stride_dropout_n, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset_base, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - seq_q_blk_idx = tl.program_id(0) - batch_idx = tl.program_id(1) - head_k_idx = tl.program_id(2) - - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + batch_idx) - q_end = tl.load(cu_seqlens_q + batch_idx + 1) - k_start = tl.load(cu_seqlens_k + batch_idx) - k_end = tl.load(cu_seqlens_k + batch_idx + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - # Figure out causal starting block since we have seqlen_q <=> seqlen_k. - # Unlike forward pass where we tile on M dim and iterate on N dim, so that - # we can skip some M blocks, in backward pass, we tile on the N dim for kv - # and iterate over the M. In this way, we cannot skip N blocks, but only to - # determine the starting M blocks to skip some initial blocks masked by - # causal. - # DQ tiles on M dim and iterate on N dim, so we there could be some tiles we - # can simply skip and we need to adjust starting position. - start_m = seq_q_blk_idx * BLOCK_M - # seqlen_q > seqlen_k, no need to process these tile for dq - delta_qk = seqlen_q - seqlen_k - if start_m + BLOCK_M < delta_qk: - return - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_m = start_m + tl.arange(0, BLOCK_M) - # Mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_k = offs_k < BLOCK_D_MODEL - mask_q &= mask_k[None, :] - offs_q = offs_m[:, None] * stride_q_m + offs_k[None, :] * stride_q_k - offs_do = offs_m[:, None] * stride_do_m + offs_k[None, :] * stride_do_k - adj_k = batch_idx * stride_k_b + head_k_idx * stride_k_h + k_start * stride_k_n - adj_v = batch_idx * stride_v_b + head_k_idx * stride_v_h + k_start * stride_v_n - k_ptr_adj = k_ptr - v_ptr_adj = v_ptr - k_ptr_adj += adj_k - v_ptr_adj += adj_v - - # If MQA / GQA, set the K and V head offsets appropriately. 
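# NOTE (illustration): the comment above refers to grouped-query / multi-query attention,
# where each K/V head serves GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS consecutive query
# heads; this is exactly the range the loop below iterates over. Sizes are illustrative.
NUM_Q_HEADS, NUM_K_HEADS = 8, 2
GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS          # = 4
for head_k_idx in range(NUM_K_HEADS):
    q_heads = list(range(head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE))
    print(head_k_idx, q_heads)
# 0 [0, 1, 2, 3]
# 1 [4, 5, 6, 7]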
- GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - for head_q_idx in range(head_k_idx * GROUP_SIZE, head_k_idx * GROUP_SIZE + GROUP_SIZE): - # seqlen_q < seqlen_k: delta_qk more kv tokens are added at the front - # for every M-tile - end_n = start_m + BLOCK_M - delta_qk - # clamp end_n at [0, seqlen_k] - end_n = max(min(end_n, seqlen_k), 0) - - # offset input and output tensor by batch and Q/K heads - adj_q = (batch_idx * stride_q_b + - head_q_idx * stride_q_h + - q_start * stride_q_m) - adj_do = (batch_idx * stride_do_b + - head_q_idx * stride_do_h + - q_start * stride_do_m) - adj_delta = (batch_idx * stride_delta_b + - head_q_idx * stride_delta_h + - q_start * stride_delta_m) - delta_ptr_adj = delta_ptr + adj_delta - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = (philox_offset_base + - batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - dropout_offset = (dropout_mask + - batch_idx * stride_dropout_b + - head_q_idx * stride_dropout_h) - - q = tl.load(q_ptr + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(do_ptr + adj_do + offs_do, mask=mask_q, other=0.0) - m = tl.load(m_ptr + adj_delta + offs_m * stride_delta_m, - mask=offs_m < seqlen_q) - m = m[:, None] - - MASK_BLOCK_N: tl.constexpr = BLOCK_N // BLK_SLICE_FACTOR - # start can only be 0 at minimum - start_n = max(end_n - BLOCK_M, 0) - num_steps = tl.cdiv(end_n - start_n, MASK_BLOCK_N) - - if IS_FP8: - descale_q = tl.load(descale_q_ptr + batch_idx * stride_descale_q_z + head_q_idx) - descale_k = tl.load(descale_k_ptr + batch_idx * stride_descale_k_z + head_k_idx) - descale_v = tl.load(descale_v_ptr + batch_idx * stride_descale_v_z + head_k_idx) - descale_do = tl.load(descale_do_ptr + batch_idx * stride_descale_do_z + head_q_idx) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - dq = tl.zeros([BLOCK_M, BLOCK_D_MODEL_POW2], dtype=tl.float32) - # Compute dQ for masked (diagonal) blocks. - # NOTE: This code scans each row of QK^T backward (from right to left, - # but inside each call to _bwd_dq_inner, from left to right), but that's - # not due to anything important. I just wanted to reuse the loop - # structure for dK & dV above as much as possible. - dq = _bwd_dq_inner( - dq, - q, k_ptr_adj, v_ptr_adj, do, m, delta_ptr_adj, sm_scale, - stride_q_m, stride_q_k, stride_k_n, stride_k_k, stride_v_n, stride_v_k, - stride_dropout_m, stride_dropout_n, - stride_delta_m, - seqlen_q, seqlen_k, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M, MASK_BLOCK_N, - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, - MASK=True, - ENABLE_DROPOUT=ENABLE_DROPOUT, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - end_n -= num_steps * MASK_BLOCK_N - num_steps = tl.cdiv(end_n, BLOCK_N) - start_n = max(end_n - num_steps * BLOCK_N, 0) - dq = _bwd_dq_inner( - dq, - q, k_ptr_adj, v_ptr_adj, do, m, delta_ptr_adj, sm_scale, - stride_q_m, stride_q_k, stride_k_n, stride_k_k, stride_v_n, stride_v_k, - stride_dropout_m, stride_dropout_n, - stride_delta_m, - seqlen_q, seqlen_k, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M, BLOCK_N, - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - # Write back dQ. 
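# NOTE (worked example): before the dQ write-back, a host-side mirror of the end_n
# arithmetic used above to bound how far each causal dQ tile scans along K/V; the
# block size and sequence lengths are illustrative.
def causal_dq_extent(start_m, seqlen_q, seqlen_k, BLOCK_M=64):
    delta_qk = seqlen_q - seqlen_k
    if start_m + BLOCK_M < delta_qk:
        return None            # no key column is visible to any row in this tile; kernel returns early
    end_n = start_m + BLOCK_M - delta_qk      # last K/V column any row of this tile can attend to
    return max(min(end_n, seqlen_k), 0)       # clamp to [0, seqlen_k]

# seqlen_q = seqlen_k = 256: the dQ tile starting at row 128 only needs columns [0, 192).
print(causal_dq_extent(128, 256, 256))        # -> 192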
- offs_dq = (batch_idx * stride_dq_b + - head_q_idx * stride_dq_h + - q_start * stride_dq_m + - offs_m[:, None] * stride_dq_m + - offs_k[None, :] * stride_dq_k) - dq *= sm_scale - tl.store(dq_ptr + offs_dq, dq, mask=mask_q) - - -@triton.jit -def _bwd_kernel_dkdvdq_noncausal( - Q, K, V, sm_scale, DO, DK, DV, DQ, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BATCH, - NUM_K_PIDS, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - # workgroup id - wid = tl.program_id(0) # 0, ..., NUM_K_PIDS * BATCH * NUM_K_HEADS - 1 - - # Workgroups get launched first along batch dim, then in head_k dim, and then in seq k block dim - # This is in order to avoid contention for the tl.atomic_add (inside _bwd_dkdvdq_inner) that happens between workgroups that share the same batch and head_k. - bid = wid % BATCH - hkid = wid // BATCH % NUM_K_HEADS - pid = wid // (BATCH * NUM_K_HEADS) % NUM_K_PIDS - - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - - dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - - start_n = pid * BLOCK_N - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_n = start_n + tl.arange(0, BLOCK_N) - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_kv &= offs_k < BLOCK_D_MODEL - - GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - adj_k = (bid * stride_kb + - hkid * stride_kh + - k_start * stride_kn + - offs_n[:, None] * stride_kn + - offs_k[None, :] * stride_kk) - adj_v = (bid * stride_vb + - hkid * stride_vh + - k_start * stride_vn + - offs_n[:, None] * stride_vn + - offs_k[None, :] * stride_vk) - - k = tl.load(K + adj_k, mask=mask_kv, other=0.0) - v = tl.load(V + adj_v, mask=mask_kv, other=0.0) - - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - adj_q = (bid * stride_qb + hqid * stride_qh + q_start * stride_qm) - adj_dq = (bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm) - - Q_ptr = Q + adj_q - DQ_ptr = DQ + adj_dq - - adj_do = (bid * stride_dob + hqid * stride_doh + q_start * stride_dom) - DO_ptr = DO + adj_do - adj_delta = (bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam) - M_ptr = M + adj_delta - Delta_ptr = Delta + adj_delta - - #dropout - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = 
philox_offset + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - if IS_FP8: - descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) - descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) - descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) - descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - start_m = 0 - num_steps = tl.cdiv(seqlen_q, BLOCK_M) - - dk, dv = _bwd_dkdvdq_inner( - dk, dv, - Q_ptr, k, v, DO_ptr, DQ_ptr, M_ptr, Delta_ptr, sm_scale, - stride_qm, stride_qk, - stride_dqm, stride_dqk, - stride_dom, stride_dok, - stride_dropoutm, stride_dropoutn, - stride_deltam, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - seqlen_q, seqlen_k, - start_n, start_m, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M, BLOCK_N, - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - workgroup_id=pid, - ) - - adj_dkdv = (bid * stride_dkb + - hkid * stride_dkh + - k_start * stride_dkn + offs_n[:, None] * stride_dkn + - offs_k[None, :] * stride_dkk) - tl.store(DV + adj_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(DK + adj_dkdv, dk, mask=mask_kv) - - - -@triton.jit -def _bwd_kernel_dkdv_noncausal( - Q, K, V, sm_scale, DO, DK, DV, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - pid = tl.program_id(0) - bid = tl.program_id(1) - hkid = tl.program_id(2) - - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - - dk = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - dv = tl.zeros([BLOCK_N, BLOCK_D_MODEL_POW2], dtype=tl.float32) - - start_n = pid * BLOCK_N - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_n = start_n + tl.arange(0, BLOCK_N) - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_kv &= offs_k < BLOCK_D_MODEL - - GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - adj_k = (bid * stride_kb + - hkid * stride_kh + - k_start * stride_kn + - offs_n[:, None] * stride_kn + - offs_k[None, :] * stride_kk) - adj_v = (bid * stride_vb + - hkid * stride_vh + - k_start * stride_vn + - offs_n[:, None] * stride_vn + - offs_k[None, :] * stride_vk) 
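# NOTE (illustration): the fused non-causal kernel above flattens its launch grid into a
# single dimension and decodes it with batch varying fastest, so neighbouring workgroups
# update dq for different (batch, K-head) slices and their atomic adds rarely collide.
# A quick host-side decode of that mapping, with illustrative sizes:
BATCH, NUM_K_HEADS, NUM_K_PIDS = 2, 2, 3
for wid in range(BATCH * NUM_K_HEADS * NUM_K_PIDS):
    bid = wid % BATCH
    hkid = wid // BATCH % NUM_K_HEADS
    pid = wid // (BATCH * NUM_K_HEADS) % NUM_K_PIDS
    print(wid, (bid, hkid, pid))
# 0 -> (0, 0, 0), 1 -> (1, 0, 0), 2 -> (0, 1, 0), 3 -> (1, 1, 0), 4 -> (0, 0, 1), ...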
- - k = tl.load(K + adj_k, mask=mask_kv, other=0.0) - v = tl.load(V + adj_v, mask=mask_kv, other=0.0) - - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - adj_q = (bid * stride_qb + hqid * stride_qh + q_start * stride_qm) - Q_ptr = Q + adj_q - adj_do = (bid * stride_dob + hqid * stride_doh + q_start * stride_dom) - DO_ptr = DO + adj_do - adj_delta = (bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam) - M_ptr = M + adj_delta - Delta_ptr = Delta + adj_delta - - #dropout - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - if IS_FP8: - descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) - descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) - descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) - descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - start_m = 0 - num_steps = tl.cdiv(seqlen_q, BLOCK_M) - dk, dv = _bwd_dkdv_inner( - dk, dv, - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, - stride_qm, stride_qk, - stride_dom, stride_dok, - stride_dropoutm, stride_dropoutn, - stride_deltam, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - seqlen_q, seqlen_k, - start_n, start_m, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M, BLOCK_N, - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - - adj_dkdv = (bid * stride_dkb + - hkid * stride_dkh + - k_start * stride_dkn + offs_n[:, None] * stride_dkn + - offs_k[None, :] * stride_dkk) - tl.store(DV + adj_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(DK + adj_dkdv, dk, mask=mask_kv) - - -@triton.jit -def _bwd_kernel_dq_noncausal( - Q, K, V, sm_scale, DO, DQ, - M, delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset_base, - descale_q_ptr, descale_k_ptr, descale_v_ptr, descale_do_ptr, - NUM_Q_HEADS: tl.constexpr, - NUM_K_HEADS: tl.constexpr, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - BLOCK_D_MODEL: tl.constexpr, - BLOCK_D_MODEL_POW2: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, -): - pid = tl.program_id(0) #seqlen - bid = tl.program_id(1) #batch - hkid = tl.program_id(2) #head_k - - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - start_m = pid * BLOCK_M - - offs_k = tl.arange(0, BLOCK_D_MODEL_POW2) - offs_m = start_m + tl.arange(0, BLOCK_M) - - 
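# NOTE (illustration): a tiny sketch of the varlen ("thd") bookkeeping used just above.
# Sequences are packed token-by-token along one dimension and cu_seqlens_* stores their
# exclusive prefix sums, so each program recovers its own start offset and true length.
# The lengths below are illustrative.
import torch

seqlens_q = [5, 9, 3]
cu_seqlens_q = torch.tensor([0, 5, 14, 17], dtype=torch.int32)   # prefix sums of seqlens_q
for bid in range(len(seqlens_q)):
    q_start = int(cu_seqlens_q[bid])
    seqlen_q = int(cu_seqlens_q[bid + 1]) - q_start
    print(bid, q_start, seqlen_q)   # (0, 0, 5), (1, 5, 9), (2, 14, 3)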
#mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - PADDED_HEAD: tl.constexpr = (BLOCK_D_MODEL != BLOCK_D_MODEL_POW2) - if PADDED_HEAD: - mask_k = offs_k < BLOCK_D_MODEL - mask_q &= mask_k[None, :] - offs_q = offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk - offs_do = offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn - K += adj_k - V += adj_v - - GROUP_SIZE = NUM_Q_HEADS // NUM_K_HEADS - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam - delta_ptr = delta + adj_delta - - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = (philox_offset_base + - bid * stride_dropoutb + - hqid * stride_dropouth) - dropout_offset = ( - dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth) - - q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(DO + adj_do + offs_do, mask=mask_q, other=0.0) - m = tl.load(M + adj_delta + offs_m * stride_deltam, mask=offs_m < seqlen_q) - m = m[:, None] - - #FP8 - if IS_FP8: - descale_q = tl.load(descale_q_ptr + bid * stride_descale_q_z + hqid) - descale_k = tl.load(descale_k_ptr + bid * stride_descale_k_z + hkid) - descale_v = tl.load(descale_v_ptr + bid * stride_descale_v_z + hkid) - descale_do = tl.load(descale_do_ptr + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - start_n = 0 - end_n = seqlen_k - num_steps = tl.cdiv(seqlen_k, BLOCK_N) - dq = tl.zeros([BLOCK_M, BLOCK_D_MODEL_POW2], dtype=tl.float32) - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, delta_ptr, sm_scale, - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, - stride_deltam, - seqlen_q, seqlen_k, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M, BLOCK_N, - BLOCK_D_MODEL, BLOCK_D_MODEL_POW2, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - ) - - adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm - offs_dq = offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk - dq *= sm_scale - tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) - -def attention_prefill_backward_triton_fused_atomics_impl( - do: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - o: torch.Tensor, - softmax_lse: torch.Tensor, - dq: torch.Tensor, - dk: torch.Tensor, - dv: torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - max_seqlen_q: int, - max_seqlen_k: int, - dropout_p: float, - philox_seed: Optional[int] = 0, - philox_offset: Optional[int] = 0, - descale_q: Optional[torch.Tensor] = None, - descale_k: Optional[torch.Tensor] = None, - descale_v: Optional[torch.Tensor] = None, - descale_do: Optional[torch.Tensor] = None, - fused: bool = False, - # seqused for FA v3 (currently ignored in this implementation) - seqused_q: Optional[torch.Tensor] = None, - seqused_k: Optional[torch.Tensor] = None, -): - IS_FP8 = is_fp8(q) - if IS_FP8: - FP8_MAX = torch.finfo(q.dtype).max 
- descale_strides = (descale_q.stride(0),descale_k.stride(0),descale_v.stride(0),descale_do.stride(0) ) - - if DEBUG: - print(f"FP8 path triggered in bwd_prefill_fused_atomics.py") - else: - FP8_MAX = None - stride_descale_q_z = stride_descale_k_z = stride_descale_v_z = stride_descale_do_z = None - descale_strides = (stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z) - - IS_VARLEN = True if cu_seqlens_q is not None else False - - #get strides and shape - if IS_VARLEN: - #Layout for q,k,v is thd ie [total tokens, num_head, head_dim] - batch, seqlen_q, num_q_heads, head_sz = len(cu_seqlens_q) - 1, max_seqlen_q, q.shape[1], q.shape[2] - seqlen_k, num_k_heads = max_seqlen_k, k.shape[1] - q_strides = (0, q.stride(1), q.stride(0), q.stride(2)) - q_strides = (0, q.stride(1), q.stride(0), q.stride(2)) - k_strides = (0, k.stride(1), k.stride(0), k.stride(2)) - v_strides = (0, v.stride(1), v.stride(0), v.stride(2)) - o_strides = (0, o.stride(1), o.stride(0), o.stride(2)) - dq_strides = (0, dq.stride(1), dq.stride(0), dq.stride(2)) - dk_strides = (0, dk.stride(1), dk.stride(0), dk.stride(2)) - dv_strides = (0, dv.stride(1), dv.stride(0), dv.stride(2)) - do_strides = (0, do.stride(1), do.stride(0), do.stride(2)) - else: - #Layout for q,k,v is bshd ie [batch, seq_len, num_head, head_dim] - batch, seqlen_q, num_q_heads, head_sz = q.shape - seqlen_k, num_k_heads = k.shape[1], k.shape[2] - q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3)) - k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3)) - v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3)) - o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3)) - dq_strides = (dq.stride(0), dq.stride(2), dq.stride(1), dq.stride(3)) - dk_strides = (dk.stride(0), dk.stride(2), dk.stride(1), dk.stride(3)) - dv_strides = (dv.stride(0), dv.stride(2), dv.stride(1), dv.stride(3)) - do_strides = (do.stride(0), do.stride(2), do.stride(1), do.stride(3)) - - #BLOCK_D_MODEL, BLOCK_D_MODEL_POW2 - #padding for head_dim. Power of 2 or 16 - BLOCK_D_MODEL_POW2 = triton.next_power_of_2(head_sz) - BLOCK_D_MODEL_POW2 = max(BLOCK_D_MODEL_POW2, 16) - - #Configs - #PRE_BLOCK, BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 - #BLK_SLICE_FACTOR - NUM_WARPS, NUM_STAGES = 4, 1 - WAVES_PER_EU = 1 - PRE_BLOCK = 128 - #BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32 - BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 64, 64, 64, 16 - BLK_SLICE_FACTOR = 2 - - #init delta - delta = torch.zeros_like(softmax_lse) - if IS_VARLEN: - #[total_tokens, num_q_heads, seqlen_q] - delta_strides = (0, delta.stride(1), delta.stride(0)) - else: - #[batch, num_q_heads, seqlen_q] - delta_strides = delta.stride() - - #preprocess - #compute D(delta) = rowsum(dO*O). Note, multiplication is element-wise. 
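# NOTE (reference sketch): a dense PyTorch cross-check of the quantity the
# _bwd_preprocess launch below produces: delta[b, h, m] = sum_d O[b, m, h, d] * dO[b, m, h, d],
# i.e. a row-wise sum of the element-wise product. The einsum assumes the fixed-length
# bshd layout; it is only a verification aid, not part of the implementation.
import torch

def delta_reference(o: torch.Tensor, do: torch.Tensor) -> torch.Tensor:
    # o, do: [batch, seqlen_q, num_q_heads, head_dim] -> delta: [batch, num_q_heads, seqlen_q]
    return torch.einsum("bmhd,bmhd->bhm", o.float(), do.float())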
- pre_grid = (triton.cdiv(max_seqlen_q, PRE_BLOCK), batch, num_q_heads) - _bwd_preprocess[pre_grid]( - o, do, - delta, - *o_strides, - *delta_strides, - descale_strides[3], - cu_seqlens_q, max_seqlen_q, - descale_do, - BLOCK_M=PRE_BLOCK, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8 - ) - - #dropout_mask - use_dropout = (dropout_p > 0.0) - if use_dropout: - dropout_mask = torch.zeros( - (batch, num_q_heads, max_seqlen_q, max_seqlen_k), - device=q.device, - dtype=torch.float32) - dropout_strides = dropout_mask.stride() - else: - dropout_mask = None - dropout_strides = (0, 0, 0, 0) - - grid_dkdv = ((max_seqlen_k + BLOCK_N1 - 1) // BLOCK_N1, batch, num_k_heads) - grid_dq = ((max_seqlen_q + BLOCK_M2 - 1) // BLOCK_M2, batch, num_k_heads) - - if fused: # fuses dk, dv, dq computations into one kernel by computing the dq using atomic adds between workgroups - - BLOCK_N = 128 if BLOCK_D_MODEL_POW2 < 160 else 64 # larger head sizes lead to oom - config = { - "BLOCK_M": 32, - "BLOCK_N": BLOCK_N, - "num_warps": 4, - "num_stages": 1, - "waves_per_eu": 1, - "BLK_SLICE_FACTOR": 2, - } - - num_k_pids = (max_seqlen_k + BLOCK_N - 1) // BLOCK_N - grid_dkdvdq = (batch * num_k_heads * num_k_pids,) - - if causal: - _bwd_kernel_dkdvdq_causal[grid_dkdvdq]( - q, k, v, sm_scale, do, dk, dv, dq, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dk_strides, - *dq_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BATCH=batch, - NUM_K_PIDS=num_k_pids, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - **config, - ) - else: - _bwd_kernel_dkdvdq_noncausal[grid_dkdvdq]( - q, k, v, sm_scale, do, dk, dv, dq, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dk_strides, - *dq_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BATCH=batch, - NUM_K_PIDS=num_k_pids, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - **config, - ) - - return delta - - # split kernels solution: one kernel computes dk, dv and the other computes dq - - if causal: - _bwd_kernel_dkdv_causal[grid_dkdv]( - q, k, v, sm_scale, do, dk, dv, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dk_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BLOCK_M=BLOCK_M1, - BLOCK_N=BLOCK_N1, - BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu=WAVES_PER_EU, - ) - _bwd_kernel_dq_causal[grid_dq]( - q, k, v, sm_scale, 
do, dq, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dq_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BLOCK_M=BLOCK_M2, - BLOCK_N=BLOCK_N2, - BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu=WAVES_PER_EU, - ) - else: - _bwd_kernel_dkdv_noncausal[grid_dkdv]( - q, k, v, sm_scale, do, dk, dv, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dk_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BLOCK_M=BLOCK_M1, - BLOCK_N=BLOCK_N1, - BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu=WAVES_PER_EU, - ) - - _bwd_kernel_dq_noncausal[grid_dq]( - q, k, v, sm_scale, do, dq, - softmax_lse, delta, - *q_strides, - *k_strides, - *v_strides, - *dq_strides, - *delta_strides, - *do_strides, - *dropout_strides, - *descale_strides, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_mask,dropout_p, philox_seed, philox_offset, - descale_q, descale_k, descale_v, descale_do, - NUM_Q_HEADS=num_q_heads, - NUM_K_HEADS=num_k_heads, - BLOCK_M=BLOCK_M2, - BLOCK_N=BLOCK_N2, - BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, - BLOCK_D_MODEL=head_sz, - BLOCK_D_MODEL_POW2=BLOCK_D_MODEL_POW2, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu=WAVES_PER_EU, - ) - - return delta \ No newline at end of file diff --git a/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_no_atomics.py b/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_no_atomics.py deleted file mode 100755 index 0d3b3a6fdf4..00000000000 --- a/flash_attn/flash_attn_triton_amd/bwd_prefill_fused_no_atomics.py +++ /dev/null @@ -1,1467 +0,0 @@ -import os -import torch -import triton # type: ignore -import triton.language as tl # type: ignore -from typing import Literal, Optional -from .utils import AUTOTUNE, DROPOUT_USE_PYTORCH, DROPOUT_DUMP, DEBUG, compute_fp8_scaling_factors, \ - create_dropout_mask, create_dropout_mask_varlen, is_cdna, is_fp8, is_rdna, round_multiple - -# NOTE: triton fails to import tl.constexprs so create them here for the file -tl_DROPOUT_USE_PYTORCH: tl.constexpr = triton.language.constexpr(DROPOUT_USE_PYTORCH) -tl_DROPOUT_DUMP: tl.constexpr = triton.language.constexpr(DROPOUT_DUMP) - - -def get_autotune_configs(): - if False: - if is_cdna(): - # shared meta-parameters - NUM_STAGES = 1 - NUM_WARPS = 4 - WAVES_PER_EU = 2 - MATRIX_INSTR_NONKDIM = 16 - - preprocess_autotune_configs = [ - triton.Config({"PRE_BLOCK": 128, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), # og config - triton.Config({"PRE_BLOCK": 64, "waves_per_eu": 
WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({"PRE_BLOCK": 32, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({"PRE_BLOCK": 16, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - preprocess_autotune_keys = [ - "IS_CAUSAL", "dropout_p", "MAX_SEQLENS_Q", "MAX_SEQLENS_K", - "ACTUAL_HEAD_DIM_QK", "ACTUAL_HEAD_DIM_V", "IS_VARLEN", "HQ", "HK", - ] - causal_autotune_configs = [ - triton.Config({"BLOCK_M1": 32, "BLOCK_N1": 128, "BLOCK_M2": 128, "BLOCK_N2": 32, "BLK_SLICE_FACTOR": 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), # og config - triton.Config({'BLOCK_M1': 16, 'BLOCK_N1': 128, 'BLOCK_M2': 128, 'BLOCK_N2': 16, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({'BLOCK_M1': 16, 'BLOCK_N1': 64, 'BLOCK_M2': 64, 'BLOCK_N2': 16, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({'BLOCK_M1': 32, 'BLOCK_N1': 64, 'BLOCK_M2': 64, 'BLOCK_N2': 32, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - causal_autotune_keys = [ - "IS_CAUSAL", "dropout_p", "MAX_SEQLENS_Q", "MAX_SEQLENS_K", - "ACTUAL_HEAD_DIM_QK", "ACTUAL_HEAD_DIM_V", "IS_VARLEN", "HQ", "HK", - ] - noncausal_autotune_configs = [ - triton.Config({"BLOCK_M1": 32, "BLOCK_N1": 128, "BLOCK_M2": 128, "BLOCK_N2": 32, "BLK_SLICE_FACTOR": 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), # og config - triton.Config({'BLOCK_M1': 16, 'BLOCK_N1': 128, 'BLOCK_M2': 128, 'BLOCK_N2': 16, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({'BLOCK_M1': 16, 'BLOCK_N1': 64, 'BLOCK_M2': 64, 'BLOCK_N2': 16, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - triton.Config({'BLOCK_M1': 32, 'BLOCK_N1': 64, 'BLOCK_M2': 64, 'BLOCK_N2': 32, 'BLK_SLICE_FACTOR': 2, "waves_per_eu": WAVES_PER_EU, "matrix_instr_nonkdim": MATRIX_INSTR_NONKDIM}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - noncausal_autotune_keys = [ - "IS_CAUSAL", "dropout_p", "MAX_SEQLENS_Q", "MAX_SEQLENS_K", - "ACTUAL_HEAD_DIM_QK", "ACTUAL_HEAD_DIM_V", "IS_VARLEN", "HQ", "HK", - ] - - return (preprocess_autotune_configs, preprocess_autotune_keys), (causal_autotune_configs, causal_autotune_keys), (noncausal_autotune_configs, noncausal_autotune_keys) - else: - raise ValueError("Unknown Device Type") - else: - # meta-parameters - # TODO: fix num_stages later - NUM_WARPS, NUM_STAGES = 4, 1 - WAVES_PER_EU = 1 - PRE_BLOCK = 128 - BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32 - BLK_SLICE_FACTOR = 2 - - assert BLOCK_N1 == BLOCK_M2 - - # configs for the kernels - preprocess_autotune_configs = [ - triton.Config({"PRE_BLOCK": PRE_BLOCK, "waves_per_eu": WAVES_PER_EU}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - preprocess_autotune_keys = [ - "max_seqlen_q", - "ACTUAL_HEAD_DIM_V", 
"IS_VARLEN", - ] - causal_autotune_configs = [ - triton.Config({"BLOCK_M1": BLOCK_M1, "BLOCK_N1": BLOCK_N1, "BLOCK_M2": BLOCK_M2, "BLOCK_N2": BLOCK_N2, "BLK_SLICE_FACTOR": BLK_SLICE_FACTOR, "waves_per_eu": WAVES_PER_EU}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - causal_autotune_keys = [ - "dropout_p", "max_seqlen_q", "max_seqlen_k", - "ACTUAL_HEAD_DIM_QK", "ACTUAL_HEAD_DIM_V", "IS_VARLEN", "HQ", "HK", - ] - noncausal_autotune_configs = [ - triton.Config({"BLOCK_M1": BLOCK_M1, "BLOCK_N1": BLOCK_N1, "BLOCK_M2": BLOCK_M2, "BLOCK_N2": BLOCK_N2, "BLK_SLICE_FACTOR": BLK_SLICE_FACTOR, "waves_per_eu": WAVES_PER_EU}, num_stages=NUM_STAGES, num_warps=NUM_WARPS), - ] - noncausal_autotune_keys = [ - "dropout_p", "max_seqlen_q", "max_seqlen_k", - "ACTUAL_HEAD_DIM_QK", "ACTUAL_HEAD_DIM_V", "IS_VARLEN", "HQ", "HK", - ] - return (preprocess_autotune_configs, preprocess_autotune_keys), (causal_autotune_configs, causal_autotune_keys), (noncausal_autotune_configs, noncausal_autotune_keys) - - - -(preprocess_autotune_configs, preprocess_autotune_keys), (causal_autotune_configs, causal_autotune_keys), (noncausal_autotune_configs, noncausal_autotune_keys) = get_autotune_configs() - - -# This function computes delta given output Out and gradient DO -# Here is the I/O shape: -# Out: (batch, nhead_q, max_seqlens_q, headDim) -# DO: (batch, nhead_q, max_seqlens_q, headDim) -# Delta: (batch, nheads_q, max_seqlens_q) -@triton.autotune( - configs=preprocess_autotune_configs, - key=preprocess_autotune_keys, - use_cuda_graph=True, -) -@triton.jit -def _bwd_preprocess( - O, - DO, # noqa: E741 - Delta, - stride_ob, stride_oh, stride_om, stride_od, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_descale_do_z, - cu_seqlens_q, max_seqlen_q, - Descale_do, - PRE_BLOCK: tl.constexpr, - HEAD_DIM_V: tl.constexpr, - ACTUAL_HEAD_DIM_V: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr -): - pid_m = tl.program_id(0) - bid = tl.program_id(1) - hid = tl.program_id(2) - # Handle varlen - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - seqlen_q = q_end - q_start - else: - q_start = 0 - seqlen_q = max_seqlen_q - - # Compute offsets - offs_m = pid_m * PRE_BLOCK + tl.arange(0, PRE_BLOCK) - offs_d = tl.arange(0, HEAD_DIM_V) - # pointer offsets for O & DO - off_o = ( bid * stride_ob - + hid * stride_oh - + q_start * stride_om - + offs_m[:, None] * stride_om - + offs_d[None, :] * stride_od) # noqa: E741 - off_do = (bid * stride_dob - + hid * stride_doh - + q_start * stride_dom - + offs_m[:, None] * stride_dom - + offs_d[None, :] * stride_dod) - - # create masks - mask_m = offs_m < seqlen_q - mask_md = mask_m[:, None] - PADDED_HEAD_V: tl.constexpr = (ACTUAL_HEAD_DIM_V != HEAD_DIM_V) - if PADDED_HEAD_V: - mask_md &= offs_d[None, :] < ACTUAL_HEAD_DIM_V - # load - o = tl.load(O + off_o, mask=mask_md, other=0.0) - do = tl.load(DO + off_do, mask=mask_md, other=0.0) - # compute and write-back to delta - if IS_FP8: - off_descale_do = bid * stride_descale_do_z + hid - descale_do = tl.load(Descale_do + off_descale_do) - - # NOTE: do is in the fp8 range and o is not in fp8 - delta = tl.sum(o.to(tl.float32) * (do.to(tl.float32) * descale_do), axis=1) - else: - delta = tl.sum(o.to(tl.float32) * do.to(tl.float32), axis=1) - off_delta = (bid * stride_delta_b - + hid * stride_delta_h - + q_start * stride_delta_m - + offs_m * stride_delta_m) - tl.store(Delta + off_delta , delta, mask=mask_m) - - -# The main inner-loop logic for 
computing dK and dV. -@triton.jit -def _bwd_dkdv_inner( - dk, dv, # output - Q, k, v, DO, M, D, sm_scale, # input tensor - stride_qm, stride_qk, - stride_dom, stride_dok, - stride_dropoutm, stride_dropoutn, - stride_lse_m, stride_delta_m, - BLOCK_M: tl.constexpr, # 16 - BLOCK_N: tl.constexpr, # 128 - HEAD_DIM_QK: tl.constexpr, # - HEAD_DIM_V: tl.constexpr, # - ACTUAL_HEAD_DIM_QK: tl.constexpr, # - ACTUAL_HEAD_DIM_V: tl.constexpr, # - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - # Filled in by the wrapper. - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK: tl.constexpr, # causal masking, only apply to tiles on mask diagonal - ENABLE_DROPOUT: tl.constexpr, # activate dropout - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, # activate exp2 - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # if HEAD_DIM is padded - PADDED_HEAD_QK: tl.constexpr = (ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK) - PADDED_HEAD_V: tl.constexpr = (ACTUAL_HEAD_DIM_V != HEAD_DIM_V) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M) # start_m + (0, 15) - offs_n = start_n + tl.arange(0, BLOCK_N) # start_m + (0, 127) - offs_k_qk = tl.arange(0, HEAD_DIM_QK) - offs_k_v = tl.arange(0, HEAD_DIM_V) - # mask to make sure not OOB of seqlen_q - mask_n = offs_n < seqlen_k - # Q and DO are (seqlen_q, head_dim) - # qT_ptrs = (1, BLOCK_M) + (HEAD_DIM_QK, 1), transpose of q - qT_ptrs = Q + offs_m[None, :] * stride_qm + offs_k_qk[:, None] * stride_qk - # do_ptrs = (BLOCK_M, 1) + (1, HEAD_DIM_V), NOT transposed - do_ptrs = DO + offs_m[:, None] * stride_dom + offs_k_v[None, :] * stride_dok - # BLOCK_N must be a multiple of BLOCK_M, otherwise the code wouldn't work. - tl.static_assert(BLOCK_N % BLOCK_M == 0) - curr_m = start_m - step_m = BLOCK_M - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) - - for blk_idx in range(num_steps): - if DEBUG_TRITON: print(f"iter {blk_idx}: curr_m = {curr_m}") # noqa: E701 - offs_m = curr_m + tl.arange(0, BLOCK_M) - # update the mask because offs_m advanced - mask_m = offs_m < seqlen_q - mask_qT = mask_m[None, :] - mask_do = mask_m[:, None] - mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) - if PADDED_HEAD_QK: - mask_qT &= offs_k_qk[:, None] < ACTUAL_HEAD_DIM_QK - if PADDED_HEAD_V: - mask_do &= offs_k_v[None, :] < ACTUAL_HEAD_DIM_V - qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) - # generate dropout mask - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = curr_philox_offset + \ - offs_m[None, :] * stride_dropoutm + \ - offs_n[:, None] * stride_dropoutn - if tl_DROPOUT_USE_PYTORCH: - dropout_offs = offs_m[None, :] * stride_dropoutm + \ - offs_n[:, None] * stride_dropoutn - dropout_mask = tl.load( - curr_dropout_offset + dropout_offs, - mask=mask_nm - ) - else: - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1.0 / (1 - dropout_p) - # Load m before computing qk to reduce pipeline stall. 
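- # m holds the per-row log-sum-exp (softmax_lse) saved by the forward pass, so p is recomputed below as exp(qk * sm_scale - m) without re-normalizing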
- m = tl.load(M + offs_m * stride_lse_m, mask=mask_m, other=0.0) - if IS_FP8: - qkT = (tl.dot(k, qT) * descale_q * descale_k) - else: - qkT = tl.dot(k, qT) - qkT_scaled = qkT * sm_scale - - if USE_ALIBI: - relative_pos_block = offs_n[:, None] + seqlen_q - seqlen_k - offs_m[None, :] - alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) - qkT_scaled += alibi_block - - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"qT: {qT.shape}\n", qT) - print(f"k: {k.shape}\n", k) - print(f"qkT scaled: {qkT.shape}\n", qkT_scaled) - # TODO: remove the scaling of m later when we removed re-scaling in fwd - if USE_EXP2: - pT = tl.math.exp2(qkT_scaled * RCP_LN2 - m[None, :] * RCP_LN2) - else: - pT = tl.math.exp(qkT_scaled - m[None, :]) - - # Autoregressive masking. - if MASK: - # offset offs_m with delta_qk since the causal mask starts at - # bottom right of the (seqlen_q, seqlen_k) matrix - causal_mask = (offs_m[None, :] - delta_qk) >= offs_n[:, None] - mask = causal_mask & mask_nm - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"causal_mask: {causal_mask.shape}\n", causal_mask) - print(f"qkT after causal: {qkT.shape}\n", tl.where(causal_mask, qkT * sm_scale, 0.0)) - pT = tl.where(mask, pT, 0.0) - do = tl.load(do_ptrs, mask=mask_do, other=0.0) - # Compute dV. - if ENABLE_DROPOUT: - pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale - if IS_FP8: - scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors(pT_dropout, FP8_MAX) - dv += (tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do)* descale_p_dropout * descale_do) - else: - dv += tl.dot(pT_dropout.to(do.type.element_ty), do) - else: - if IS_FP8: - scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) - dv += (tl.dot((pT * scale_pT).to(do.type.element_ty), do) * descale_pT * descale_do) - else: - dv += tl.dot(pT.to(do.type.element_ty), do) - - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"pT: {pT.shape}\n", pT) - # D (= delta) is pre-divided by ds_scale. - Di = tl.load(D + offs_m * stride_delta_m, mask=mask_m) - # Compute dP and dS. - if IS_FP8: - dpT = (tl.dot(v, tl.trans(do)) * descale_v * descale_do) - else: - dpT = tl.dot(v, tl.trans(do)) - if ENABLE_DROPOUT: - dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale - delta_i = Di[None, :] - dsT = pT * (dpT - delta_i) - if IS_FP8: - scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) - dk += (tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) * descale_dsT * descale_q) - else: - dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) - # Increment pointers. - curr_m += step_m - qT_ptrs += step_m * stride_qm - do_ptrs += step_m * stride_dom - return dk, dv - -# the main inner-loop logic for computing dQ -@triton.jit -def _bwd_dq_inner( - dq, # output - q, K, V, do, m, Delta, sm_scale, # input - # shared by Q/K/V. - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, # stride for dropout - stride_lse_m, - stride_delta_m, - seqlen_q, seqlen_k, # - BLOCK_M2: tl.constexpr, # - BLOCK_N2: tl.constexpr, # - HEAD_DIM_QK: tl.constexpr, - HEAD_DIM_V: tl.constexpr, - ACTUAL_HEAD_DIM_QK: tl.constexpr, - ACTUAL_HEAD_DIM_V: tl.constexpr, # - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - # Filled in by the wrapper. 
- start_m, start_n, end_n, num_steps, # - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # if HEAD_DIM is padded - PADDED_HEAD_QK: tl.constexpr = (ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK) - PADDED_HEAD_V: tl.constexpr = (ACTUAL_HEAD_DIM_V != HEAD_DIM_V) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M2) - offs_n = start_n + tl.arange(0, BLOCK_N2) - offs_k_qk = tl.arange(0, HEAD_DIM_QK) - offs_k_v = tl.arange(0, HEAD_DIM_V) - - # mask to make sure not OOB of seqlen_q - mask_m = offs_m < seqlen_q - - kT_ptrs = K + offs_n[None, :] * stride_kn + offs_k_qk[:, None] * stride_kk - vT_ptrs = V + offs_n[None, :] * stride_vn + offs_k_v[:, None] * stride_vk - # D (= delta) is pre-divided by ds_scale. - Di = tl.load(Delta + offs_m * stride_delta_m, mask=mask_m, other=0.0) - # BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work. - tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0) - curr_n = start_n - step_n = BLOCK_N2 - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) - for blk_idx in range(num_steps): - if DEBUG_TRITON: print(f"iter {blk_idx}: curr_n = {curr_n}") # noqa: E701 - offs_n = curr_n + tl.arange(0, BLOCK_N2) - # end_n is needed because the end of causal True might not be perfectly - # aligned with the end of the block - mask_n = offs_n < end_n - if DEBUG_TRITON_DETAIL: print(f"start_n = {start_n}, end_n = {end_n}, offs_n: {offs_n.shape}\n{offs_n}") # noqa: E701 - if DEBUG_TRITON_DETAIL: print(f"mask_n: {mask_n.shape}\n{mask_n}") # noqa: E701 - mask_kT = mask_n[None, :] - mask_vT = mask_n[None, :] - mask_mn = mask_m[:, None] & (offs_n[None, :] < end_n) - if PADDED_HEAD_QK: - mask_kT &= offs_k_qk[:, None] < ACTUAL_HEAD_DIM_QK - if PADDED_HEAD_V: - mask_vT &= offs_k_v[:, None] < ACTUAL_HEAD_DIM_V - - kT = tl.load(kT_ptrs, mask=mask_kT, other=0.0) - vT = tl.load(vT_ptrs, mask=mask_vT, other=0.0) - - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = curr_philox_offset + \ - offs_m[:, None] * stride_dropoutm + \ - offs_n[None, :] * stride_dropoutn - if tl_DROPOUT_USE_PYTORCH: - dropout_offs = offs_m[:, None] * stride_dropoutm + \ - offs_n[None, :] * stride_dropoutn - dropout_mask = tl.load( - curr_dropout_offset + dropout_offs, - mask=mask_mn) - else: - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1 / (1 - dropout_p) - - if IS_FP8: - qk = (tl.dot(q, kT) * descale_q * descale_k) - else: - qk = tl.dot(q, kT) - qk_scaled = qk * sm_scale - - if USE_ALIBI: - relative_pos_block = offs_m[:, None] + seqlen_k - seqlen_q - offs_n[None, :] - alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) - qk_scaled += alibi_block - - if DEBUG_TRITON_DETAIL: print(f"qk scaled: {qk.shape}\n", qk_scaled) # noqa: E701 - if USE_EXP2: - p = tl.math.exp2(qk_scaled * RCP_LN2 - m * RCP_LN2) - else: - p = tl.math.exp(qk_scaled - m) - - # Autoregressive masking. - if MASK: - causal_mask = (offs_m[:, None] - delta_qk) >= offs_n[None, :] - mask = causal_mask & mask_mn - p = tl.where(mask, p, 0.0) - # Compute dP and dS. 
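- # standard backward recurrences: dP = dO @ V^T and dS = P * (dP - Delta), where Delta = rowsum(dO * O) comes from the preprocess kernel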
- if IS_FP8: - dp = (tl.dot(do, vT) * descale_do * descale_v) - else: - dp = tl.dot(do, vT) - if ENABLE_DROPOUT: - dp = tl.where(dropout_mask, dp, 0.0) * dropout_scale - delta_i = Di[:, None] - ds = p * (dp -delta_i) - # Compute dQ. - # NOTE: We need to de-scale dq in the end, because kT was pre-scaled. - if IS_FP8: - scale_ds, descale_ds = compute_fp8_scaling_factors(ds, FP8_MAX) - dq += (tl.dot((ds * scale_ds).to(kT.type.element_ty), tl.trans(kT)) * descale_ds * descale_k) - else: - dq += tl.dot(ds.to(kT.type.element_ty), tl.trans(kT)) - # Increment pointers. - curr_n += step_n - kT_ptrs += step_n * stride_kn - vT_ptrs += step_n * stride_vn - return dq - -@triton.autotune( - configs=causal_autotune_configs, - key=causal_autotune_keys, - use_cuda_graph=True, -) -@triton.jit -def bwd_kernel_causal( # grid = (nheads_k, tl.cdiv(max_seqlen_q // BLOCK_M2), batch) - Q, K, V, sm_scale, DO, DQ, DK, DV, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qd, - stride_kb, stride_kh, stride_kn, stride_kd, - stride_vb, stride_vh, stride_vn, stride_vd, - stride_dqb, stride_dqh, stride_dqm, stride_dqd, - stride_dkb, stride_dkh, stride_dkn, stride_dkd, - stride_dvb, stride_dvh, stride_dvn, stride_dvd, - stride_lse_b, stride_lse_h, stride_lse_m, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - seqused_q, seqused_k, # Add seqused parameters - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M1: tl.constexpr, - BLOCK_N1: tl.constexpr, - BLOCK_M2: tl.constexpr, - BLOCK_N2: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM_QK: tl.constexpr, - HEAD_DIM_V: tl.constexpr, - ACTUAL_HEAD_DIM_QK: tl.constexpr, - ACTUAL_HEAD_DIM_V: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - USE_SEQUSED: tl.constexpr, # Add flag for seqused - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - hkid = tl.program_id(0) - pid = tl.program_id(1) - bid = tl.program_id(2) - if DEBUG_TRITON: print(f"\npid: {pid}, bid: {bid}, hkid: {hkid}") # noqa: E701 - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - - # If seqused is provided, use it to limit the actual sequence length - if USE_SEQUSED: - actual_seqlen_q = tl.load(seqused_q + bid) if seqused_q is not None else q_end - q_start - seqlen_q = tl.minimum(actual_seqlen_q, q_end - q_start) - actual_seqlen_k = tl.load(seqused_k + bid) if seqused_k is not None else k_end - k_start - seqlen_k = tl.minimum(actual_seqlen_k, k_end - k_start) - else: - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - delta_qk = seqlen_q - seqlen_k - if DEBUG_TRITON: print(f"delta_qk = {delta_qk}") # noqa: E701 - PADDED_HEAD_QK: tl.constexpr = (ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK) - PADDED_HEAD_V: tl.constexpr = (ACTUAL_HEAD_DIM_V != HEAD_DIM_V) - 
offs_d_qk = tl.arange(0, HEAD_DIM_QK) - offs_d_v = tl.arange(0, HEAD_DIM_V) - GROUP_SIZE: tl.constexpr = HQ // HK - - # align the delta_qk - start_n = pid * BLOCK_N1 - if start_n < seqlen_k: - # This section does dk and dv - dk = tl.zeros([BLOCK_N1, HEAD_DIM_QK], dtype=tl.float32) - dv = tl.zeros([BLOCK_N1, HEAD_DIM_V], dtype=tl.float32) - - # q > k: directly skip all the way until the start of causal block - start_delta_q_gt_k = delta_qk - # q < k: some blocks will have no Masked block, others need to re-calc - # starting position - # delta_qk is negative so flip it, only a multiple of BLOCK_N can skip the - # masked op - num_blocks_skip = -delta_qk // BLOCK_N1 - delta_aligned = (num_blocks_skip + 1) * BLOCK_N1 + delta_qk - start_delta_q_lt_k = delta_aligned // BLOCK_M1 * BLOCK_M1 - if delta_qk >= 0: - start_delta = delta_qk - if DEBUG_TRITON: print(f"q >= k: start_delta = delta_qk aligned to BLOCK_M = {start_delta_q_gt_k}") # noqa: E701 - else: - start_delta = start_delta_q_lt_k - if DEBUG_TRITON: print(f"q < k: start_delta = residue btw multiple BLOCK_N and delta_qk = {delta_aligned} = aligned to BLOCK_M = {start_delta_q_lt_k}") # noqa: E701 - - offs_n = start_n + tl.arange(0, BLOCK_N1) - # Mask for loading K and V - mask_k = offs_n[:, None] < seqlen_k - mask_v = offs_n[:, None] < seqlen_k - if PADDED_HEAD_QK: - mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK - mask_k &= mask_d_qk[None, :] - if PADDED_HEAD_V: - mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V - mask_v &= mask_d_v[None, :] - - # K/V tensors not changed for the group - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn + offs_n[:, None] * stride_kn + offs_d_qk[None, :] * stride_kd - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn + offs_n[:, None] * stride_vn + offs_d_v[None, :] * stride_vd - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(K + adj_k, mask=mask_k, other=0.0) - v = tl.load(V + adj_v, mask=mask_v, other=0.0) - # If MQA / GQA, set the K and V head offsets appropriately.
- # hqid = hkid - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - if delta_qk >= 0: - start_m = start_n + start_delta - len_m = BLOCK_N1 - else: - start_m = max(start_n + delta_qk, 0) - start_m = start_m // BLOCK_M1 * BLOCK_M1 - # because we might shift the masked blocks up, we are deeper into - # the masked out region, so we would potentially increase the total - # steps with masked operation to get out of it - residue_m = max(start_n + delta_qk - start_m, 0) - len_m = BLOCK_N1 + residue_m - if DEBUG_TRITON: print(f"residue_m = {residue_m}") # noqa: E701 - - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - Q_ptr = Q + adj_q - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - DO_ptr = DO + adj_do - adj_delta = bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m - Delta_ptr = Delta + adj_delta - adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m - M_ptr = M + adj_m - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = Dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR - # bound the masked operation to q len so it does not have to wast cycles - len_m = min(len_m, seqlen_q) - num_steps = tl.cdiv(len_m, MASK_BLOCK_M1) - # when q < k, we may skip the initial masked op - if pid < num_blocks_skip: - num_steps = 0 - - # if start_m is negative, the current N-tile has no block on the - # diagonal of causal mask, so everything have no causal mask - if DEBUG_TRITON: print(f"Masked: start_n: {start_n}; start_m: {start_m}, num_steps: {num_steps}") # noqa: E701 - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qd, # strides for q - stride_dom, stride_dod, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_lse_m, stride_delta_m, - MASK_BLOCK_M1, BLOCK_N1, # block dim - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, - MASK=True, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - start_m += num_steps * MASK_BLOCK_M1 - num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M1) - end_m = start_m + num_steps * BLOCK_M1 - - if DEBUG_TRITON: print(f"start_m after Masked step: 
{start_m}; num_steps: {num_steps}") # noqa: E701 - if DEBUG_TRITON: print(f"unMasked: start_n: {start_n}, start_m: {start_m}, end_m: {end_m}, num_steps: {num_steps}") # noqa: E701 - if DEBUG_TRITON: print("unMasked") # noqa: E701 - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qd, # strides for q - stride_dom, stride_dod, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_lse_m, stride_delta_m, - BLOCK_M1, BLOCK_N1, # block dim - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - # end of GQA/MQA of dkdv - # Write back dV - adj_dv = bid * stride_dvb + hkid * stride_dvh + k_start * stride_dvn - offs_dv = offs_n[:, None] * stride_dvn + offs_d_v[None, :] * stride_dvd - tl.store(DV + adj_dv + offs_dv, dv, mask=mask_v) - # write back dk - adj_dk = bid * stride_dkb + hkid * stride_dkh + k_start * stride_dkn - offs_dk = offs_n[:, None] * stride_dkn + offs_d_qk[None, :] * stride_dkd - dk *= sm_scale - tl.store(DK + adj_dk + offs_dk, dk, mask=mask_k) - - # This part does dq - start_m = pid * BLOCK_M2 - if start_m < seqlen_q: - # seqlen_q > seqlen_k, no need to process these tile for dq - if DEBUG_TRITON: print(f"end_n = start_m + BLOCK_M = {start_m} + {BLOCK_M2} = {start_m + BLOCK_M2}") # noqa: E701 - if start_m + BLOCK_M2 < delta_qk: - if DEBUG_TRITON: print(f"start_m + BLOCK_M2 = {start_m} + {BLOCK_M2} = {start_m + BLOCK_M2} < delta_qk of {delta_qk}") # noqa: E701 - return - - offs_m = start_m + tl.arange(0, BLOCK_M2) - # Mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - mask_do = offs_m[:, None] < seqlen_q - if PADDED_HEAD_QK: - mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK - mask_q &= mask_d_qk[None, :] - if PADDED_HEAD_V: - mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V - mask_do &= mask_d_v[None, :] - offs_q = offs_m[:, None] * stride_qm + offs_d_qk[None, :] * stride_qd - offs_do = offs_m[:, None] * stride_dom + offs_d_v[None, :] * stride_dod - # NOTE: don't assume that the strides for k and v are the same! - K += bid * stride_kb + hkid * stride_kh + k_start * stride_kn - V += bid * stride_vb + hkid * stride_vh + k_start * stride_vn - - # If MQA / GQA, set the K and V head offsets appropriately. 
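- # each K/V head (hkid) serves GROUP_SIZE = HQ // HK query heads, so dq is accumulated separately for every hqid in the loop below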
- for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # seqlen_q < seqlen_k: delta_qk more kv tokens are added at the front - # for every M-tile - end_n = start_m + BLOCK_M2 - delta_qk - # clamp end_n at [0, seqlen_k] - end_n = max(min(end_n, seqlen_k), 0) - if DEBUG_TRITON: print(f"delta_qk: {delta_qk}; end_n: {end_n}") # noqa: E701 - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - adj_delta = \ - bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m - Delta_ptr = Delta + adj_delta - adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m - M_ptr = M + adj_m - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + \ - bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = \ - Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth - q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(DO + adj_do + offs_do, mask=mask_do, other=0.0) - m = tl.load(M + adj_m + offs_m * stride_lse_m, - mask=offs_m < seqlen_q) - m = m[:, None] - - MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR - # start can only be 0 at minimum - start_n = max(end_n - BLOCK_M2, 0) - num_steps = tl.cdiv(end_n - start_n, MASK_BLOCK_N2) - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - dq = tl.zeros([BLOCK_M2, HEAD_DIM_QK], dtype=tl.float32) - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qd, stride_kn, stride_kd, stride_vn, stride_vd, - stride_dropoutm, stride_dropoutn, - stride_lse_m, - stride_delta_m, - seqlen_q, seqlen_k, - BLOCK_M2, MASK_BLOCK_N2, - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - MASK=True, # - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - end_n -= num_steps * MASK_BLOCK_N2 - num_steps = tl.cdiv(end_n, BLOCK_N2) - start_n = max(end_n - num_steps * BLOCK_N2, 0) - if DEBUG_TRITON: print(f"unMasked: start_m: {start_m}, start_n: {start_n}, end_n: {end_n}, num_steps: {num_steps}") # noqa: E701 - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qd, stride_kn, stride_kd, stride_vn, stride_vd, - stride_dropoutm, stride_dropoutn, - stride_lse_m, - stride_delta_m, - seqlen_q, seqlen_k, - BLOCK_M2, BLOCK_N2, - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - 
MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - # Write back dQ. - adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm - offs_dq = offs_m[:, None] * stride_dqm + offs_d_qk[None, :] * stride_dqd - dq *= sm_scale - tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) - # end of GQA/MQA of dq - -@triton.autotune( - configs=noncausal_autotune_configs, - key=noncausal_autotune_keys, - use_cuda_graph=True, -) -@triton.jit -def bwd_kernel_noncausal( - Q, K, V, sm_scale, DO, DQ, DK, DV, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qd, - stride_kb, stride_kh, stride_kn, stride_kd, - stride_vb, stride_vh, stride_vn, stride_vd, - stride_dqb, stride_dqh, stride_dqm, stride_dqd, - stride_dkb, stride_dkh, stride_dkn, stride_dkd, - stride_dvb, stride_dvh, stride_dvn, stride_dvd, - stride_lse_b, stride_lse_h, stride_lse_m, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - seqused_q, seqused_k, # Add seqused parameters - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M1: tl.constexpr, # 32 - BLOCK_N1: tl.constexpr, # 128 - BLOCK_M2: tl.constexpr, # 128 - BLOCK_N2: tl.constexpr, # 32 - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM_QK: tl.constexpr, - HEAD_DIM_V: tl.constexpr, - ACTUAL_HEAD_DIM_QK: tl.constexpr, - ACTUAL_HEAD_DIM_V: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - USE_SEQUSED: tl.constexpr, # Add flag for seqused - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - hkid = tl.program_id(0) - pid = tl.program_id(1) - bid = tl.program_id(2) - if DEBUG_TRITON: print(f"\npid: {pid}, bid: {bid}, hkid: {hkid}") # noqa: E701 - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - - # If seqused is provided, use it to limit the actual sequence length - if USE_SEQUSED: - actual_seqlen_q = tl.load(seqused_q + bid) if seqused_q is not None else q_end - q_start - seqlen_q = tl.minimum(actual_seqlen_q, q_end - q_start) - actual_seqlen_k = tl.load(seqused_k + bid) if seqused_k is not None else k_end - k_start - seqlen_k = tl.minimum(actual_seqlen_k, k_end - k_start) - else: - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - PADDED_HEAD_QK: tl.constexpr = (ACTUAL_HEAD_DIM_QK != HEAD_DIM_QK) - PADDED_HEAD_V: tl.constexpr = (ACTUAL_HEAD_DIM_V != HEAD_DIM_V) - offs_d_qk = tl.arange(0, HEAD_DIM_QK) - offs_d_v = tl.arange(0, HEAD_DIM_V) - GROUP_SIZE: tl.constexpr = HQ // HK - - start_n = pid * BLOCK_N1 - if start_n < seqlen_k: - dk = tl.zeros([BLOCK_N1, HEAD_DIM_QK], dtype=tl.float32) - dv = tl.zeros([BLOCK_N1, HEAD_DIM_V], dtype=tl.float32) - - offs_n = start_n + tl.arange(0, BLOCK_N1) - # Mask 
for loading K and V - mask_k = offs_n[:, None] < seqlen_k - mask_v = offs_n[:, None] < seqlen_k - if PADDED_HEAD_QK: - mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK - mask_k &= mask_d_qk[None, :] - if PADDED_HEAD_V: - mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V - mask_v &= mask_d_v[None, :] - # NOTE: don't assume that the strides for k and v are the same! - # K/V tensors not changed for the group - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn + offs_n[:, None] * stride_kn + offs_d_qk[None, :] * stride_kd - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn + offs_n[:, None] * stride_vn + offs_d_v[None, :] * stride_vd - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(K + adj_k, mask=mask_k, other=0.0) - v = tl.load(V + adj_v, mask=mask_v, other=0.0) - # If MQA / GQA, set the K and V head offsets appropriately. - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - Q_ptr = Q + adj_q - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - DO_ptr = DO + adj_do - adj_delta = bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m - Delta_ptr = Delta + adj_delta - adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m - M_ptr = M + adj_m - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = Dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # because there is no causal, we always start from the beginning - start_m = 0 - num_steps = tl.cdiv(seqlen_q, BLOCK_M1) - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qd, # strides for q - stride_dom, stride_dod, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_lse_m, - stride_delta_m, - BLOCK_M1, BLOCK_N1, # block dim - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - # Write back dV - adj_dv = bid * stride_dvb + hkid * stride_dvh + k_start * stride_dvn - offs_dv = offs_n[:, None] * stride_dvn + offs_d_v[None, :] * stride_dvd - tl.store(DV + adj_dv + offs_dv, dv, mask=mask_v) - # write back 
dk - adj_dk = bid * stride_dkb + hkid * stride_dkh + k_start * stride_dkn - offs_dk = offs_n[:, None] * stride_dkn + offs_d_qk[None, :] * stride_dkd - dk *= sm_scale - tl.store(DK + adj_dk + offs_dk, dk, mask=mask_k) - - # THIS PART DOES DQ - start_m = pid * BLOCK_M2 - if start_m < seqlen_q: - offs_m = start_m + tl.arange(0, BLOCK_M2) - # Mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - mask_do = offs_m[:, None] < seqlen_q - if PADDED_HEAD_QK: - mask_d_qk = offs_d_qk < ACTUAL_HEAD_DIM_QK - mask_q &= mask_d_qk[None, :] - if PADDED_HEAD_V: - mask_d_v = offs_d_v < ACTUAL_HEAD_DIM_V - mask_do &= mask_d_v[None, :] - offs_q = offs_m[:, None] * stride_qm + offs_d_qk[None, :] * stride_qd - offs_do = offs_m[:, None] * stride_dom + offs_d_v[None, :] * stride_dod - K += bid * stride_kb + hkid * stride_kh + k_start * stride_kn - V += bid * stride_vb + hkid * stride_vh + k_start * stride_vn - # If MQA / GQA, set the K and V head offsets appropriately. - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - adj_delta = \ - bid * stride_delta_b + hqid * stride_delta_h + q_start * stride_delta_m - Delta_ptr = Delta + adj_delta - adj_m = bid * stride_lse_b + hqid * stride_lse_h + q_start * stride_lse_m - M_ptr = M + adj_m - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + \ - bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = \ - Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth - - q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(DO + adj_do + offs_do, mask=mask_do, other=0.0) - m = tl.load(M + adj_m + offs_m * stride_lse_m, - mask=offs_m < seqlen_q) - m = m[:, None] - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # start can only be 0 at minimum - start_n = 0 - end_n = seqlen_k - num_steps = tl.cdiv(seqlen_k, BLOCK_N2) - - dq = tl.zeros([BLOCK_M2, HEAD_DIM_QK], dtype=tl.float32) - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qd, stride_kn, stride_kd, stride_vn, stride_vd, - stride_dropoutm, stride_dropoutn, - stride_lse_m, - stride_delta_m, - seqlen_q, seqlen_k, - BLOCK_M2, BLOCK_N2, - HEAD_DIM_QK, HEAD_DIM_V, ACTUAL_HEAD_DIM_QK, ACTUAL_HEAD_DIM_V, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - # Write back dQ. 
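- # dq was accumulated without the softmax scale; sm_scale is applied once here before the store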
- adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm - offs_dq = offs_m[:, None] * stride_dqm + offs_d_qk[None, :] * stride_dqd - dq *= sm_scale - tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) - -def is_contiguous(x, name): - if x.is_contiguous(): - return x - else: - print(f"{name} is not contiguous") - return x.contiguous() - -OLD_LSE: bool = False -DEBUG_TRITON: bool = False -DEBUG_TRITON_DETAIL: bool = False - -def attention_prefill_backward_triton_split_fused_no_atomics_impl( - do: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - o: torch.Tensor, - softmax_lse: torch.Tensor, - dq: torch.Tensor, - dk: torch.Tensor, - dv: torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - layout: Literal["bshd", "bhsd", "thd"], - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - max_seqlen_q: Optional[int], - max_seqlen_k: Optional[int], - dropout_p: float, - philox_seed: Optional[int], - philox_offset: Optional[int], - use_exp2: bool, - # fp8 - descale_q: Optional[torch.Tensor], - descale_k: Optional[torch.Tensor], - descale_v: Optional[torch.Tensor], - descale_o: Optional[torch.Tensor], - descale_do: Optional[torch.Tensor], - descale_dq: Optional[torch.Tensor], - descale_dk: Optional[torch.Tensor], - descale_dv: Optional[torch.Tensor], - # seqused for FA v3 - seqused_q: Optional[torch.Tensor] = None, - seqused_k: Optional[torch.Tensor] = None, -): - # get params, strides and shape - IS_VARLEN = layout == "thd" - use_dropout = (dropout_p > 0.0) - - # common assertions - assert 0.0 <= dropout_p <= 1.0, f"dropout_p must be between 0 and 1, got {dropout_p}" - assert q.device == k.device == v.device == o.device == do.device == softmax_lse.device, \ - f"All tensors must be on the same device. 
Got: q={q.device}, k={k.device}, v={v.device}, o={o.device}, do={do.device}, softmax_lse={softmax_lse.device}" - assert q.dtype == k.dtype == v.dtype == do.dtype, "q, k, v, do must have the same dtype" - current_device = torch.cuda.current_device() - assert q.is_cuda and q.device.index == current_device, f"Device mismatch: Kernel will launch on cuda:{current_device}, but tensors are on {q.device}" - - # get shapes and strides - if IS_VARLEN: - # shape - total_seqlen_q, nheads_q, head_size_q = q.shape - total_seqlen_k, nheads_k, head_size_k = k.shape - total_seqlen_v, nheads_v, head_size_v = v.shape - nheads_lse, total_seqlen_lse = softmax_lse.shape - - # assert shapes - assert total_seqlen_lse == total_seqlen_q, f"softmax_lse seqlen {total_seqlen_lse} != q seqlen {total_seqlen_q}" - assert cu_seqlens_q is not None, "cu_seqlens_q must be provided for varlen layout" - assert cu_seqlens_k is not None, "cu_seqlens_k must be provided for varlen layout" - assert max_seqlen_q is not None, "max_seqlen_q must be provided for varlen layout" - assert max_seqlen_k is not None, "max_seqlen_k must be provided for varlen layout" - - # assert head dimensions - assert head_size_q == head_size_k, f"head sizes must match: q={head_size_q}, k={head_size_k}" - assert nheads_k == nheads_v, f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" - assert nheads_q % nheads_k == 0, f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" - assert nheads_lse == nheads_q, f"softmax_lse heads {nheads_lse} != q heads {nheads_q}" - - # assert output shapes - assert o.shape == (total_seqlen_q, nheads_q, head_size_v), f"o shape {o.shape} != expected {(total_seqlen_q, nheads_q, head_size_v)}" - assert do.shape == o.shape, f"do shape {do.shape} != o shape {o.shape}" - assert dq.shape == q.shape, f"dq shape {dq.shape} != q shape {q.shape}" - assert dk.shape == k.shape, f"dk shape {dk.shape} != k shape {k.shape}" - assert dv.shape == v.shape, f"dv shape {dv.shape} != v shape {v.shape}" - - # assert cu_seqlens - assert cu_seqlens_q.dtype == torch.int32, f"cu_seqlens_q must be int32, got {cu_seqlens_q.dtype}" - assert cu_seqlens_k.dtype == torch.int32, f"cu_seqlens_k must be int32, got {cu_seqlens_k.dtype}" - assert cu_seqlens_q[0] == 0, "cu_seqlens_q must start with 0" - assert cu_seqlens_k[0] == 0, "cu_seqlens_k must start with 0" - assert cu_seqlens_q[-1] == total_seqlen_q, f"cu_seqlens_q[-1] {cu_seqlens_q[-1]} != total_seqlen_q {total_seqlen_q}" - assert cu_seqlens_k[-1] == total_seqlen_k, f"cu_seqlens_k[-1] {cu_seqlens_k[-1]} != total_seqlen_k {total_seqlen_k}" - - # set vars - batch = len(cu_seqlens_q) - 1 - head_size_qk = head_size_q - - # strides - stride_qb, stride_qm, stride_qh, stride_qd = 0, q.stride(0), q.stride(1), q.stride(2) - stride_kb, stride_kn, stride_kh, stride_kd = 0, k.stride(0), k.stride(1), k.stride(2) - stride_vb, stride_vn, stride_vh, stride_vd = 0, v.stride(0), v.stride(1), v.stride(2) - stride_ob, stride_om, stride_oh, stride_od = 0, o.stride(0), o.stride(1), o.stride(2) - stride_dqb, stride_dqm, stride_dqh, stride_dqd = 0, dq.stride(0), dq.stride(1), dq.stride(2) - stride_dkb, stride_dkn, stride_dkh, stride_dkd = 0, dk.stride(0), dk.stride(1), dk.stride(2) - stride_dvb, stride_dvn, stride_dvh, stride_dvd = 0, dv.stride(0), dv.stride(1), dv.stride(2) - stride_dob, stride_dom, stride_doh, stride_dod = 0, do.stride(0), do.stride(1), do.stride(2) - stride_lse_b, stride_lse_h, stride_lse_m = (0, softmax_lse.stride(0), softmax_lse.stride(1)) - else: - # shapes - batch_q, 
seqlen_q, nheads_q, head_size_q = q.shape - batch_k, seqlen_k, nheads_k, head_size_k = k.shape - batch_v, seqlen_v, nheads_v, head_size_v = v.shape - batch_lse, nheads_lse, seqlen_lse = softmax_lse.shape - - # assert batch dimensions - assert batch_q == batch_k == batch_v, f"batch sizes must match: q={batch_q}, k={batch_k}, v={batch_v}" - - # assert head dimensions - assert head_size_q == head_size_k, f"head sizes must match: q={head_size_q}, k={head_size_k}" - assert nheads_k == nheads_v, f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" - assert nheads_q % nheads_k == 0, f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" - - # assert sequence lengths - assert seqlen_k == seqlen_v, f"k and v sequence lengths must match: k={seqlen_k}, v={seqlen_v}" - - # assert output shapes - assert o.shape == (batch_q, seqlen_q, nheads_q, head_size_v), f"o shape {o.shape} != expected" - assert do.shape == o.shape, f"do shape {do.shape} != o shape {o.shape}" - assert dq.shape == q.shape, f"dq shape {dq.shape} != q shape {q.shape}" - assert dk.shape == k.shape, f"dk shape {dk.shape} != k shape {k.shape}" - assert dv.shape == v.shape, f"dv shape {dv.shape} != v shape {v.shape}" - - # assert softmax_lse shape - assert softmax_lse.shape == (batch_q, nheads_q, seqlen_q), f"softmax_lse shape {softmax_lse.shape} != expected" - - # set vars - batch = batch_q - head_size_qk = head_size_q - max_seqlen_q = seqlen_q - max_seqlen_k = seqlen_k - - # strides - stride_qb, stride_qm, stride_qh, stride_qd = q.stride() - stride_kb, stride_kn, stride_kh, stride_kd = k.stride() - stride_vb, stride_vn, stride_vh, stride_vd = v.stride() - stride_ob, stride_om, stride_oh, stride_od = o.stride() - stride_dqb, stride_dqm, stride_dqh, stride_dqd = dq.stride() - stride_dkb, stride_dkn, stride_dkh, stride_dkd = dk.stride() - stride_dvb, stride_dvn, stride_dvh, stride_dvd = dv.stride() - stride_dob, stride_dom, stride_doh, stride_dod = do.stride() - stride_lse_b, stride_lse_h, stride_lse_m = softmax_lse.stride() - - # fp8 setup - moved after all assertions - IS_FP8 = is_fp8(q) - if IS_FP8: - FP8_MAX = torch.finfo(q.dtype).max - # we already asserted that do, q, k, v all have the same dtype, so no need to check each one - if is_fp8(o): - FP8_OUTPUT = True - assert descale_o is not None, f"descale_o is None. In fp8, you need to pass a tensor for descale_o along with a tensor o." - assert descale_dq is not None, f"descale_dq is None. In fp8, you need to pass a tensor for descale_dq along with a tensor dq." - assert descale_dk is not None, f"descale_dk is None. In fp8, you need to pass a tensor for descale_dk along with a tensor dk." - assert descale_dv is not None, f"descale_dv is None. In fp8, you need to pass a tensor for descale_dv along with a tensor dv." 
- else: - FP8_OUTPUT = False - - stride_descale_q_z = descale_q.stride(0) if descale_q is not None else None - stride_descale_k_z = descale_k.stride(0) if descale_k is not None else None - stride_descale_v_z = descale_v.stride(0) if descale_v is not None else None - stride_descale_o_z = descale_o.stride(0) if descale_o is not None else None - stride_descale_do_z = descale_do.stride(0) if descale_do is not None else None - - if DEBUG: - print(f"FP8 path triggered in bwd_prefill_fused_no_atomics.py (FP8_OUTPUT={FP8_OUTPUT})") - else: - FP8_MAX = None - FP8_OUTPUT = False - stride_descale_q_z = stride_descale_k_z = stride_descale_v_z = stride_descale_o_z = stride_descale_do_z = None - - # alibi setup - use_alibi, (stride_az, stride_ah) = (True, alibi_slopes.stride()) if alibi_slopes is not None else (False, (0, 0)) - - # get closest power of 2 over or equal to 32. - padded_d_model_qk = 1 << (head_size_qk - 1).bit_length() - padded_d_model_qk = max(padded_d_model_qk, 32) - padded_d_model_v = 1 << (head_size_v - 1).bit_length() - padded_d_model_v = max(padded_d_model_v, 32) - HEAD_DIM_QK = padded_d_model_qk - HEAD_DIM_V = padded_d_model_v - ACTUAL_HEAD_DIM_QK = head_size_qk - ACTUAL_HEAD_DIM_V = head_size_v - - # init delta - if OLD_LSE: - delta = torch.empty_like(softmax_lse) - if IS_VARLEN: - stride_delta_b, stride_delta_h, stride_delta_m = 0, delta.stride(0), delta.stride(1) - else: - stride_delta_b, stride_delta_h, stride_delta_m = delta.stride() - else: - if IS_VARLEN: - # interface expects the varlen sequence dims to rounded like this. Not sure why. - total_q, num_heads, _ = q.shape - total_q_rounded = total_q + 128 * batch - delta_padded = torch.zeros((nheads_q, total_q_rounded), device=q.device, dtype=torch.float32) - delta = delta_padded[:, :total_q] - stride_delta_b, stride_delta_h, stride_delta_m = 0, delta.stride(0), delta.stride(1) - else: - # the interface expects the sequence dimension to be rounded to 128 - max_seqlen_q_rounded = round_multiple(max_seqlen_q, 128) - delta_padded = torch.zeros((batch, nheads_q, max_seqlen_q_rounded), - device=q.device, dtype=torch.float32) - delta = delta_padded[:, :, :max_seqlen_q] - stride_delta_b, stride_delta_h, stride_delta_m = delta.stride() - - pre_grid = lambda META: (triton.cdiv(max_seqlen_q, META['PRE_BLOCK']), batch, nheads_q) - _bwd_preprocess[pre_grid]( - o, do, - delta, - stride_ob, stride_oh, stride_om, stride_od, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_descale_do_z, - cu_seqlens_q, max_seqlen_q, - descale_do, - HEAD_DIM_V=HEAD_DIM_V, - ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8 - ) - - if False: - print("delta:", delta, delta.shape) - - # dropout mask tensor for debugging. 
We dump the dropout mask created in - # the kernel for testing - dropout_mask = None - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = \ - (0, 0 , 0 , 0) - if use_dropout: - dropout_mask = torch.zeros( - (batch, nheads_q, max_seqlen_q, max_seqlen_k), - device=q.device, - dtype=torch.float32 - ) - - if DROPOUT_USE_PYTORCH: - if not IS_VARLEN: - dropout_mask = create_dropout_mask( - dropout_p, - (batch, nheads_q, max_seqlen_q, max_seqlen_k), - seed = philox_seed - ) - else: - dropout_mask = create_dropout_mask_varlen( - dropout_p, batch, nheads_q, - cu_seqlens_q, cu_seqlens_k, philox_seed - ) - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = \ - dropout_mask.stride() - - seqlen = max(max_seqlen_q, max_seqlen_k) - grid = lambda META: (nheads_k, (seqlen + META['BLOCK_N1'] - 1) // META['BLOCK_N1'], batch, ) - if causal: - if DEBUG_TRITON: print(f"bwd_kernel: grid = {grid}" ) # noqa: E701 - bwd_kernel_causal[grid]( - q, k, v, sm_scale, do, dq, dk, dv, - softmax_lse, delta, - stride_qb, stride_qh, stride_qm, stride_qd, - stride_kb, stride_kh, stride_kn, stride_kd, - stride_vb, stride_vh, stride_vn, stride_vd, - stride_dqb, stride_dqh, stride_dqm, stride_dqd, - stride_dkb, stride_dkh, stride_dkn, stride_dkd, - stride_dvb, stride_dvh, stride_dvn, stride_dvd, - stride_lse_b, stride_lse_h, stride_lse_m, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - seqused_q, seqused_k, # Pass seqused tensors - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - HEAD_DIM_QK=HEAD_DIM_QK, - HEAD_DIM_V=HEAD_DIM_V, - ACTUAL_HEAD_DIM_QK=ACTUAL_HEAD_DIM_QK, - ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - FP8_OUTPUT=FP8_OUTPUT, - USE_SEQUSED=(seqused_q is not None or seqused_k is not None), # Add flag for seqused - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - else: - bwd_kernel_noncausal[grid]( - q, k, v, sm_scale, do, dq, dk, dv, - softmax_lse, delta, - stride_qb, stride_qh, stride_qm, stride_qd, - stride_kb, stride_kh, stride_kn, stride_kd, - stride_vb, stride_vh, stride_vn, stride_vd, - stride_dqb, stride_dqh, stride_dqm, stride_dqd, - stride_dkb, stride_dkh, stride_dkn, stride_dkd, - stride_dvb, stride_dvh, stride_dvn, stride_dvd, - stride_lse_b, stride_lse_h, stride_lse_m, - stride_delta_b, stride_delta_h, stride_delta_m, - stride_dob, stride_doh, stride_dom, stride_dod, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - seqused_q, seqused_k, # Pass seqused tensors - max_seqlen_q, max_seqlen_k, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - HEAD_DIM_QK=HEAD_DIM_QK, - HEAD_DIM_V=HEAD_DIM_V, - ACTUAL_HEAD_DIM_QK=ACTUAL_HEAD_DIM_QK, - ACTUAL_HEAD_DIM_V=ACTUAL_HEAD_DIM_V, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - 
FP8_OUTPUT=FP8_OUTPUT, - USE_SEQUSED=(seqused_q is not None or seqused_k is not None), # Add flag for seqused - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - if OLD_LSE: - return delta - else: - return delta_padded diff --git a/flash_attn/flash_attn_triton_amd/bwd_prefill_split.py b/flash_attn/flash_attn_triton_amd/bwd_prefill_split.py deleted file mode 100755 index 9ffdc9dea1a..00000000000 --- a/flash_attn/flash_attn_triton_amd/bwd_prefill_split.py +++ /dev/null @@ -1,1360 +0,0 @@ -import torch -import triton # type: ignore -import triton.language as tl # type: ignore -from typing import Literal, Optional -from .utils import DROPOUT_USE_PYTORCH, DROPOUT_DUMP, DEBUG, compute_fp8_scaling_factors, get_shapes_from_layout, \ - get_strides_from_layout, create_dropout_mask, create_dropout_mask_varlen, is_fp8 - -# NOTE: triton fails to import tl.constexprs so create them here for the file -tl_DROPOUT_USE_PYTORCH: tl.constexpr = triton.language.constexpr(DROPOUT_USE_PYTORCH) -tl_DROPOUT_DUMP: tl.constexpr = triton.language.constexpr(DROPOUT_DUMP) - -# This function computes delta given output Out and gradient DO -# Here is the I/O shape: -# Out: (batch, nhead_q, max_seqlens_q, headDim) -# DO: (batch, nhead_q, max_seqlens_q, headDim) -# Delta: (batch, nheads_q, max_seqlens_q), same as softmax_lse defined at -# fwd_prefill.py line 607 -@triton.jit -def _bwd_preprocess( - O, DO, # noqa: E741 - Delta, - stride_ob, stride_oh, stride_om, stride_ok, - stride_deltab, stride_deltah, stride_deltam, - stride_descale_do_z, - cu_seqlens_q, max_seqlen_q, - Descale_do, - BLOCK_M: tl.constexpr, - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, - IS_VARLEN: tl.constexpr, - IS_FP8: tl.constexpr -): - pid_m = tl.program_id(0) - bid = tl.program_id(1) - hid = tl.program_id(2) - # Handle varlen - q_start = 0 - seqlen_q = max_seqlen_q - if IS_VARLEN: - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - seqlen_q = q_end - q_start - else: - q_start = 0 - seqlen_q = max_seqlen_q - - # Compute offsets - offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) - offs_k = tl.arange(0, HEAD_DIM) - # Offset O/DO by batch, head and q_start - O += bid * stride_ob + hid * stride_oh + q_start * stride_om # noqa: E741 - DO += bid * stride_ob + hid * stride_oh + q_start * stride_om - # create masks - mask_m = offs_m < seqlen_q - mask_md = mask_m[:, None] - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - if PADDED_HEAD: - mask_md &= offs_k[None, :] < ACTUAL_HEAD_DIM - # compute pointers - offs_do = offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok - out_ptrs = O + offs_do - do_ptrs = DO + offs_do - # load - o = tl.load(out_ptrs, mask=mask_md, other=0.0) - do = tl.load(do_ptrs, mask=mask_md, other=0.0) - # compute and write-back to delta - if IS_FP8: - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hid) - - # NOTE: do is in the fp8 range and o is not in fp8 - delta = tl.sum(o.to(tl.float32) * (do.to(tl.float32) * descale_do), axis=1) - else: - delta = tl.sum(o.to(tl.float32) * do.to(tl.float32), axis=1) - delta_offset = Delta + bid * stride_deltab + hid * stride_deltah + q_start * stride_deltam - tl.store(delta_offset + offs_m * stride_deltam, delta, mask=mask_m) - - -# The main inner-loop logic for computing dK and dV. 
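[Editor's aside] The _bwd_preprocess kernel above materializes Delta, the rowsum of O * dO that both the dK/dV and dQ passes reuse. A minimal PyTorch reference of that computation, ignoring varlen packing and fp8 descaling (the "bshd" layout and shapes here are assumptions for illustration only):

import torch

def bwd_preprocess_reference(o: torch.Tensor, do: torch.Tensor) -> torch.Tensor:
    # o, do: (batch, seqlen_q, nheads, head_dim); result: (batch, nheads, seqlen_q) in float32
    delta = (o.float() * do.float()).sum(dim=-1)   # rowsum of O * dO over the head dimension
    return delta.transpose(1, 2).contiguous()      # same layout as softmax_lse / Delta

o = torch.randn(2, 128, 4, 64, dtype=torch.float16)
do = torch.randn_like(o)
delta = bwd_preprocess_reference(o, do)
assert delta.shape == (2, 4, 128) and delta.dtype == torch.float32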
-@triton.jit -def _bwd_dkdv_inner( - dk, dv, # output - Q, k, v, DO, M, D, sm_scale, # input tensor - stride_qm, stride_qk, - stride_dom, stride_dok, - stride_dropoutm, stride_dropoutn, - stride_deltam, - BLOCK_M: tl.constexpr, # 16 - BLOCK_N: tl.constexpr, # 128 - HEAD_DIM: tl.constexpr, # - ACTUAL_HEAD_DIM: tl.constexpr, # - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - # Filled in by the wrapper. - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK: tl.constexpr, # causal masking, only apply to tiles on mask diagonal - ENABLE_DROPOUT: tl.constexpr, # activate dropout - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, # activate exp2 - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # if HEAD_DIM is padded - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M) # start_m + (0, 15) - offs_n = start_n + tl.arange(0, BLOCK_N) # start_m + (0, 127) - offs_k = tl.arange(0, HEAD_DIM) - # mask to make sure not OOB of seqlen_q - mask_n = offs_n < seqlen_k - # Q and DO are (seqlen_q, head_dim) - # qT_ptrs = (1, BLOCK_M) + (HEAD_DIM, 1), transpose of q - qT_ptrs = Q + offs_m[None, :] * stride_qm + offs_k[:, None] * stride_qk - # do_ptrs = (BLOCK_M, 1) + (1, HEAD_DIM), NOT transposed - do_ptrs = DO + offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok - # BLOCK_N must be a multiple of BLOCK_M, otherwise the code wouldn't work. - tl.static_assert(BLOCK_N % BLOCK_M == 0) - curr_m = start_m - step_m = BLOCK_M - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) - - for blk_idx in range(num_steps): - if DEBUG_TRITON: print(f"iter {blk_idx}: curr_m = {curr_m}") # noqa: E701 - offs_m = curr_m + tl.arange(0, BLOCK_M) - # update the mask because offs_m advanced - mask_m = offs_m < seqlen_q - mask_qT = mask_m[None, :] - mask_do = mask_m[:, None] - mask_nm = mask_n[:, None] & (offs_m[None, :] < seqlen_q) - if PADDED_HEAD: - mask_qT &= offs_k[:, None] < ACTUAL_HEAD_DIM - mask_do &= offs_k[None, :] < ACTUAL_HEAD_DIM - qT = tl.load(qT_ptrs, mask=mask_qT, other=0.0) - # generate dropout mask - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = curr_philox_offset + \ - offs_m[None, :] * stride_dropoutm + \ - offs_n[:, None] * stride_dropoutn - if tl_DROPOUT_USE_PYTORCH: - dropout_offs = offs_m[None, :] * stride_dropoutm + \ - offs_n[:, None] * stride_dropoutn - dropout_mask = tl.load( - curr_dropout_offset + dropout_offs, - mask=mask_nm - ) - else: - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1.0 / (1 - dropout_p) - # Load m before computing qk to reduce pipeline stall. 
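[Editor's aside] The RCP_LN2 constant above feeds the USE_EXP2 fast path used just below: probabilities are recomputed from the saved log-sum-exp rather than by re-running softmax, and exp2(x / ln 2) is simply a cheaper way to evaluate exp(x). A small sketch showing the two branches agree (sizes are illustrative):

import math
import torch

RCP_LN2 = 1.0 / math.log(2.0)

scores = torch.randn(128, 64)                           # sm_scale * q @ k^T for one (head, block)
lse = torch.logsumexp(scores, dim=-1, keepdim=True)     # per-row log-sum-exp saved by the forward pass

p_exp = torch.exp(scores - lse)                          # USE_EXP2 = False path
p_exp2 = torch.exp2(scores * RCP_LN2 - lse * RCP_LN2)    # USE_EXP2 = True path

assert torch.allclose(p_exp, p_exp2, atol=1e-6)
assert torch.allclose(p_exp.sum(-1), torch.ones(128), atol=1e-5)  # each row is a softmax row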
- m = tl.load(M + offs_m * stride_deltam, mask=mask_m, other=0.0) - if IS_FP8: - qkT = (tl.dot(k, qT) * descale_q * descale_k) - else: - qkT = tl.dot(k, qT) - qkT_scaled = qkT * sm_scale - - if USE_ALIBI: - relative_pos_block = offs_n[:, None] + seqlen_q - seqlen_k - offs_m[None, :] - alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) - qkT_scaled += alibi_block - - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"qT: {qT.shape}\n", qT) - print(f"k: {k.shape}\n", k) - print(f"qkT scaled: {qkT.shape}\n", qkT_scaled) - # TODO: remove the scaling of m later when we removed re-scaling in fwd - if USE_EXP2: - pT = tl.math.exp2(qkT_scaled * RCP_LN2 - m[None, :] * RCP_LN2) - else: - pT = tl.math.exp(qkT_scaled - m[None, :]) - - # Autoregressive masking. - if MASK: - # offset offs_m with delta_qk since the causal mask starts at - # bottom right of the (seqlen_q, seqlen_k) matrix - causal_mask = (offs_m[None, :] - delta_qk) >= offs_n[:, None] - mask = causal_mask & mask_nm - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"causal_mask: {causal_mask.shape}\n", causal_mask) - print(f"qkT after causal: {qkT.shape}\n", tl.where(causal_mask, qkT * sm_scale, 0.0)) - pT = tl.where(mask, pT, 0.0) - do = tl.load(do_ptrs, mask=mask_do, other=0.0) - # Compute dV. - if ENABLE_DROPOUT: - pT_dropout = tl.where(dropout_mask, pT, 0.0) * dropout_scale - if IS_FP8: - scale_p_dropout, descale_p_dropout = compute_fp8_scaling_factors(pT_dropout, FP8_MAX) - dv += (tl.dot((pT_dropout * scale_p_dropout).to(do.type.element_ty), do)* descale_p_dropout * descale_do) - else: - dv += tl.dot(pT_dropout.to(do.type.element_ty), do) - else: - if IS_FP8: - scale_pT, descale_pT = compute_fp8_scaling_factors(pT, FP8_MAX) - dv += (tl.dot((pT * scale_pT).to(do.type.element_ty), do) * descale_pT * descale_do) - else: - dv += tl.dot(pT.to(do.type.element_ty), do) - - if DEBUG_TRITON_DETAIL: - if start_n == 256: - print(f"pT: {pT.shape}\n", pT) - # D (= delta) is pre-divided by ds_scale. - Di = tl.load(D + offs_m * stride_deltam, mask=mask_m) - # Compute dP and dS. - if IS_FP8: - dpT = (tl.dot(v, tl.trans(do)) * descale_v * descale_do) - else: - dpT = tl.dot(v, tl.trans(do)) - if ENABLE_DROPOUT: - dpT = tl.where(dropout_mask, dpT, 0.0) * dropout_scale - delta_i = Di[None, :] - dsT = pT * (dpT - delta_i) - if IS_FP8: - scale_dsT, descale_dsT = compute_fp8_scaling_factors(dsT, FP8_MAX) - dk += (tl.dot((dsT * scale_dsT).to(qT.type.element_ty), tl.trans(qT)) * descale_dsT * descale_q) - else: - dk += tl.dot(dsT.to(qT.type.element_ty), tl.trans(qT)) - # Increment pointers. 
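[Editor's aside] Stripped of tiling, dropout, ALiBi and fp8 descaling, the dK/dV math in this inner loop is the standard attention backward. An illustrative single-head PyTorch check against autograd (shapes and sm_scale are assumptions, not kernel values):

import torch

torch.manual_seed(0)
sm_scale = 64 ** -0.5
q = torch.randn(128, 64, dtype=torch.float64, requires_grad=True)
k = torch.randn(256, 64, dtype=torch.float64, requires_grad=True)
v = torch.randn(256, 64, dtype=torch.float64, requires_grad=True)
do = torch.randn(128, 64, dtype=torch.float64)

# forward: O = softmax(sm_scale * Q K^T) V, then backprop dO through autograd
out = torch.softmax(sm_scale * q @ k.T, dim=-1) @ v
out.backward(do)

# manual backward, the same algebra the inner loop implements blockwise
p = torch.softmax(sm_scale * q @ k.T, dim=-1).detach()
dv = p.T @ do                              # dV = P^T dO
dp = do @ v.T                              # dP = dO V^T
delta = (p * dp).sum(-1, keepdim=True)     # the Delta / Di term
ds = p * (dp - delta)                      # dS w.r.t. the scaled scores
dk = sm_scale * ds.T @ q                   # sm_scale applied last, like the kernel's write-back

assert torch.allclose(v.grad, dv) and torch.allclose(k.grad, dk)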
- curr_m += step_m - qT_ptrs += step_m * stride_qm - do_ptrs += step_m * stride_dom - return dk, dv - - -# grid = (max_seqlen_k // BLOCK_N, batch, nheads_q) -@triton.jit -def _bwd_kernel_dkdv_causal( - Q, K, V, sm_scale, DO, DK, DV, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M: tl.constexpr, # 32 - BLOCK_N: tl.constexpr, # 128 - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - pid = tl.program_id(0) - bid = tl.program_id(1) - hkid = tl.program_id(2) - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - dk = tl.zeros([BLOCK_N, HEAD_DIM], dtype=tl.float32) - dv = tl.zeros([BLOCK_N, HEAD_DIM], dtype=tl.float32) - # Figure out causal starting block since we have seqlen_q >=< seqlen_k. - # Unlike forward pass where we tile on M dim and iterate on N dim, so that - # we can skip some M blocks, in backward pass, we tile on the N dim for kv - # and iterate over the M. In this way, we cannot skip N blocks, but only to - # determine the starting M blocks to skip some initial blocks masked by - # causal. 
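[Editor's aside] The IS_VARLEN branches in these kernels recover each sequence's extent from cu_seqlens, the cumulative-length vector of the packed "thd" layout. A minimal sketch of how such offsets are built and consumed (the lengths and shapes are made up for illustration):

import torch

seqlens_q = torch.tensor([100, 37, 256])                 # per-sequence lengths in the packed batch
cu_seqlens_q = torch.zeros(len(seqlens_q) + 1, dtype=torch.int32)
cu_seqlens_q[1:] = torch.cumsum(seqlens_q, dim=0)        # [0, 100, 137, 393]

total_q, nheads, head_dim = int(cu_seqlens_q[-1]), 4, 64
q_packed = torch.randn(total_q, nheads, head_dim)        # "thd": all sequences concatenated

for b in range(len(seqlens_q)):
    q_start, q_end = int(cu_seqlens_q[b]), int(cu_seqlens_q[b + 1])
    q_b = q_packed[q_start:q_end]                        # this sequence's (seqlen_b, nheads, head_dim) slice
    assert q_b.shape[0] == int(seqlens_q[b])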
- delta_qk = seqlen_q - seqlen_k - if DEBUG_TRITON: print(f"\npid: {pid}, bid: {bid}, hkid: {hkid}") - if DEBUG_TRITON: print(f"delta_qk = {delta_qk}") - # q > k: directly skip all the way until the start of causal block - start_delta_q_gt_k = delta_qk - # q < k: some blocks will have no Masked block, others need to re-calc - # starting position - # delta_qk is negative so flip it, only multiples of BLOCK_N can skip the - # masked op - num_blocks_skip = -delta_qk // BLOCK_N - delta_aligned = (num_blocks_skip + 1) * BLOCK_N + delta_qk - start_delta_q_lt_k = delta_aligned // BLOCK_M * BLOCK_M - if delta_qk >= 0: - start_delta = delta_qk - if DEBUG_TRITON: print(f"q >= k: start_delta = delta_qk aligned to BLOCK_M = {start_delta_q_gt_k}") - else: - start_delta = start_delta_q_lt_k - if DEBUG_TRITON: print(f"q < k: start_delta = residue btw multiple BLOCK_N and delta_qk = {delta_aligned} = aligned to BLOCK_M = {start_delta_q_lt_k}") - # align the delta_qk - start_n = pid * BLOCK_N - - offs_k = tl.arange(0, HEAD_DIM) - offs_n = start_n + tl.arange(0, BLOCK_N) - # Mask for loading K and V - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - if PADDED_HEAD: - mask_k = offs_k < ACTUAL_HEAD_DIM - mask_kv &= mask_k[None, :] - - GROUP_SIZE = HQ // HK - # K/V tensors not changed for the group - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn + offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn + offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(K + adj_k , mask=mask_kv, other=0.0) - v = tl.load(V + adj_v, mask=mask_kv, other=0.0) - # If MQA / GQA, set the K and V head offsets appropriately.
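[Editor's aside] The delta_qk bookkeeping above exists because the causal mask is aligned to the bottom-right of the (seqlen_q, seqlen_k) score matrix when the two lengths differ: query row i may attend to key j only if j <= i + seqlen_k - seqlen_q. A toy sketch of that mask (illustrative only):

import torch

def bottom_right_causal_mask(seqlen_q: int, seqlen_k: int) -> torch.Tensor:
    delta_qk = seqlen_q - seqlen_k
    row = torch.arange(seqlen_q).unsqueeze(1)
    col = torch.arange(seqlen_k).unsqueeze(0)
    # allowed iff row - delta_qk >= col, i.e. col <= row + (seqlen_k - seqlen_q)
    return (row - delta_qk) >= col

m = bottom_right_causal_mask(3, 5)
# the last query row sees every key; earlier rows see one fewer key per step up
assert m[-1].all() and m[0].tolist() == [True, True, True, False, False]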
- for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - if delta_qk >= 0: - start_m = start_n + start_delta - len_m = BLOCK_N - else: - start_m = max(start_n + delta_qk, 0) - start_m = start_m // BLOCK_M * BLOCK_M - # because we might shift the masked blocks up, we are deeper into - # the masked out region, so we would potentially increase the total - # steps with masked operation to get out of it - residue_m = max(start_n + delta_qk - start_m, 0) - len_m = BLOCK_N + residue_m - if DEBUG_TRITON: print(f"residue_m = {residue_m}") - - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - Q_ptr = Q + adj_q - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - DO_ptr = DO + adj_do - adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam - M_ptr = M + adj_delta - Delta_ptr = Delta + adj_delta - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = Dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - MASK_BLOCK_M: tl.constexpr = BLOCK_M // BLK_SLICE_FACTOR - # bound the masked operation to q len so it does not have to wast cycles - len_m = min(len_m, seqlen_q) - num_steps = tl.cdiv(len_m, MASK_BLOCK_M) - # when q < k, we may skip the initial masked op - if pid < num_blocks_skip: - num_steps = 0 - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # if start_m is negative, the current N-tile has no block on the - # diagonal of causal mask, so everything have no causal mask - if DEBUG_TRITON: print(f"Masked: start_n: {start_n}; start_m: {start_m}, num_steps: {num_steps}") - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qk, # strides for q - stride_dom, stride_dok, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_deltam, - MASK_BLOCK_M, BLOCK_N, # block dim - HEAD_DIM, ACTUAL_HEAD_DIM, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK=True, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - start_m += num_steps * MASK_BLOCK_M - num_steps = tl.cdiv(seqlen_q - start_m, BLOCK_M) - end_m = start_m + num_steps * BLOCK_M - - if DEBUG_TRITON: print(f"start_m after Masked step: {start_m}; num_steps: {num_steps}") # noqa: E701 - if DEBUG_TRITON: print(f"unMasked: start_n: {start_n}, start_m: {start_m}, end_m: {end_m}, num_steps: 
{num_steps}") # noqa: E701 - if DEBUG_TRITON: print("unMasked") # noqa: E701 - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qk, # strides for q - stride_dom, stride_dok, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_deltam, - BLOCK_M, BLOCK_N, # block dim - HEAD_DIM, ACTUAL_HEAD_DIM, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - # Write back dV and dK. - adj_dkdv = bid * stride_dkb + hkid * stride_kh + k_start * stride_dkn - offs_dkdv = offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk - tl.store(DV + adj_dkdv + offs_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(DK + adj_dkdv + offs_dkdv, dk, mask=mask_kv) - - -# the main inner-loop logic for computing dQ -@triton.jit -def _bwd_dq_inner( - dq, # output - q, K, V, do, m, Delta, sm_scale, # input - # shared by Q/K/V. - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, # stride for dropout - stride_deltam, - seqlen_q, seqlen_k, # - BLOCK_M2: tl.constexpr, # - BLOCK_N2: tl.constexpr, # - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, # - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - # Filled in by the wrapper. - start_m, start_n, end_n, num_steps, # - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # if HEAD_DIM is padded - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - delta_qk = seqlen_q - seqlen_k - offs_m = start_m + tl.arange(0, BLOCK_M2) - offs_n = start_n + tl.arange(0, BLOCK_N2) - offs_k = tl.arange(0, HEAD_DIM) - - # mask to make sure not OOB of seqlen_q - mask_m = offs_m < seqlen_q - - kT_ptrs = K + offs_n[None, :] * stride_kn + offs_k[:, None] * stride_kk - vT_ptrs = V + offs_n[None, :] * stride_vn + offs_k[:, None] * stride_vk - # D (= delta) is pre-divided by ds_scale. - Di = tl.load(Delta + offs_m * stride_deltam, mask=mask_m, other=0.0) - # BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work. 
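[Editor's aside] On the IS_FP8 path, q, k, v and do arrive pre-scaled into the fp8 range, and the kernels multiply each matmul result by the corresponding descale factors to recover real-valued scores; compute_fp8_scaling_factors (a repo utility not shown in this hunk) produces per-block scale/descale pairs in the same spirit. A hedged sketch of that convention, assuming a recent PyTorch with float8 dtypes (e4m3fnuz is used only as an example):

import torch

def fp8_scale_descale(x: torch.Tensor, fp8_dtype=torch.float8_e4m3fnuz):
    # map the tensor's absolute max onto the fp8 representable range
    fp8_max = torch.finfo(fp8_dtype).max
    scale = fp8_max / x.abs().amax().clamp(min=1e-12)
    return scale, 1.0 / scale          # quantization scale and its inverse (the "descale")

q = torch.randn(128, 64)
k = torch.randn(256, 64)
scale_q, descale_q = fp8_scale_descale(q)
scale_k, descale_k = fp8_scale_descale(k)
q_fp8 = (q * scale_q).to(torch.float8_e4m3fnuz)
k_fp8 = (k * scale_k).to(torch.float8_e4m3fnuz)

# the kernels multiply the fp8 matmul by both descales to get real-valued scores back;
# the matmul is done in float here only because CPU torch cannot matmul fp8 tensors
qk = (q_fp8.float() @ k_fp8.float().T) * descale_q * descale_k
rel_err = (qk - q @ k.T).norm() / (q @ k.T).norm()
assert rel_err < 0.1                    # fp8 rounding error, loose tolerance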
- tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0) - curr_n = start_n - step_n = BLOCK_N2 - curr_philox_offset = batch_philox_offset - curr_dropout_offset = dropout_offset - RCP_LN2: tl.constexpr = 1.4426950408889634 # = 1.0 / ln(2) - for blk_idx in range(num_steps): - if DEBUG_TRITON: print(f"iter {blk_idx}: curr_n = {curr_n}") # noqa: E701 - offs_n = curr_n + tl.arange(0, BLOCK_N2) - # end_n is needed because the end of causal True might not be perfectly - # aligned with the end of the block - mask_n = offs_n < end_n - if DEBUG_TRITON_DETAIL: print(f"start_n = {start_n}, end_n = {end_n}, offs_n: {offs_n.shape}\n{offs_n}") # noqa: E701 - if DEBUG_TRITON_DETAIL: print(f"mask_n: {mask_n.shape}\n{mask_n}") # noqa: E701 - mask_kT = mask_n[None, :] - mask_mn = mask_m[:, None] & (offs_n[None, :] < end_n) - if PADDED_HEAD: - mask_kT &= offs_k[:, None] < ACTUAL_HEAD_DIM - - kT = tl.load(kT_ptrs, mask=mask_kT, other=0.0) - vT = tl.load(vT_ptrs, mask=mask_kT, other=0.0) - - if ENABLE_DROPOUT: - # NOTE: dropout is transposed because it is used to mask pT - philox_offs = curr_philox_offset + \ - offs_m[:, None] * stride_dropoutm + \ - offs_n[None, :] * stride_dropoutn - if tl_DROPOUT_USE_PYTORCH: - dropout_offs = offs_m[:, None] * stride_dropoutm + \ - offs_n[None, :] * stride_dropoutn - dropout_mask = tl.load( - curr_dropout_offset + dropout_offs, - mask=mask_mn) - else: - rand_vals = tl.rand(philox_seed, philox_offs) - dropout_mask = rand_vals > dropout_p - dropout_scale = 1 / (1 - dropout_p) - - if IS_FP8: - qk = (tl.dot(q, kT) * descale_q * descale_k) - else: - qk = tl.dot(q, kT) - qk_scaled = qk * sm_scale - - if USE_ALIBI: - relative_pos_block = offs_m[:, None] + seqlen_k - seqlen_q - offs_n[None, :] - alibi_block = -1 * alibi_slope * tl.abs(relative_pos_block) - qk_scaled += alibi_block - - if DEBUG_TRITON_DETAIL: print(f"qk scaled: {qk.shape}\n", qk_scaled) # noqa: E701 - if USE_EXP2: - p = tl.math.exp2(qk_scaled * RCP_LN2 - m * RCP_LN2) - else: - p = tl.math.exp(qk_scaled - m) - - # Autoregressive masking. - if MASK: - causal_mask = (offs_m[:, None] - delta_qk) >= offs_n[None, :] - mask = causal_mask & mask_mn - p = tl.where(mask, p, 0.0) - # Compute dP and dS. - if IS_FP8: - dp = (tl.dot(do, vT) * descale_do * descale_v) - else: - dp = tl.dot(do, vT) - if ENABLE_DROPOUT: - dp = tl.where(dropout_mask, dp, 0.0) * dropout_scale - delta_i = Di[:, None] - ds = p * (dp -delta_i) - # Compute dQ. - # NOTE: We need to de-scale dq in the end, because kT was pre-scaled. - if IS_FP8: - scale_ds, descale_ds = compute_fp8_scaling_factors(ds, FP8_MAX) - dq += (tl.dot((ds * scale_ds).to(kT.type.element_ty), tl.trans(kT)) * descale_ds * descale_k) - else: - dq += tl.dot(ds.to(kT.type.element_ty), tl.trans(kT)) - # Increment pointers. 
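[Editor's aside] The dQ inner loop mirrors the dK/dV sketch given earlier: with ds = p * (dp - Delta), the remaining gradient is dQ = sm_scale * ds @ K, with the sm_scale multiply deferred to write-back just as in the kernel. A minimal autograd check (shapes are illustrative):

import torch

torch.manual_seed(0)
sm_scale = 64 ** -0.5
q = torch.randn(32, 64, dtype=torch.float64, requires_grad=True)
k = torch.randn(48, 64, dtype=torch.float64)
v = torch.randn(48, 64, dtype=torch.float64)
do = torch.randn(32, 64, dtype=torch.float64)

p = torch.softmax(sm_scale * q @ k.T, dim=-1)
(p @ v).backward(do)

p = p.detach()
dp = do @ v.T
ds = p * (dp - (p * dp).sum(-1, keepdim=True))   # same Delta term as the preprocess kernel produces
dq_ref = sm_scale * ds @ k                       # sm_scale applied at the end, like dq *= sm_scale
assert torch.allclose(q.grad, dq_ref)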
- curr_n += step_n - kT_ptrs += step_n * stride_kn - vT_ptrs += step_n * stride_vn - return dq - - -# grid = (tl.cdiv(max_seqlen_q // BLOCK_M2), batch, nheads_q) -@triton.jit -def _bwd_kernel_dq_causal( - Q, K, V, sm_scale, DO, DQ, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - pid = tl.program_id(0) - bid = tl.program_id(1) - hkid = tl.program_id(2) - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - # Figure out causal starting block since we have seqlen_q <=> seqlen_k. - # Unlike forward pass where we tile on M dim and iterate on N dim, so that - # we can skip some M blocks, in backward pass, we tile on the N dim for kv - # and iterate over the M. In this way, we cannot skip N blocks, but only to - # determine the starting M blocks to skip some initial blocks masked by - # causal. - # DQ tiles on M dim and iterate on N dim, so we there could be some tiles we - # can simply skip and we need to adjust starting position. - start_m = pid * BLOCK_M - # seqlen_q > seqlen_k, no need to process these tile for dq - delta_qk = seqlen_q - seqlen_k - if DEBUG_TRITON: print(f"end_n = start_m + BLOCK_M = {start_m} + {BLOCK_M} = {start_m + BLOCK_M}") # noqa: E701 - if start_m + BLOCK_M < delta_qk: - if DEBUG_TRITON: print(f"start_m + BLOCK_M = {start_m} + {BLOCK_M} = {start_m + BLOCK_M} < delta_qk of {delta_qk}") # noqa: E701 - return - - offs_k = tl.arange(0, HEAD_DIM) - offs_m = start_m + tl.arange(0, BLOCK_M) - # Mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - if PADDED_HEAD: - mask_k = offs_k < ACTUAL_HEAD_DIM - mask_q &= mask_k[None, :] - offs_q = offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk - offs_do = offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn - K += adj_k - V += adj_v - # If MQA / GQA, set the K and V head offsets appropriately. 
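[Editor's aside] The early return above (when start_m + BLOCK_M < delta_qk) skips query tiles that the bottom-right causal mask blanks out entirely: with seqlen_q > seqlen_k, the first seqlen_q - seqlen_k query rows see no keys at all. A tiny sketch of that condition (plain Python, block size chosen arbitrarily):

def skippable_q_tiles(seqlen_q: int, seqlen_k: int, block_m: int):
    # with bottom-right causal alignment, query row i sees only keys j <= i - delta_qk
    delta_qk = seqlen_q - seqlen_k
    return [start_m for start_m in range(0, seqlen_q, block_m)
            if start_m + block_m < delta_qk]

# 512 queries vs 128 keys: the first 384 query rows attend to nothing, so the first two
# 128-row tiles are skipped for dQ (the third is also fully masked, but the kernel's
# strict "<" check keeps it and lets the masking inside the loop handle it)
assert skippable_q_tiles(512, 128, 128) == [0, 128]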
- GROUP_SIZE = HQ // HK - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # seqlen_q < seqlen_k: delta_qk more kv tokens are added at the front - # for every M-tile - end_n = start_m + BLOCK_M - delta_qk - # clamp end_n at [0, seqlen_k] - end_n = max(min(end_n, seqlen_k), 0) - if DEBUG_TRITON: print(f"delta_qk: {delta_qk}; end_n: {end_n}") # noqa: E701 - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - adj_delta = \ - bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam - Delta_ptr = Delta + adj_delta - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + \ - bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = \ - Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth - - q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(DO + adj_do + offs_do, mask=mask_q, other=0.0) - m = tl.load(M + adj_delta + offs_m * stride_deltam, - mask=offs_m < seqlen_q) - m = m[:, None] - - MASK_BLOCK_N: tl.constexpr = BLOCK_N // BLK_SLICE_FACTOR - # start can only be 0 at minimum - start_n = max(end_n - BLOCK_M, 0) - num_steps = tl.cdiv(end_n - start_n, MASK_BLOCK_N) - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - dq = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) - if DEBUG_TRITON: print(f"pid: {pid}; end_n: {end_n}, start_m: {start_m}") # noqa: E701 - # Compute dQ for masked (diagonal) blocks. - # NOTE: This code scans each row of QK^T backward (from right to left, - # but inside each call to _bwd_dq_inner, from left to right), but that's - # not due to anything important. I just wanted to reuse the loop - # structure for dK & dV above as much as possible. 
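[Editor's aside] Where USE_ALIBI is set, the inner loops add a per-head linear distance penalty to the scaled scores before exponentiation, measured from the same bottom-right diagonal as the causal mask. A hedged one-head reference of that bias (slope and sizes are arbitrary):

import torch

def alibi_bias(slope: float, seqlen_q: int, seqlen_k: int) -> torch.Tensor:
    row = torch.arange(seqlen_q).unsqueeze(1)
    col = torch.arange(seqlen_k).unsqueeze(0)
    # relative position measured from the bottom-right diagonal, as in the kernels
    relative_pos = row + seqlen_k - seqlen_q - col
    return -slope * relative_pos.abs().float()

bias = alibi_bias(0.5, 4, 4)
assert bias.diag().eq(0).all()                          # zero penalty on the diagonal
assert torch.isclose(bias[0, 3], torch.tensor(-1.5))    # penalty grows linearly with distance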
- if DEBUG_TRITON: print(f"Masked: start_m: {start_m}, start_n: {start_n}, end_n: {end_n}, num_steps: {num_steps}") # noqa: E701 - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, - stride_deltam, - seqlen_q, seqlen_k, - BLOCK_M, MASK_BLOCK_N, - HEAD_DIM, ACTUAL_HEAD_DIM, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - MASK=True, - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - end_n -= num_steps * MASK_BLOCK_N - num_steps = tl.cdiv(end_n, BLOCK_N) - start_n = max(end_n - num_steps * BLOCK_N, 0) - if DEBUG_TRITON: print(f"unMasked: start_m: {start_m}, start_n: {start_n}, end_n: {end_n}, num_steps: {num_steps}") # noqa: E701 - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, - stride_deltam, - seqlen_q, seqlen_k, - BLOCK_M, BLOCK_N, - HEAD_DIM, ACTUAL_HEAD_DIM, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - # Write back dQ. - adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm - offs_dq = offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk - dq *= sm_scale - tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) - - -@triton.jit -def _bwd_kernel_dkdv_noncausal( - Q, K, V, sm_scale, DO, DK, DV, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M: tl.constexpr, # 32 - BLOCK_N: tl.constexpr, # 128 - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - pid = tl.program_id(0) - bid = tl.program_id(1) - hkid = tl.program_id(2) - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - dk = tl.zeros([BLOCK_N, HEAD_DIM], 
dtype=tl.float32) - dv = tl.zeros([BLOCK_N, HEAD_DIM], dtype=tl.float32) - - start_n = pid * BLOCK_N - - offs_k = tl.arange(0, HEAD_DIM) - offs_n = start_n + tl.arange(0, BLOCK_N) - # Mask for loading K and V - mask_kv = offs_n[:, None] < seqlen_k - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - if PADDED_HEAD: - mask_k = offs_k < ACTUAL_HEAD_DIM - mask_kv &= mask_k[None, :] - - GROUP_SIZE = HQ // HK - # K/V tensors not changed for the group - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn + offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn + offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk - # load K and V: they stay in SRAM throughout the inner loop. - k = tl.load(K + adj_k, mask=mask_kv, other=0.0) - v = tl.load(V + adj_v, mask=mask_kv, other=0.0) - # If MQA / GQA, set the K and V head offsets appropriately. - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - Q_ptr = Q + adj_q - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - DO_ptr = DO + adj_do - adj_delta = bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam - M_ptr = M + adj_delta - Delta_ptr = Delta + adj_delta - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = Dropout_mask + bid * stride_dropoutb + \ - hqid * stride_dropouth - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # because there is no causal, we always start from the beginning - start_m = 0 - num_steps = tl.cdiv(seqlen_q, BLOCK_M) - dk, dv = _bwd_dkdv_inner( - dk, dv, # output tensors - Q_ptr, k, v, DO_ptr, M_ptr, Delta_ptr, sm_scale, # input tensors - stride_qm, stride_qk, # strides for q - stride_dom, stride_dok, # strides for o - stride_dropoutm, stride_dropoutn, # strides for dropout - stride_deltam, - BLOCK_M, BLOCK_N, # block dim - HEAD_DIM, ACTUAL_HEAD_DIM, # head dim - dropout_p, philox_seed, batch_philox_offset, dropout_offset, # - alibi_slope, - seqlen_q, seqlen_k, # max sequence length for q and k - start_n, start_m, num_steps, # iteration numbers - descale_q, descale_k, descale_v, descale_do, # fp8 descale factors from user - MASK=False, # causal masking - ENABLE_DROPOUT=ENABLE_DROPOUT, # activate dropout - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - # Write back dV and dK. 
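[Editor's aside] The dropout plumbing in these kernels follows the usual inverted-dropout convention: drop with probability dropout_p and rescale survivors by 1 / (1 - dropout_p) so expectations match the no-dropout path; the Philox seed/offset pair only makes the mask reproducible between forward and backward. A small reference sketch using plain torch.rand in place of the Philox counters:

import torch

def apply_dropout(x: torch.Tensor, p: float, generator=None) -> torch.Tensor:
    keep = torch.rand(x.shape, generator=generator, device=x.device) > p
    return torch.where(keep, x, torch.zeros_like(x)) / (1.0 - p)

g = torch.Generator().manual_seed(1234)      # stands in for the (philox_seed, philox_offset) pair
x = torch.ones(1_000_000)
y = apply_dropout(x, p=0.2, generator=g)
# about 20% of entries are zeroed and the survivors are scaled by 1/0.8, so the mean is preserved
assert abs(y.mean().item() - 1.0) < 0.01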
- adj_dkdv = bid * stride_dkb + hkid * stride_kh + k_start * stride_dkn - offs_dkdv = offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk - tl.store(DV + adj_dkdv + offs_dkdv, dv, mask=mask_kv) - dk *= sm_scale - tl.store(DK + adj_dkdv + offs_dkdv, dk, mask=mask_kv) - - -@triton.jit -def _bwd_kernel_dq_noncausal( - Q, K, V, sm_scale, DO, DQ, - M, Delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - HQ, HK, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k, - Dropout_mask, dropout_p, philox_seed, philox_offset_base, - Alibi_slopes, - Descale_q, Descale_k, Descale_v, Descale_do, - BLOCK_M: tl.constexpr, - BLOCK_N: tl.constexpr, - BLK_SLICE_FACTOR: tl.constexpr, - HEAD_DIM: tl.constexpr, - ACTUAL_HEAD_DIM: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, - IS_VARLEN: tl.constexpr, - USE_ALIBI: tl.constexpr, - USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, - FP8_MAX: tl.constexpr, - FP8_OUTPUT: tl.constexpr, - DEBUG_TRITON: tl.constexpr, - DEBUG_TRITON_DETAIL: tl.constexpr, -): - # program ids - pid = tl.program_id(0) - bid = tl.program_id(1) - hkid = tl.program_id(2) - # figure out varlen start and end - q_start = 0 - k_start = 0 - seqlen_q = max_seqlen_q - seqlen_k = max_seqlen_k - if IS_VARLEN: - # Compute actual sequence lengths - q_start = tl.load(cu_seqlens_q + bid) - q_end = tl.load(cu_seqlens_q + bid + 1) - k_start = tl.load(cu_seqlens_k + bid) - k_end = tl.load(cu_seqlens_k + bid + 1) - seqlen_q = q_end - q_start - seqlen_k = k_end - k_start - - start_m = pid * BLOCK_M - - offs_k = tl.arange(0, HEAD_DIM) - offs_m = start_m + tl.arange(0, BLOCK_M) - # Mask for loading K and V - mask_q = offs_m[:, None] < seqlen_q - PADDED_HEAD: tl.constexpr = (ACTUAL_HEAD_DIM != HEAD_DIM) - if PADDED_HEAD: - mask_k = offs_k < ACTUAL_HEAD_DIM - mask_q &= mask_k[None, :] - offs_q = offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk - offs_do = offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok - adj_k = bid * stride_kb + hkid * stride_kh + k_start * stride_kn - adj_v = bid * stride_vb + hkid * stride_vh + k_start * stride_vn - K += adj_k - V += adj_v - # If MQA / GQA, set the K and V head offsets appropriately. 
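[Editor's aside] GROUP_SIZE = HQ // HK below encodes the GQA/MQA mapping: each key/value head is shared by a contiguous group of query heads, which is why these kernels loop hqid over a range derived from hkid. A minimal PyTorch sketch of that mapping (tensor names and shapes are illustrative):

import torch

def expand_kv_for_gqa(k: torch.Tensor, nheads_q: int) -> torch.Tensor:
    # k: (batch, seqlen_k, nheads_k, head_dim) -> (batch, seqlen_k, nheads_q, head_dim)
    batch, seqlen_k, nheads_k, head_dim = k.shape
    assert nheads_q % nheads_k == 0, "nheads_q must be divisible by nheads_k"
    group_size = nheads_q // nheads_k
    # query head h reads kv head h // group_size
    return k.repeat_interleave(group_size, dim=2)

q = torch.randn(2, 128, 8, 64)   # 8 query heads
k = torch.randn(2, 256, 2, 64)   # 2 kv heads -> group_size = 4
k_expanded = expand_kv_for_gqa(k, q.shape[2])
assert k_expanded.shape == (2, 256, 8, 64)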
- GROUP_SIZE = HQ // HK - for hqid in range(hkid * GROUP_SIZE, hkid * GROUP_SIZE + GROUP_SIZE): - # offset input and output tensor by batch and Q/K heads - adj_q = bid * stride_qb + hqid * stride_qh + q_start * stride_qm - adj_do = bid * stride_dob + hqid * stride_doh + q_start * stride_dom - adj_delta = \ - bid * stride_deltab + hqid * stride_deltah + q_start * stride_deltam - Delta_ptr = Delta + adj_delta - - if USE_ALIBI: - alibi_offset = bid * stride_az + hqid * stride_ah - alibi_slope = tl.load(Alibi_slopes + alibi_offset) - else: - alibi_slope = None - - # batch_philox_offset is the ACTUALLY dropout offset - # dropout_offset is for debug purpose and will be removed later - batch_philox_offset = 0 - dropout_offset = 0 - if ENABLE_DROPOUT: - batch_philox_offset = philox_offset_base + \ - bid * stride_dropoutb + \ - hqid * stride_dropouth - dropout_offset = \ - Dropout_mask + bid * stride_dropoutb + hqid * stride_dropouth - - q = tl.load(Q + adj_q + offs_q, mask=mask_q, other=0.0) - do = tl.load(DO + adj_do + offs_do, mask=mask_q, other=0.0) - m = tl.load(M + adj_delta + offs_m * stride_deltam, - mask=offs_m < seqlen_q) - m = m[:, None] - - if IS_FP8: - descale_q = tl.load(Descale_q + bid * stride_descale_q_z + hqid) - descale_k = tl.load(Descale_k + bid * stride_descale_k_z + hkid) - descale_v = tl.load(Descale_v + bid * stride_descale_v_z + hkid) - descale_do = tl.load(Descale_do + bid * stride_descale_do_z + hqid) - else: - descale_q, descale_k, descale_v, descale_do = 1.0, 1.0, 1.0, 1.0 - - # start can only be 0 at minimum - start_n = 0 - end_n = seqlen_k - num_steps = tl.cdiv(seqlen_k, BLOCK_N) - dq = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) - dq = _bwd_dq_inner( - dq, - q, K, V, do, m, Delta_ptr, sm_scale, - stride_qm, stride_qk, stride_kn, stride_kk, stride_vn, stride_vk, - stride_dropoutm, stride_dropoutn, - stride_deltam, - seqlen_q, seqlen_k, - BLOCK_M, BLOCK_N, - HEAD_DIM, ACTUAL_HEAD_DIM, - dropout_p, philox_seed, batch_philox_offset, dropout_offset, - alibi_slope, - start_m, start_n, end_n, num_steps, - descale_q, descale_k, descale_v, descale_do, - MASK=False, - ENABLE_DROPOUT=ENABLE_DROPOUT, - USE_ALIBI=USE_ALIBI, - USE_EXP2=USE_EXP2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - # Write back dQ. 
- adj_dq = bid * stride_dqb + hqid * stride_dqh + q_start * stride_dqm - offs_dq = offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk - dq *= sm_scale - tl.store(DQ + adj_dq + offs_dq, dq, mask=mask_q) - - -def attention_prefill_backward_triton_split_impl( - do: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - o: torch.Tensor, - softmax_lse: torch.Tensor, - dq: torch.Tensor, - dk: torch.Tensor, - dv: torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - layout: Literal["bshd", "bhsd", "thd"], - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - max_seqlen_q: Optional[int], - max_seqlen_k: Optional[int], - dropout_p: float, - philox_seed: Optional[int], - philox_offset: Optional[int], - use_exp2: bool, - # fp8 - descale_q: Optional[torch.Tensor], - descale_k: Optional[torch.Tensor], - descale_v: Optional[torch.Tensor], - descale_o: Optional[torch.Tensor], - descale_do: Optional[torch.Tensor], - descale_dq: Optional[torch.Tensor], - descale_dk: Optional[torch.Tensor], - descale_dv: Optional[torch.Tensor], - # seqused for FA v3 (currently ignored in this implementation) - seqused_q: Optional[torch.Tensor] = None, - seqused_k: Optional[torch.Tensor] = None, -): - # debug - DEBUG_TRITON: bool = False - DEBUG_TRITON_DETAIL: bool = False - - # fp8 - IS_FP8 = is_fp8(q) - if IS_FP8: - FP8_MAX = torch.finfo(q.dtype).max - # assert that the main inputs are fp8 - assert is_fp8(do) and is_fp8(q) and is_fp8(k) and is_fp8(v), f"Non fp8 type found: do.dtype={do.dtype}, q.dtype={q.dtype}, k.dtype={k.dtype}, v.dtype={v.dtype}. All tensors must be fp8." - if is_fp8(o): - FP8_OUTPUT = True - assert descale_o is not None, f"descale_o is None. In fp8, you need to pass a tensor for descale_o along with a tensor o." - assert descale_dq is not None, f"descale_dq is None. In fp8, you need to pass a tensor for descale_dq along with a tensor dq." - assert descale_dk is not None, f"descale_dk is None. In fp8, you need to pass a tensor for descale_dk along with a tensor dk." - assert descale_dv is not None, f"descale_dv is None. In fp8, you need to pass a tensor for descale_dv along with a tensor dv." 
- else: - FP8_OUTPUT = False - - stride_descale_q_z = descale_q.stride(0) if descale_q is not None else None - stride_descale_k_z = descale_k.stride(0) if descale_k is not None else None - stride_descale_v_z = descale_v.stride(0) if descale_v is not None else None - stride_descale_o_z = descale_o.stride(0) if descale_o is not None else None - stride_descale_do_z = descale_do.stride(0) if descale_do is not None else None - - if DEBUG: - print(f"FP8 path triggered in bwd_prefill_split.py (FP8_OUTPUT={FP8_OUTPUT})") - else: - FP8_MAX = None - FP8_OUTPUT = False - stride_descale_q_z = stride_descale_k_z = stride_descale_v_z = stride_descale_o_z = stride_descale_do_z = None - - - # get strides and shape - batch, nheads_q, nheads_k, head_size, max_seqlen_q_final, max_seqlen_k_final = \ - get_shapes_from_layout( - q, k, layout, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q, max_seqlen_k - ) - q_strides, k_strides, v_strides, o_strides = \ - get_strides_from_layout(q, k, v, o, layout) - stride_qb, stride_qh, stride_qm, stride_qk = q_strides - stride_kb, stride_kh, stride_kn, stride_kk = k_strides - stride_vb, stride_vh, stride_vn, stride_vk = v_strides - stride_ob, stride_oh, stride_om, stride_ok = o_strides - dq_strides, dk_strides, dv_strides, do_strides = \ - get_strides_from_layout(dq, dk, dv, do, layout) - stride_dqb, stride_dqh, stride_dqm, stride_dqk = dq_strides - stride_dkb, stride_dkh, stride_dkn, stride_dkk = dk_strides - stride_dvb, stride_dvh, stride_dvn, stride_dvk = dv_strides - stride_dob, stride_doh, stride_dom, stride_dok = do_strides - IS_VARLEN = layout == "thd" - use_dropout = (dropout_p > 0.0) - use_alibi, (stride_az, stride_ah) = (True, alibi_slopes.stride()) if alibi_slopes is not None else (False, (0, 0)) - - # get closest power of 2 over or equal to 32. - padded_d_model = 1 << (head_size - 1).bit_length() - padded_d_model = max(padded_d_model, 32) # NOTE: the causal path expects a min of 32. It will cause a compiler assert. - HEAD_DIM = padded_d_model - ACTUAL_HEAD_DIM = head_size - # meta-parameters - # TODO: fix num_stages later - NUM_WARPS, NUM_STAGES = 4, 1 - WAVES_PER_EU = 1 - PRE_BLOCK = 128 - BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32 - BLK_SLICE_FACTOR = 2 - - # init delta - delta = torch.zeros_like(softmax_lse) - if IS_VARLEN: - stride_deltab = 0 - stride_deltam, stride_deltah = delta.stride() - else: - stride_deltab, stride_deltah, stride_deltam = delta.stride() - pre_grid = (triton.cdiv(max_seqlen_q_final, PRE_BLOCK), batch, nheads_q) - _bwd_preprocess[pre_grid]( - o, do, - delta, - stride_ob, stride_oh, stride_om, stride_ok, - stride_deltab, stride_deltah, stride_deltam, - stride_descale_do_z, - cu_seqlens_q, max_seqlen_q_final, - descale_do, - BLOCK_M=PRE_BLOCK, - HEAD_DIM=HEAD_DIM, - ACTUAL_HEAD_DIM=ACTUAL_HEAD_DIM, - IS_VARLEN=IS_VARLEN, - IS_FP8=IS_FP8 - ) - - if DEBUG: - print("delta:", delta, delta.shape) - - # dropout mask tensor for debugging. 
We dump the dropout mask created in - # the kernel for testing - dropout_mask = None - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = \ - (0, 0 , 0 , 0) - if use_dropout: - dropout_mask = torch.zeros( - (batch, nheads_q, max_seqlen_q_final, max_seqlen_k_final), - device=q.device, - dtype=torch.float32 - ) - - if DROPOUT_USE_PYTORCH: - if not IS_VARLEN: - dropout_mask = create_dropout_mask( - dropout_p, - (batch, nheads_q, max_seqlen_q_final, max_seqlen_k_final), - seed = philox_seed - ) - else: - dropout_mask = create_dropout_mask_varlen( - dropout_p, batch, nheads_q, - cu_seqlens_q, cu_seqlens_k, philox_seed - ) - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn = \ - dropout_mask.stride() - - grid_dkdv = ((max_seqlen_k_final + BLOCK_N1 - 1) // BLOCK_N1, batch, nheads_k) - grid_dq = ((max_seqlen_q_final + BLOCK_M2 - 1) // BLOCK_M2, batch, nheads_k) - if causal: - if DEBUG_TRITON: print(f"_bwd_kernel_dkdv: grid = {grid_dkdv}, block_size = ({BLOCK_M1, BLOCK_N1})", ) # noqa: E701 - _bwd_kernel_dkdv_causal[grid_dkdv]( - q, k, v, sm_scale, do, dk, dv, - softmax_lse, delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q_final, max_seqlen_k_final, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M1, BLOCK_N1, BLK_SLICE_FACTOR, - HEAD_DIM, ACTUAL_HEAD_DIM, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - FP8_OUTPUT=FP8_OUTPUT, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu = WAVES_PER_EU, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - if DEBUG_TRITON: print(f"\n_bwd_kernel_dq: grid = {grid_dq}, block_size = ({BLOCK_M2, BLOCK_N2})", ) # noqa: E701 - _bwd_kernel_dq_causal[grid_dq]( - q, k, v, sm_scale, do, dq, - softmax_lse, delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q_final, max_seqlen_k_final, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M2, BLOCK_N2, BLK_SLICE_FACTOR, - HEAD_DIM, ACTUAL_HEAD_DIM, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - FP8_OUTPUT=FP8_OUTPUT, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu = WAVES_PER_EU, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - else: - _bwd_kernel_dkdv_noncausal[grid_dkdv]( - q, k, v, sm_scale, do, dk, dv, - softmax_lse, delta, - 
stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dkb, stride_dkh, stride_dkn, stride_dkk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q_final, max_seqlen_k_final, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M1, BLOCK_N1, BLK_SLICE_FACTOR, - HEAD_DIM, ACTUAL_HEAD_DIM, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - FP8_OUTPUT=FP8_OUTPUT, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu = WAVES_PER_EU, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - _bwd_kernel_dq_noncausal[grid_dq]( - q, k, v, sm_scale, do, dq, - softmax_lse, delta, - stride_qb, stride_qh, stride_qm, stride_qk, - stride_kb, stride_kh, stride_kn, stride_kk, - stride_vb, stride_vh, stride_vn, stride_vk, - stride_dqb, stride_dqh, stride_dqm, stride_dqk, - stride_deltab, stride_deltah, stride_deltam, - stride_dob, stride_doh, stride_dom, stride_dok, - stride_dropoutb, stride_dropouth, stride_dropoutm, stride_dropoutn, - stride_descale_q_z, stride_descale_k_z, stride_descale_v_z, stride_descale_do_z, - stride_az, stride_ah, - nheads_q, nheads_k, - cu_seqlens_q, cu_seqlens_k, - max_seqlen_q_final, max_seqlen_k_final, - dropout_mask, dropout_p, philox_seed, philox_offset, - alibi_slopes, - descale_q, descale_k, descale_v, descale_do, - BLOCK_M2, BLOCK_N2, BLK_SLICE_FACTOR, - HEAD_DIM, ACTUAL_HEAD_DIM, - ENABLE_DROPOUT=use_dropout, - IS_VARLEN=IS_VARLEN, - USE_ALIBI=use_alibi, - USE_EXP2=use_exp2, - IS_FP8=IS_FP8, - FP8_MAX=FP8_MAX, - FP8_OUTPUT=FP8_OUTPUT, - num_warps=NUM_WARPS, - num_stages=NUM_STAGES, - waves_per_eu = WAVES_PER_EU, - DEBUG_TRITON=DEBUG_TRITON, - DEBUG_TRITON_DETAIL=DEBUG_TRITON_DETAIL, - ) - - return delta diff --git a/flash_attn/flash_attn_triton_amd/bwd_ref.py b/flash_attn/flash_attn_triton_amd/bwd_ref.py deleted file mode 100644 index cb1637157a0..00000000000 --- a/flash_attn/flash_attn_triton_amd/bwd_ref.py +++ /dev/null @@ -1,545 +0,0 @@ -import torch -import math -from typing import Literal, Optional -from .utils import compute_alibi_tensor_ref - -DEBUG = False -DEBUG_CORE = False - -def attention_backward_core_ref_impl( - do, q, k, v, o, softmax_lse, sm_scale, causal, window_size_left, window_size_right, - dropout_p, philox_seed, philox_offset, alibi_slopes, use_exp2 -): - if DEBUG_CORE: - print() - print("attention_backward_core_ref_impl") - print("do:", do, do.shape) - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("o:", o, o.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("sm_scale:", sm_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("dropout_p:", dropout_p) - print("philox_seed:", philox_seed) - print("philox_offset:", philox_offset) - print("use_exp2:", use_exp2) - - # cast to float32 - do = do.to(torch.float32) - q = q.to(torch.float32) - k = k.to(torch.float32) - v = v.to(torch.float32) - o = o.to(torch.float32) - softmax_lse = 
softmax_lse.to(torch.float32) - - # recompute attention_scores. Make sure it matches the forward impl. i.e. It use float32 - attention_scores = torch.matmul(q, k.transpose(-2, -1)) - if DEBUG_CORE: - print("attention_scores:", attention_scores, attention_scores.shape) - - # scale scores - attention_scaled_scores = sm_scale * attention_scores - if DEBUG_CORE: - print("attention_scaled_scores:", attention_scaled_scores, attention_scaled_scores.shape) - - if alibi_slopes is not None: - L_q, L_k = q.shape[1], k.shape[1] - if DEBUG_CORE: - print("alibi_slopes:", alibi_slopes, alibi_slopes.shape) - alibi_bias = compute_alibi_tensor_ref(alibi_slopes, L_q, L_k) - alibi_bias = alibi_bias.reshape(-1, L_q, L_k) - if DEBUG_CORE: - print("alibi_bias:", alibi_bias, alibi_bias.shape) - attention_scaled_scores = attention_scaled_scores + alibi_bias - if DEBUG_CORE: - print("attention_scaled_scores after alibi:", attention_scaled_scores, attention_scaled_scores.shape) - - # Apply masks - L_q, L_k = q.shape[1], k.shape[1] - row_idx = torch.arange(L_q, device=q.device).unsqueeze(1) - col_idx = torch.arange(L_k, device=q.device).unsqueeze(0) - col_offset = L_k - L_q - - mask_applied = False - if causal and (window_size_left, window_size_right) == (-1, -1): - # Pure causal: ensure query doesn't attend to future keys - mask = row_idx >= (col_idx - col_offset) - mask_applied = True - if DEBUG_CORE: - print("causal_mask:", mask) - elif (window_size_left, window_size_right) != (-1, -1): - # Handle the case where window sizes exceed sequence length - if window_size_left >= L_k: - window_size_left = -1 # No left limit - if window_size_right >= L_k: - window_size_right = -1 # No right limit - - if causal: - # Causal + sliding window: ensure we don't attend to future - window_size_right = min(window_size_right, 0) if window_size_right != -1 else 0 - - # Create sliding window mask - # Each query at position i attends to keys in [i + offset - left, i + offset + right] - if window_size_left == -1 and window_size_right == -1: - # No window restriction - mask = torch.ones((L_q, L_k), dtype=torch.bool, device=q.device) - else: - mask = torch.ones((L_q, L_k), dtype=torch.bool, device=q.device) - if window_size_left != -1: - # Each query at position i attends to keys from position (i - left) accounting for offset - mask = mask & (col_idx >= (row_idx + col_offset - window_size_left)) - if window_size_right != -1: - # Each query at position i attends to keys up to position (i + right) accounting for offset - mask = mask & (col_idx <= (row_idx + col_offset + window_size_right)) - - # Apply causal constraint - if causal: - causal_mask = row_idx >= (col_idx - col_offset) - mask = mask & causal_mask - - mask_applied = True - if DEBUG_CORE: - print(f"sliding_window_mask (left={window_size_left}, right={window_size_right}):", mask) - - # Apply the mask if created - if mask_applied: - attention_scaled_scores = attention_scaled_scores.masked_fill( - torch.logical_not(mask.unsqueeze(0)), float('-inf') - ) - if DEBUG_CORE: - print("attention_scaled_scores after masking:", attention_scaled_scores, attention_scaled_scores.shape) - - # compute probabilities using softmax_lse - if use_exp2: - RCP_LN = 1 / math.log(2) - attention_scaled_scores_base2 = attention_scaled_scores * RCP_LN - softmax_lse_base2 = softmax_lse * RCP_LN - softmax_lse_3d = softmax_lse_base2.unsqueeze(-1) - p = torch.exp2(attention_scaled_scores_base2 - softmax_lse_3d) - else: - softmax_lse_3d = softmax_lse.unsqueeze(-1) - p = torch.exp(attention_scaled_scores - 
softmax_lse_3d) - - # Zero out positions outside the mask - if mask_applied: - p = p.masked_fill(torch.logical_not(mask.unsqueeze(0)), 0.0) - - if DEBUG_CORE: - print("softmax_lse_3d:", softmax_lse_3d, softmax_lse_3d.shape) - print("p:", p, p.shape) - - if dropout_p > 0.0: - rand_vals = torch.rand(p.shape, generator=torch.Generator(device=p.device).manual_seed(philox_seed), device=p.device, dtype=p.dtype) - dropout_mask, dropout_scale = rand_vals > dropout_p, (1.0 / (1 - dropout_p)) - if DEBUG_CORE: - print("dropout_scale:", dropout_scale) - print("dropout_mask:", dropout_mask) - - p_drop = torch.where(dropout_mask, p, torch.zeros_like(p)) - p_drop_scaled = p_drop * dropout_scale - if DEBUG_CORE: - print("dropout_scale:", dropout_scale) - print("p_drop:", p_drop, p_drop.shape) - print("p_drop_scaled:", p_drop_scaled, p_drop_scaled.shape) - - # compute dv - dv = torch.matmul(p_drop_scaled.transpose(-2, -1), do) - if DEBUG_CORE: - print("dv:", dv, dv.shape) - - # compute dp - dp_dropout = torch.matmul(do, v.transpose(-2, -1)) - dp = torch.where(dropout_mask, dp_dropout, torch.zeros_like(dp_dropout)) * dropout_scale - if DEBUG_CORE: - print("dp_dropout:", dp_dropout, dp_dropout.shape) - print("dp:", dp, dp.shape) - else: - # compute dv - dv = torch.matmul(p.transpose(-2, -1), do) - if DEBUG_CORE: - print("dv:", dv, dv.shape) - - # compute dp - dp = torch.matmul(do, v.transpose(-2, -1)) - if DEBUG_CORE: - print("dp:", dp, dp.shape) - - # calculate ds - if True: - delta = torch.sum(o * do, axis=-1).unsqueeze(-1) - else: - delta = torch.sum(p * dp, axis=-1).unsqueeze(-1) - if DEBUG_CORE: - print("delta:", delta, delta.shape) - dscores_scaled = p * (dp - delta) - - # Zero out gradients for positions outside the mask - if mask_applied: - dscores_scaled = dscores_scaled.masked_fill(torch.logical_not(mask.unsqueeze(0)), 0.0) - - ds = dscores_scaled * sm_scale - if DEBUG_CORE: - print("dscores_scaled:", dscores_scaled, dscores_scaled.shape) - print("ds:", ds, ds.shape) - - # compute gradient wrt k & q - dk = torch.matmul(ds.transpose(-2, -1), q) - dq = torch.matmul(ds, k) - if DEBUG_CORE: - print("dk:", dk, dk.shape) - print("dq:", dq, dq.shape) - - # cast back to original dtype - dq = dq.to(torch.float16) - dk = dk.to(torch.float16) - dv = dv.to(torch.float16) - # remove d dim with size 1 - delta = delta.squeeze(-1) - - if DEBUG_CORE: - print("attention_backward_core_ref_impl output") - print("delta:", delta, delta.shape) - print("dv:", dv, dv.shape) - print("dk:", dk, dk.shape) - print("dq:", dq, dq.shape) - - return dq, dk, dv, delta - -def attention_varlen_backward_pytorch_ref_impl( - do, - q, - k, - v, - o, - softmax_lse, - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2, -): - # Ensure the layout is 'thd' - if layout != 'thd': - raise ValueError(f"Unsupported layout {layout}. 
Expected 'thd'.") - - batch_size = cu_seqlens_q.shape[0] - 1 - nheads_q, head_dim = q.shape[1], q.shape[2] - nheads_k = k.shape[1] - - group_size = nheads_q // nheads_k - if nheads_q % nheads_k != 0: - raise ValueError("nheads_q must be divisible by nheads_k") - - # Pre-allocate outputs - total_L_q = q.shape[0] - total_L_k = k.shape[0] - - dq = torch.zeros_like(q) - dk = torch.zeros_like(k) - dv = torch.zeros_like(v) - # delta has the same shape as softmax_lse - delta = torch.zeros_like(softmax_lse) - - for i in range(batch_size): - # Get the start and end indices for the current sequence - start_q = cu_seqlens_q[i].item() - end_q = cu_seqlens_q[i + 1].item() - start_k = cu_seqlens_k[i].item() - end_k = cu_seqlens_k[i + 1].item() - - # Extract q_i, k_i, v_i, do_i, o_i, softmax_lse_i - q_i = q[start_q:end_q, :, :] # [L_q_i, nheads_q, head_dim] - k_i = k[start_k:end_k, :, :] # [L_k_i, nheads_k, head_dim] - v_i = v[start_k:end_k, :, :] # [L_k_i, nheads_k, head_dim] - do_i = do[start_q:end_q, :, :] # [L_q_i, nheads_q, head_dim] - o_i = o[start_q:end_q, :, :] # [L_q_i, nheads_q, head_dim] - softmax_lse_i = softmax_lse[:, start_q:end_q] # [nheads_q, L_q_i] - - if group_size != 1: - # MQA or GQA case - # Reshape tensors to include group dimension - q_i = q_i.view(q_i.shape[0], nheads_k, group_size, head_dim) - do_i = do_i.view(do_i.shape[0], nheads_k, group_size, head_dim) - o_i = o_i.view(o_i.shape[0], nheads_k, group_size, head_dim) - softmax_lse_i = softmax_lse_i.view(nheads_k, group_size, softmax_lse_i.shape[1]) - # Expand k_i and v_i to match group_size - k_i = k_i.unsqueeze(2).expand(-1, -1, group_size, -1) - v_i = v_i.unsqueeze(2).expand(-1, -1, group_size, -1) - # Flatten the nheads_k and group_size dimensions - q_i = q_i.reshape(q_i.shape[0], nheads_k * group_size, head_dim) - do_i = do_i.reshape(do_i.shape[0], nheads_k * group_size, head_dim) - o_i = o_i.reshape(o_i.shape[0], nheads_k * group_size, head_dim) - softmax_lse_i = softmax_lse_i.reshape(nheads_k * group_size, softmax_lse_i.shape[2]) - k_i = k_i.reshape(k_i.shape[0], nheads_k * group_size, head_dim) - v_i = v_i.reshape(v_i.shape[0], nheads_k * group_size, head_dim) - - # Permute to [nheads_total, L, head_dim] - q_i = q_i.permute(1, 0, 2) - k_i = k_i.permute(1, 0, 2) - v_i = v_i.permute(1, 0, 2) - do_i = do_i.permute(1, 0, 2) - o_i = o_i.permute(1, 0, 2) - - if alibi_slopes is not None: - alibi_slopes_i = alibi_slopes[i] - else: - alibi_slopes_i = None - - # Call the core backward function for this sequence - dq_i, dk_i, dv_i, delta_i = attention_backward_core_ref_impl( - do_i, - q_i, - k_i, - v_i, - o_i, - softmax_lse_i, - sm_scale, - causal, - window_size_left, - window_size_right, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes_i, - use_exp2 - ) - - # Convert back to 'thd' layout - dq_i = dq_i.permute(1, 0, 2) # [L_q_i, nheads_total, head_dim] - dk_i = dk_i.permute(1, 0, 2) # [L_k_i, nheads_total, head_dim] - dv_i = dv_i.permute(1, 0, 2) # [L_k_i, nheads_total, head_dim] - - if group_size != 1: - # Reshape dq_i and delta_i back to original shape - dq_i = dq_i.view(dq_i.shape[0], nheads_k, group_size, head_dim) - L_q_i = delta_i.shape[1] - delta_i = delta_i.view(nheads_k, group_size, L_q_i) - # Sum dk_i and dv_i over group dimension - dk_i = dk_i.view(dk_i.shape[0], nheads_k, group_size, head_dim) - dv_i = dv_i.view(dv_i.shape[0], nheads_k, group_size, head_dim) - dk_i = dk_i.sum(dim=2) - dv_i = dv_i.sum(dim=2) - # Reshape dq_i back to [L_q_i, nheads_q, head_dim] - dq_i = dq_i.reshape(dq_i.shape[0], nheads_q, 
head_dim) - delta_i = delta_i.reshape(nheads_q, L_q_i) - else: - # No need to reshape - pass - - # Place outputs in pre-allocated tensors - dq[start_q:end_q, :, :] = dq_i - dk[start_k:end_k, :, :] += dk_i # Accumulate gradients for shared keys - dv[start_k:end_k, :, :] += dv_i # Accumulate gradients for shared values - delta[:, start_q:end_q] = delta_i - - return dq, dk, dv, delta - -def attention_vanilla_backward_pytorch_ref_impl( - do, - q, - k, - v, - o, - softmax_lse, - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2, -): - if layout == "bshd": - if DEBUG: - print() - print("Changing layout to bhsd!") - do = do.transpose(1, 2).contiguous() - q = q.transpose(1, 2).contiguous() - k = k.transpose(1, 2).contiguous() - v = v.transpose(1, 2).contiguous() - o = o.transpose(1, 2).contiguous() - elif layout == "bhsd": - pass - else: - raise ValueError(f"Unknown layout {layout}") - - # Prepare tensors - batch_size, nheads_q, seq_len_q, head_dim = q.shape - batch_size, nheads_k, seq_len_k, head_dim = k.shape - - group_size = nheads_q // nheads_k - if nheads_q % nheads_k != 0: - raise ValueError("nheads_q must be divisible by nheads_k") - - if group_size != 1: - # MQA or GQA case - # Reshape do, q, o to [batch_size, nheads_k, group_size, seq_len_q, head_dim] - do = do.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - q = q.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - o = o.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - # Reshape softmax_lse to [batch_size, nheads_k, group_size, seq_len_q] - softmax_lse = softmax_lse.reshape(batch_size, nheads_k, group_size, seq_len_q) - # Expand k and v to match group_size - k = k.unsqueeze(2).expand(-1, -1, group_size, -1, -1) # [batch_size, nheads_k, group_size, seq_len_k, head_dim] - v = v.unsqueeze(2).expand(-1, -1, group_size, -1, -1) - # Flatten the first three dimensions for computation - do = do.reshape(batch_size * nheads_k * group_size, seq_len_q, head_dim) - q = q.reshape(batch_size * nheads_k * group_size, seq_len_q, head_dim) - k = k.reshape(batch_size * nheads_k * group_size, seq_len_k, head_dim) - v = v.reshape(batch_size * nheads_k * group_size, seq_len_k, head_dim) - o = o.reshape(batch_size * nheads_k * group_size, seq_len_q, head_dim) - softmax_lse = softmax_lse.reshape(batch_size * nheads_k * group_size, seq_len_q) - else: - # Standard case - do = do.reshape(batch_size * nheads_q, seq_len_q, head_dim) - q = q.reshape(batch_size * nheads_q, seq_len_q, head_dim) - k = k.reshape(batch_size * nheads_k, seq_len_k, head_dim) - v = v.reshape(batch_size * nheads_k, seq_len_k, head_dim) - o = o.reshape(batch_size * nheads_q, seq_len_q, head_dim) - softmax_lse = softmax_lse.reshape(batch_size * nheads_q, seq_len_q) - - # Call the core backward function - dq, dk, dv, delta = attention_backward_core_ref_impl( - do, - q, - k, - v, - o, - softmax_lse, - sm_scale, - causal, - window_size_left, - window_size_right, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2 - ) - - if group_size != 1: - # Reshape dq back to [batch_size, nheads_k, group_size, seq_len_q, head_dim] - dq = dq.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - # Reshape delta back to [batch_size, nheads_k, group_size, seq_len_q] - delta = delta.reshape(batch_size, nheads_k, group_size, seq_len_q) - # Sum dk and dv over group_size dimension, since k and v are shared across groups - dk = dk.reshape(batch_size, 
nheads_k, group_size, seq_len_k, head_dim) - dk = dk.sum(dim=2) # Sum over group_size dimension - dv = dv.reshape(batch_size, nheads_k, group_size, seq_len_k, head_dim) - dv = dv.sum(dim=2) - # Reshape dq to [batch_size, nheads_q, seq_len_q, head_dim] - dq = dq.reshape(batch_size, nheads_k * group_size, seq_len_q, head_dim) - delta = delta.reshape(batch_size, nheads_k * group_size, seq_len_q) - else: - # Standard case - dq = dq.reshape(batch_size, nheads_q, seq_len_q, head_dim) - dk = dk.reshape(batch_size, nheads_k, seq_len_k, head_dim) - dv = dv.reshape(batch_size, nheads_k, seq_len_k, head_dim) - delta = delta.reshape(batch_size, nheads_q, seq_len_q) - - # Go back to original layout - if layout == "bshd": - if DEBUG: - print() - print("Changing back to bshd!") - dq = dq.transpose(1, 2) - dk = dk.transpose(1, 2) - dv = dv.transpose(1, 2) - elif layout == "bhsd": - pass - else: - raise ValueError(f"Unknown layout {layout}") - - return dq, dk, dv, delta - -def attention_backward_pytorch_ref_impl( - do: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - o: torch.Tensor, - softmax_lse: torch.Tensor, - dq: torch.Tensor, - dk: torch.Tensor, - dv: torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - window_size_left: int, - window_size_right: int, - layout: Literal["bshd", "bhsd", "thd"], - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - max_seqlen_q: Optional[int], - max_seqlen_k: Optional[int], - dropout_p: float, - philox_seed: Optional[int], - philox_offset: Optional[int], - use_exp2: bool -): - if layout == "thd": - dq_ref, dk_ref, dv_ref, delta = attention_varlen_backward_pytorch_ref_impl( - do, - q, - k, - v, - o, - softmax_lse, - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2, - ) - else: - dq_ref, dk_ref, dv_ref, delta = attention_vanilla_backward_pytorch_ref_impl( - do, - q, - k, - v, - o, - softmax_lse, - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2, - ) - - - # copy into output tensor - dv.copy_(dv_ref.to(dv.dtype)) - dk.copy_(dk_ref.to(dk.dtype)) - dq.copy_(dq_ref.to(dq.dtype)) - - return delta \ No newline at end of file diff --git a/flash_attn/flash_attn_triton_amd/fwd_decode.py b/flash_attn/flash_attn_triton_amd/fwd_decode.py index 327967cebf7..bb7edad3494 100755 --- a/flash_attn/flash_attn_triton_amd/fwd_decode.py +++ b/flash_attn/flash_attn_triton_amd/fwd_decode.py @@ -1,37 +1,81 @@ import torch import triton import triton.language as tl -from typing import Literal, Optional, Union -from .utils import AUTOTUNE, get_padded_headsize, get_shape_and_strides_from_layout, is_cdna, is_fp8 +from typing import Literal, Optional +from .utils import ( + DEBUG, + AUTOTUNE, + get_padded_headsize, + get_shape_and_strides_from_layout, + apply_rotary, + is_cdna, + is_fp8, +) -DEBUG = False def get_cdna_autotune_configs(): return [ - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 
'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 3, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 64, "BLOCK_N": 64, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), # Fall-back config. - triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - ], ['IS_CAUSAL', 'dropout_p', 'MAX_SEQLENS_Q', 'MAX_SEQLENS_K', 'ACTUAL_BLOCK_DMODEL', 'VARLEN', 'HQ', 'HK'] + triton.Config( + {"BLOCK_M": 16, "BLOCK_N": 16, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + ], [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_BLOCK_DMODEL", + "VARLEN", + "HQ", + "HK", + ] + def get_autotune_configs(): if AUTOTUNE: if is_cdna(): autotune_configs, autotune_keys = get_cdna_autotune_configs() - fwd_auto_tune_configs, fwd_autotune_keys= autotune_configs, autotune_keys - reduce_auto_tune_configs, reduce_autotune_keys = autotune_configs, autotune_keys - return (fwd_auto_tune_configs, fwd_autotune_keys), (reduce_auto_tune_configs, reduce_autotune_keys) + fwd_auto_tune_configs, fwd_autotune_keys = autotune_configs, autotune_keys + reduce_auto_tune_configs, reduce_autotune_keys = ( + autotune_configs, + autotune_keys, + ) + return (fwd_auto_tune_configs, fwd_autotune_keys), ( + reduce_auto_tune_configs, + reduce_autotune_keys, + ) else: raise ValueError("Unknown Device Type") else: @@ -52,19 +96,34 @@ def get_autotune_configs(): "HK", ] - fwd_auto_tune_configs, fwd_autotune_keys= autotune_configs, autotune_keys + fwd_auto_tune_configs, fwd_autotune_keys = autotune_configs, autotune_keys reduce_auto_tune_configs, reduce_autotune_keys = autotune_configs, autotune_keys - return (fwd_auto_tune_configs, fwd_autotune_keys), (reduce_auto_tune_configs, reduce_autotune_keys) + return (fwd_auto_tune_configs, fwd_autotune_keys), ( + reduce_auto_tune_configs, + reduce_autotune_keys, + ) -(fwd_auto_tune_configs, fwd_autotune_keys), (reduce_auto_tune_configs, reduce_autotune_keys) = get_autotune_configs() +(fwd_auto_tune_configs, fwd_autotune_keys), ( + reduce_auto_tune_configs, + reduce_autotune_keys, +) = get_autotune_configs() + @triton.jit def _attn_fwd_inner( - q, kT, v, pos, col_mask, - m_i, l_i, acc, + q, + kT, + v, + pos, + col_mask, + m_i, + l_i, + acc, pid_m, - q_descale, k_descale, v_descale, # FP8 scaling factors + q_descale, + k_descale, + v_descale, # FP8 scaling factors IS_FP8: tl.constexpr, # FP8 flag BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, @@ -81,41 +140,42 @@ def _attn_fwd_inner( # -- compute qk --- qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) if IS_FP8: - qk += (tl.dot(q, kT) * q_descale * k_descale) # Apply FP8 
scaling + qk += tl.dot(q, kT) * q_descale * k_descale # Apply FP8 scaling else: qk += tl.dot(q, kT) # noqa: F821 if USE_ALIBI: row_idx = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) col_idx = pos + tl.arange(0, BLOCK_N) - + # Compute relative positions relative_pos = row_idx[:, None] + N_CTX_K_FINAL - (N_CTX_Q + col_idx[None, :]) relative_pos = tl.abs(relative_pos) - + # Compute ALiBi bias alibi_bias = -1 * alibi_slope * relative_pos - qk += (alibi_bias * 1.44269504) + qk += alibi_bias * 1.44269504 # ------------------------------------------------------------------ # masking # ------------------------------------------------------------------ if USE_SLIDING_WINDOW: - row_idx = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) # q positions - col_idx = pos + tl.arange(0, BLOCK_N) # k positions - row = row_idx[:, None] # [M,1] - col = col_idx[None, :] # [1,N] + row_idx = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) # q positions + col_idx = pos + tl.arange(0, BLOCK_N) # k positions + row = row_idx[:, None] # [M,1] + col = col_idx[None, :] # [1,N] if IS_CAUSAL: # -------- causal + window -------- - diag = N_CTX_K_FINAL - N_CTX_Q # sk-sq + diag = N_CTX_K_FINAL - N_CTX_Q # sk-sq causal_ok = col <= row + diag - if WINDOW_SIZE_LEFT < 0: # only right window + if WINDOW_SIZE_LEFT < 0: # only right window win_ok = col <= row + diag + WINDOW_SIZE_RIGHT - else: # both sides - win_ok = ((col >= row + diag - WINDOW_SIZE_LEFT) & - (col <= row + diag + WINDOW_SIZE_RIGHT)) - mask = ~(causal_ok & win_ok) # True ⇒ -inf + else: # both sides + win_ok = (col >= row + diag - WINDOW_SIZE_LEFT) & ( + col <= row + diag + WINDOW_SIZE_RIGHT + ) + mask = ~(causal_ok & win_ok) # True ⇒ -inf else: # -------- non-causal window -------- sk, sq = N_CTX_K_FINAL, N_CTX_Q @@ -123,8 +183,8 @@ def _attn_fwd_inner( mask = col > row + (sk - sq) + WINDOW_SIZE_RIGHT else: right = tl.minimum(row + (sk - sq) + WINDOW_SIZE_RIGHT, sk) - left = row + (sk - sq) - WINDOW_SIZE_LEFT - mask = (col > right) | (col < left) + left = row + (sk - sq) - WINDOW_SIZE_LEFT + mask = (col > right) | (col < left) qk = tl.where(mask, float("-inf"), qk) else: if IS_CAUSAL: @@ -144,16 +204,16 @@ def _attn_fwd_inner( # Expect col_mask shape: [BLOCK_N]. True where column is within sequence. 
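# Annotation (not part of the patch): the masking and rescaling below implement the
# standard online-softmax update in base 2 (q was pre-scaled by sm_scale * log2(e)
# further down, so exp2 replaces exp). A hedged sketch of the per-tile recurrence:
#   m_new = max(m_old, rowmax(qk))                       # running row max
#   alpha = exp2(m_old - m_new) if m_new > -inf else 0.0  # rescale factor for old sums
#   p     = exp2(qk - m_new)
#   l_new = l_old * alpha + rowsum(p)
#   acc   = acc * alpha + p @ V
# Rows whose columns are all masked keep l == 0 and acc == 0, which the split-K
# reduce later turns into a zero output instead of a NaN.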
qk = tl.where(col_mask[None, :], qk, float("-inf")) - m_i_new = tl.maximum(m_i, tl.max(qk, 1)) # per-row max so far + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) # per-row max so far # rows that are *all* -inf after masking - valid = m_i_new > float("-inf") + valid = m_i_new > float("-inf") # scale previous partial sums safely - alpha = tl.where(valid, tl.math.exp2(m_i - m_i_new), 0.0) + alpha = tl.where(valid, tl.math.exp2(m_i - m_i_new), 0.0) # subtract the row max only on valid rows - qk = tl.where(valid[:, None], qk - m_i_new[:, None], float("-inf")) + qk = tl.where(valid[:, None], qk - m_i_new[:, None], float("-inf")) p = tl.math.exp2(qk) # -- update m_i and l_i -- @@ -167,7 +227,7 @@ def _attn_fwd_inner( acc += tl.dot(p.to(v.dtype), v) * v_descale # Apply FP8 scaling for V else: acc += tl.dot(p.to(v.dtype), v) - + return m_i, l_i, acc @@ -228,7 +288,7 @@ def _fwd_kernel_splitK( stride_vn_d, stride_bt_b, stride_bt_s, - stride_az, + stride_az, stride_ah, stride_q_descale_z, # FP8 descale strides stride_q_descale_h, @@ -286,12 +346,20 @@ def _fwd_kernel_splitK( if IS_FP8: if IS_GQA: # For MQA/GQA, q_descale uses the same indexing as k/v (hk_id) - q_descale = tl.load(Q_Descale + z_id * stride_q_descale_z + hk_id * stride_q_descale_h) + q_descale = tl.load( + Q_Descale + z_id * stride_q_descale_z + hk_id * stride_q_descale_h + ) else: # For MHA, q_descale uses hq_id - q_descale = tl.load(Q_Descale + z_id * stride_q_descale_z + hq_id * stride_q_descale_h) - k_descale = tl.load(K_Descale + z_id * stride_k_descale_z + hk_id * stride_k_descale_h) - v_descale = tl.load(V_Descale + z_id * stride_v_descale_z + hv_id * stride_v_descale_h) + q_descale = tl.load( + Q_Descale + z_id * stride_q_descale_z + hq_id * stride_q_descale_h + ) + k_descale = tl.load( + K_Descale + z_id * stride_k_descale_z + hk_id * stride_k_descale_h + ) + v_descale = tl.load( + V_Descale + z_id * stride_v_descale_z + hv_id * stride_v_descale_h + ) else: q_descale, k_descale, v_descale = 1.0, 1.0, 1.0 @@ -318,21 +386,29 @@ def _fwd_kernel_splitK( # compute ptrs q_offset = Q + hq_id * stride_qh + z_id * stride_qz + g_id * stride_qg q_ptrs = q_offset + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qd - + # Handle block table for paged attention if USE_BLOCK_TABLE: # K and V now point to paged cache # Each batch has its own block table row block_table_ptr = Block_table + z_id * stride_bt_b else: - k_offset = K + hk_id * stride_kh + cache_batch_idx * stride_kz + g_id * stride_kg - v_offset = V + hv_id * stride_vh + cache_batch_idx * stride_vz + g_id * stride_vg + k_offset = ( + K + hk_id * stride_kh + cache_batch_idx * stride_kz + g_id * stride_kg + ) + v_offset = ( + V + hv_id * stride_vh + cache_batch_idx * stride_vz + g_id * stride_vg + ) # compute masks if PADDED_HEAD: q_mask = (offs_m < N_CTX_Q)[:, None] & (offs_d < ACTUAL_BLOCK_DMODEL)[None, :] - kT_mask = (offs_d < ACTUAL_BLOCK_DMODEL)[:, None] & (offs_n < N_CTX_K_FINAL)[None, :] - v_mask = (offs_n < N_CTX_K_FINAL)[:, None] & (offs_d < ACTUAL_BLOCK_DMODEL)[None, :] + kT_mask = (offs_d < ACTUAL_BLOCK_DMODEL)[:, None] & (offs_n < N_CTX_K_FINAL)[ + None, : + ] + v_mask = (offs_n < N_CTX_K_FINAL)[:, None] & (offs_d < ACTUAL_BLOCK_DMODEL)[ + None, : + ] osk_mask = (offs_m < N_CTX_Q)[:, None] & (offs_d < ACTUAL_BLOCK_DMODEL)[None, :] else: q_mask = (offs_m < N_CTX_Q)[:, None] @@ -344,7 +420,7 @@ def _fwd_kernel_splitK( # 2^x instead of exp in the loop because CSE and LICM # don't work as expected with `exp` in the loop qk_scale = sm_scale * 1.44269504 - + # load q: it 
will stay in SRAM throughout q = tl.load(q_ptrs, mask=q_mask, other=0.0) q = (q * qk_scale).to(q.dtype) @@ -366,22 +442,22 @@ def _fwd_kernel_splitK( # Paged attention: process all KV blocks from cache # Note: Cache should be updated externally before calling this kernel num_kv_blocks = (N_CTX_K_FINAL + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K - + for block_idx in range(num_kv_blocks): # Calculate sequence range for this block block_start = block_idx * BLOCK_SIZE_K block_end = tl.minimum(block_start + BLOCK_SIZE_K, N_CTX_K_FINAL) - + # Check if block overlaps with our split-k range [lo, hi) if block_end > lo and block_start < hi: # Load physical block number physical_block = tl.load(block_table_ptr + block_idx * stride_bt_s) - + # Calculate the range within this block that overlaps with [lo, hi) process_start = tl.maximum(lo - block_start, 0) process_end = tl.minimum(hi - block_start, BLOCK_SIZE_K) process_end = tl.minimum(process_end, block_end - block_start) - + # Instead of forcing a floor alignment to BLOCK_N (which can still skip # part of the intended range if start falls mid-tile for small splits), # start from the raw (possibly unaligned) process_start rounded *down* but @@ -395,51 +471,82 @@ def _fwd_kernel_splitK( process_start = aligned_start else: process_start = aligned_start - + for offset in range(process_start, process_end, BLOCK_N): # Current position (may begin slightly before logical split range; masking fixes it) pos = block_start + offset # Proceed unconditionally; masking below enforces [lo, hi) # Calculate base addresses for K and V in this physical block - k_base = K + physical_block * BLOCK_SIZE_K * stride_kn + hk_id * stride_kh + g_id * stride_kg - v_base = V + physical_block * BLOCK_SIZE_K * stride_vn + hv_id * stride_vh + g_id * stride_vg - + k_base = ( + K + + physical_block * BLOCK_SIZE_K * stride_kn + + hk_id * stride_kh + + g_id * stride_kg + ) + v_base = ( + V + + physical_block * BLOCK_SIZE_K * stride_vn + + hv_id * stride_vh + + g_id * stride_vg + ) + # Offsets within the current block block_offs = offset + offs_n - + # Masks for valid data respecting: # (1) global key length (seq_mask) # (2) block bounds (block_mask) # (3) current split range [lo, hi) - seq_mask = ((pos + offs_n) < N_CTX_K_FINAL) - block_mask = (block_offs < BLOCK_SIZE_K) - end_mask = (block_offs < process_end) + seq_mask = (pos + offs_n) < N_CTX_K_FINAL + block_mask = block_offs < BLOCK_SIZE_K + end_mask = block_offs < process_end split_mask = ((pos + offs_n) >= lo) & ((pos + offs_n) < hi) col_mask = seq_mask & block_mask & end_mask & split_mask - + # Apply masks kT_mask_final = kT_mask & col_mask[None, :] v_mask_final = v_mask & col_mask[:, None] - + # Load K and V - kT_ptrs = k_base + offs_d[:, None] * stride_kd + block_offs[None, :] * stride_kn - v_ptrs = v_base + block_offs[:, None] * stride_vn + offs_d[None, :] * stride_vd - + kT_ptrs = ( + k_base + + offs_d[:, None] * stride_kd + + block_offs[None, :] * stride_kn + ) + v_ptrs = ( + v_base + + block_offs[:, None] * stride_vn + + offs_d[None, :] * stride_vd + ) + kT = tl.load(kT_ptrs, mask=kT_mask_final, other=0.0) v = tl.load(v_ptrs, mask=v_mask_final, other=0.0) - + # Unified inner function handles both paged and contiguous m_i, l_i, acc = _attn_fwd_inner( - q, kT, v, pos, col_mask, - m_i, l_i, acc, + q, + kT, + v, + pos, + col_mask, + m_i, + l_i, + acc, pid_m, - q_descale, k_descale, v_descale, + q_descale, + k_descale, + v_descale, IS_FP8, - BLOCK_M, BLOCK_N, - N_CTX_Q, N_CTX_K_FINAL, - USE_ALIBI, alibi_slope, - USE_SLIDING_WINDOW, 
IS_CAUSAL, - WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT, + BLOCK_M, + BLOCK_N, + N_CTX_Q, + N_CTX_K_FINAL, + USE_ALIBI, + alibi_slope, + USE_SLIDING_WINDOW, + IS_CAUSAL, + WINDOW_SIZE_LEFT, + WINDOW_SIZE_RIGHT, True, ) else: @@ -447,8 +554,16 @@ def _fwd_kernel_splitK( # Note: Cache should be updated externally before calling this kernel # loop over k, v and update accumulator for start_n in range(lo, hi, BLOCK_N): - kT_ptrs = k_offset + offs_d[:, None] * stride_kd + (start_n + offs_n)[None, :] * stride_kn - V_ptrs = v_offset + (start_n + offs_n)[:, None] * stride_vn + offs_d[None, :] * stride_vd + kT_ptrs = ( + k_offset + + offs_d[:, None] * stride_kd + + (start_n + offs_n)[None, :] * stride_kn + ) + V_ptrs = ( + v_offset + + (start_n + offs_n)[:, None] * stride_vn + + offs_d[None, :] * stride_vd + ) # load k kT = tl.load(kT_ptrs, mask=kT_mask, other=0.0) @@ -460,22 +575,37 @@ def _fwd_kernel_splitK( col_valid_mask = offs_n < (hi - start_n) m_i, l_i, acc = _attn_fwd_inner( - q, kT, v, start_n, col_valid_mask, - m_i, l_i, acc, + q, + kT, + v, + start_n, + col_valid_mask, + m_i, + l_i, + acc, pid_m, - q_descale, k_descale, v_descale, + q_descale, + k_descale, + v_descale, IS_FP8, - BLOCK_M, BLOCK_N, - N_CTX_Q, N_CTX_K_FINAL, - USE_ALIBI, alibi_slope, - USE_SLIDING_WINDOW, IS_CAUSAL, - WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT, + BLOCK_M, + BLOCK_N, + N_CTX_Q, + N_CTX_K_FINAL, + USE_ALIBI, + alibi_slope, + USE_SLIDING_WINDOW, + IS_CAUSAL, + WINDOW_SIZE_LEFT, + WINDOW_SIZE_RIGHT, BOUNDS_CHECKS_N, ) # write back O osk_offset = Out_splitK + pid_zhg * stride_osk_zhg + pid_splitk * stride_osk_s - osk_ptrs = osk_offset + offs_m[:, None] * stride_osk_m + offs_d[None, :] * stride_osk_d + osk_ptrs = ( + osk_offset + offs_m[:, None] * stride_osk_m + offs_d[None, :] * stride_osk_d + ) tl.store( osk_ptrs, acc, @@ -534,7 +664,6 @@ def _splitK_reduce( offs_splitK = tl.arange(0, splitK_pow2) offs_k = pid_k * K_BLOCK_SIZE + tl.arange(0, K_BLOCK_SIZE) - # compute masks if PADDED_HEAD: o_mask = offs_k < ACTUAL_BLOCK_DMODEL @@ -546,7 +675,11 @@ def _splitK_reduce( metadata_ptr = metadata_offset + offs_splitK * stride_ms + pid_m * stride_mm osk_offset = Out_splitK + pid_zhg * stride_osk_zhg + pid_m * stride_osk_m - osk_ptr = osk_offset + offs_splitK[:, None] * stride_osk_s + offs_k[None, :] * stride_osk_k + osk_ptr = ( + osk_offset + + offs_splitK[:, None] * stride_osk_s + + offs_k[None, :] * stride_osk_k + ) # read max values of each splitK if MASK_SPLITK: @@ -560,7 +693,7 @@ def _splitK_reduce( acc = tl.load(osk_ptr) g_m = tl.max(l_m, axis=0) - + alpha = tl.where(l_m > float("-inf"), tl.math.exp2(l_m - g_m), 0.0) # read sum @@ -569,21 +702,19 @@ def _splitK_reduce( acc = acc * alpha[:, None] g_sum_safe = tl.where(g_sum > 0, g_sum, 1.0) - acc_out = tl.sum(acc, axis=0) / g_sum_safe + acc_out = tl.sum(acc, axis=0) / g_sum_safe # Store output z_id = pid_zhg // (H * G) h_id = (pid_zhg // G) % H g_id = pid_zhg % G - out_offset = Out + z_id * stride_oz + h_id * stride_oh + g_id * stride_og + out_offset = Out + z_id * stride_oz + h_id * stride_oh + g_id * stride_og out_ptr = out_offset + pid_m * stride_om + offs_k tl.store(out_ptr, acc_out, mask=o_mask) # Store lse l_ptrs = LSE + pid_zhg * stride_lse_zhg + pid_m - lse_val = tl.where(g_sum > 0, - (g_m + tl.math.log2(g_sum)) / 1.44269504, - g_m) + lse_val = tl.where(g_sum > 0, (g_m + tl.math.log2(g_sum)) / 1.44269504, g_m) tl.store(l_ptrs, lse_val) @@ -596,6 +727,7 @@ def cast_uint32_to_half2(scale_shift): shift = shift.to(tl.uint16).to(tl.float16, bitcast=True) return scale, shift 
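# Annotation (illustrative sketch, not part of the patch): the int4 KV-cache helpers in
# this file use a per-group affine mapping -- q = round((x - min) / scale) in [0, 15],
# x ~= q * scale + min -- with two 4-bit values packed per byte and a float16
# scale/shift pair stored alongside. The self-contained round trip below shows the idea
# in plain PyTorch; it assumes an even last dimension and a non-constant group, and it
# deliberately omits the scale/shift header layout and num_groups handling used by
# quantize_kv_int4 / dequantize_kv_fp16.
def _int4_roundtrip_sketch(x):
    import torch
    x = x.to(torch.float16)
    mn = x.min(dim=-1, keepdim=True).values
    mx = x.max(dim=-1, keepdim=True).values
    scale, shift = (mx - mn) / 15.0, mn
    q = torch.clamp(torch.round((x - shift) / scale), 0, 15).to(torch.uint8)
    packed = q[..., ::2] | (q[..., 1::2] << 4)            # two int4 values per byte
    lo, hi = (packed & 0xF).to(torch.float16), (packed >> 4).to(torch.float16)
    out = torch.empty_like(x)
    out[..., ::2] = lo * scale + shift                     # dequantize even lanes
    out[..., 1::2] = hi * scale + shift                    # dequantize odd lanes
    return out                                             # ~= x up to int4 rounding error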
+ @triton.jit def dequantize( x_, @@ -605,14 +737,18 @@ def dequantize( ): # PACKED_PER_VAL is the number of values packed into # each element x_. For example, for int4 quantization - #and x_ of type int32, PACKED_PER_VAL is 8. + # and x_ of type int32, PACKED_PER_VAL is 8. BLOCK_N: tl.constexpr = x_.shape[0] BLOCK_DMODEL_PACKED: tl.constexpr = x_.shape[1] offsets = tl.arange(0, PACKED_PER_VAL) * 4 - quant_offset = (x_[:, None, :] >> offsets[None, :, None]) # (BLOCK_N, PACKED_PER_VAL, D // PACKED_PER_VAL) + quant_offset = ( + x_[:, None, :] >> offsets[None, :, None] + ) # (BLOCK_N, PACKED_PER_VAL, D // PACKED_PER_VAL) - quant_offset = tl.view(quant_offset, (BLOCK_N, BLOCK_DMODEL_PACKED * PACKED_PER_VAL)) + quant_offset = tl.view( + quant_offset, (BLOCK_N, BLOCK_DMODEL_PACKED * PACKED_PER_VAL) + ) # Trick - instead of converting int4 to float16 we view it as float16 # and then multiply by 32768 * 512 == 2**24 quant_offset = (quant_offset & 0xF).to(tl.uint16).to(tl.float16, bitcast=True) @@ -622,6 +758,7 @@ def dequantize( dequant = quant_offset * scale_512 + shift return dequant + def quantize_kv_int4(k: torch.Tensor, num_groups: int = 1) -> torch.Tensor: # Scale and shift are such that quantization linearly maps # int4 values range [0..15] to input values range min(k)..max(k) @@ -639,7 +776,9 @@ def quantize_kv_int4(k: torch.Tensor, num_groups: int = 1) -> torch.Tensor: in_bytes = in_bytes.to(torch.uint8) in_int4 = in_bytes & 0xF in_int4_packed = in_int4[..., ::2] + (in_int4[..., 1::2] << 4) - scale_shift = torch.concat([scale_k.view(torch.uint8), shift_k.view(torch.uint8)], dim=-1) + scale_shift = torch.concat( + [scale_k.view(torch.uint8), shift_k.view(torch.uint8)], dim=-1 + ) k_quant = torch.concat( [ scale_shift.flatten(start_dim=-2), @@ -656,7 +795,9 @@ def dequantize_kv_fp16(quant_k: torch.Tensor, num_groups: int = 1) -> torch.Tens ss_size = num_groups * 4 scale_shift_ui8 = k_ui8[..., 0:ss_size] - scale_shift_ui8 = scale_shift_ui8.reshape(*scale_shift_ui8.shape[:-1], num_groups, 4) + scale_shift_ui8 = scale_shift_ui8.reshape( + *scale_shift_ui8.shape[:-1], num_groups, 4 + ) scale = scale_shift_ui8[..., 0:2].view(torch.float16) shift = scale_shift_ui8[..., 2:4].view(torch.float16) @@ -668,7 +809,11 @@ def dequantize_kv_fp16(quant_k: torch.Tensor, num_groups: int = 1) -> torch.Tens k1_f16 = k1_i4.to(torch.float16) * scale.expand(k_shape) + shift.expand(k_shape) k2_f16 = k2_i4.to(torch.float16) * scale.expand(k_shape) + shift.expand(k_shape) - out = torch.empty((*k1_f16.shape[:-1], k1_f16.shape[-1] * 2), dtype=torch.float16, device=quant_k.device) + out = torch.empty( + (*k1_f16.shape[:-1], k1_f16.shape[-1] * 2), + dtype=torch.float16, + device=quant_k.device, + ) out[..., ::2] = k1_f16 out[..., 1::2] = k2_f16 out = out.reshape(*k_shape[:-2], -1) @@ -689,26 +834,52 @@ def get_split_k(B: int, G: int, H: int, Mk: int) -> int: split_k = max(split_k, 1) return split_k -def attention_decode_forward_triton_impl( - q: torch.Tensor, - k_cache: torch.Tensor, - v_cache: torch.Tensor, - k_new: Optional[torch.Tensor], - v_new: Optional[torch.Tensor], - out: torch.Tensor, - sm_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - alibi_slopes: Optional[torch.Tensor], - layout: Literal["bshd"], - cache_seqlens: Optional[torch.Tensor], - cache_batch_idx: Optional[torch.Tensor], - block_table: Optional[torch.Tensor] = None, - q_descale: Optional[torch.Tensor] = None, - k_descale: Optional[torch.Tensor] = None, - v_descale: Optional[torch.Tensor] = None, + +def 
attention_forward_decode_triton_impl( + q: torch.Tensor, + k_cache: torch.Tensor, + v_cache: torch.Tensor, + k_new: Optional[torch.Tensor], + v_new: Optional[torch.Tensor], + out: torch.Tensor, + sm_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + alibi_slopes: Optional[torch.Tensor], + layout: Literal["bshd"], + cache_seqlens: Optional[torch.Tensor], + cache_batch_idx: Optional[torch.Tensor], + block_table: Optional[torch.Tensor] = None, + q_descale: Optional[torch.Tensor] = None, + k_descale: Optional[torch.Tensor] = None, + v_descale: Optional[torch.Tensor] = None, + # rotary (optional) + rotary_cos: Optional[torch.Tensor] = None, + rotary_sin: Optional[torch.Tensor] = None, + rotary_interleaved: bool = False, + seqlens_rotary: Optional[torch.Tensor] = None, ): + # apply rotary embedding + if rotary_cos is not None and rotary_sin is not None: + # Prefer explicitly provided rotary sequence start offsets if given; fall back to cache_seqlens. + seqlen_offsets = ( + seqlens_rotary + if seqlens_rotary is not None + else (cache_seqlens if cache_seqlens is not None else 0) + ) + local = (window_size_left != -1) or (window_size_right != -1) + q, k_new = apply_rotary( + q, + k_new, + rotary_cos, + rotary_sin, + causal=causal, + local=local, + interleaved=rotary_interleaved, + seqlen_offsets=seqlen_offsets, + ) + # handle cache updates if k_new is not None and v_new is not None: # Update cache with new KV values @@ -716,7 +887,7 @@ def attention_decode_forward_triton_impl( # Non-paged attention: update cache directly batch_size = k_new.shape[0] seqlen_new = k_new.shape[1] - + if cache_seqlens is not None: # Use cache_seqlens to determine where to insert new KV for b in range(batch_size): @@ -728,14 +899,16 @@ def attention_decode_forward_triton_impl( else: # Append at the end of existing cache seqlen_cache = k_cache.shape[1] - k_cache[:, seqlen_cache - seqlen_new:] = k_new - v_cache[:, seqlen_cache - seqlen_new:] = v_new + k_cache[:, seqlen_cache - seqlen_new :] = k_new + v_cache[:, seqlen_cache - seqlen_new :] = v_new else: # Paged attention: update cache using block table batch_size = k_new.shape[0] seqlen_new = k_new.shape[1] - block_size = k_cache.shape[1] # k_cache shape: [num_blocks, block_size, nheads, head_dim] - + block_size = k_cache.shape[ + 1 + ] # k_cache shape: [num_blocks, block_size, nheads, head_dim] + # Update cache for each batch element for b in range(batch_size): if cache_seqlens is not None: @@ -750,35 +923,37 @@ def attention_decode_forward_triton_impl( else: start_idx = block_idx * block_size break - + # Copy new KV values into the paged cache for i in range(seqlen_new): pos = start_idx + i block_idx = pos // block_size within_block_idx = pos % block_size - + # Get the physical block number from block table if block_idx < block_table.shape[1]: physical_block = int(block_table[b, block_idx].item()) - + # Update k_cache and v_cache at the physical block location k_cache[physical_block, within_block_idx] = k_new[b, i] v_cache[physical_block, within_block_idx] = v_new[b, i] - + # Update cache_seqlens if provided if cache_seqlens is not None: cache_seqlens[b] = start_idx + seqlen_new - + # triton configs BLOCK_M = 16 BLOCK_N = 64 num_stages = 1 num_warps_fwd = 1 num_warps_reduce = 4 - + # kernel_configs is_new_kv = False # Cache has been updated, so no new KV in kernel - use_alibi, (stride_az, stride_ah) = True if alibi_slopes is not None else False, alibi_slopes.stride() if alibi_slopes is not None else (None, None) + use_alibi, (stride_az, 
stride_ah) = True if alibi_slopes is not None else False, ( + alibi_slopes.stride() if alibi_slopes is not None else (None, None) + ) use_cache_seqlens = cache_seqlens is not None use_sliding_window = window_size_left != -1 or window_size_right != -1 use_block_table = block_table is not None @@ -786,8 +961,13 @@ def attention_decode_forward_triton_impl( NUM_QUANT_GROUPS = 1 # get shapes and strides - (batch_size, seqlen_q, nheads_q, dim_q), (stride_qz, stride_qh, stride_qm, stride_qd) = get_shape_and_strides_from_layout(q, layout) - + (batch_size, seqlen_q, nheads_q, dim_q), ( + stride_qz, + stride_qh, + stride_qm, + stride_qd, + ) = get_shape_and_strides_from_layout(q, layout) + # Handle paged KV cache layout if use_block_table: # For paged attention, k_cache and v_cache have shape [num_blocks, block_size, nheads, head_dim] @@ -801,29 +981,63 @@ def attention_decode_forward_triton_impl( num_blocks_per_seq = block_table.shape[1] seqlen_kc = num_blocks_per_seq * block_size_k seqlen_vc = seqlen_kc - + # Strides for paged layout stride_kc_z = 0 # No batch dimension in paged cache stride_kc_n = k_cache.stride(1) # Sequence stride stride_kc_h = k_cache.stride(2) # Head stride stride_kc_d = k_cache.stride(3) # Dim stride - + stride_vc_z = 0 stride_vc_n = v_cache.stride(1) stride_vc_h = v_cache.stride(2) stride_vc_d = v_cache.stride(3) else: - (_, seqlen_kc, nheads_kc, dim_kc), (stride_kc_z, stride_kc_h, stride_kc_n, stride_kc_d) = get_shape_and_strides_from_layout(k_cache, layout) - (_, seqlen_vc, nheads_vc, dim_vc), (stride_vc_z, stride_vc_h, stride_vc_n, stride_vc_d) = get_shape_and_strides_from_layout(v_cache, layout) + (_, seqlen_kc, nheads_kc, dim_kc), ( + stride_kc_z, + stride_kc_h, + stride_kc_n, + stride_kc_d, + ) = get_shape_and_strides_from_layout(k_cache, layout) + (_, seqlen_vc, nheads_vc, dim_vc), ( + stride_vc_z, + stride_vc_h, + stride_vc_n, + stride_vc_d, + ) = get_shape_and_strides_from_layout(v_cache, layout) block_size_k = 0 # Not used if is_new_kv: - ( _, seqlen_kn, nheads_kn, dim_kn), (stride_kn_z, stride_kn_h, stride_kn_n, stride_kn_d) = get_shape_and_strides_from_layout(k_new, layout) - (_, seqlen_vn, nheads_vn, dim_vn), (stride_vn_z, stride_vn_h, stride_vn_n, stride_vn_d) = get_shape_and_strides_from_layout(v_new, layout) + (_, seqlen_kn, nheads_kn, dim_kn), ( + stride_kn_z, + stride_kn_h, + stride_kn_n, + stride_kn_d, + ) = get_shape_and_strides_from_layout(k_new, layout) + (_, seqlen_vn, nheads_vn, dim_vn), ( + stride_vn_z, + stride_vn_h, + stride_vn_n, + stride_vn_d, + ) = get_shape_and_strides_from_layout(v_new, layout) else: - ( _, seqlen_kn, nheads_kn, dim_kn), (stride_kn_z, stride_kn_h, stride_kn_n, stride_kn_d) = (None, None, None, None), (None, None, None, None) - (_, seqlen_vn, nheads_vn, dim_vn), (stride_vn_z, stride_vn_h, stride_vn_n, stride_vn_d) = (None, None, None, None), (None, None, None, None) - (_, seqlen_o, nheads_o, dim_o), (stride_oz, stride_oh, stride_om, stride_od) = get_shape_and_strides_from_layout(out, layout) - assert dim_q == dim_kc == dim_vc, f"Dimensions must match: {dim_q}, {dim_kc}, {dim_vc}" + (_, seqlen_kn, nheads_kn, dim_kn), ( + stride_kn_z, + stride_kn_h, + stride_kn_n, + stride_kn_d, + ) = (None, None, None, None), (None, None, None, None) + (_, seqlen_vn, nheads_vn, dim_vn), ( + stride_vn_z, + stride_vn_h, + stride_vn_n, + stride_vn_d, + ) = (None, None, None, None), (None, None, None, None) + (_, seqlen_o, nheads_o, dim_o), (stride_oz, stride_oh, stride_om, stride_od) = ( + get_shape_and_strides_from_layout(out, layout) + ) + 
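    # Annotation (not part of the patch): with a block table, a logical cache position
    # `pos` for batch `b` maps to physical storage roughly as
    #   block_idx      = pos // block_size_k
    #   physical_block = block_table[b, block_idx]
    #   k_cache[physical_block, pos % block_size_k, head, :]
    # (assuming a contiguous [num_blocks, block_size, nheads, head_dim] cache), which is
    # the same addressing the split-K kernel above reconstructs from
    # physical_block * BLOCK_SIZE_K * stride_kn.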
assert ( + dim_q == dim_kc == dim_vc + ), f"Dimensions must match: {dim_q}, {dim_kc}, {dim_vc}" # add extra information needed by the kernels if layout == "bshd": @@ -841,7 +1055,7 @@ def attention_decode_forward_triton_impl( raise ValueError(f"{layout} layout is not supported") # get padded size - dim_padded = get_padded_headsize(dim_kc) + dim_padded = get_padded_headsize(dim_kc) is_padded_head = dim_padded != dim_kc # Handle MQA/GQA case @@ -857,7 +1071,11 @@ def attention_decode_forward_triton_impl( # Use heuristics if use_block_table: # For paged attention, use the actual sequence length from cache_seqlens - max_seqlen = int(cache_seqlens.max().item()) if cache_seqlens is not None else block_size_k + max_seqlen = ( + int(cache_seqlens.max().item()) + if cache_seqlens is not None + else block_size_k + ) split_k = get_split_k(batch_size, n_group_q, heads_per_group_q, max_seqlen) else: split_k = get_split_k(batch_size, n_group_q, heads_per_group_q, seqlen_kc) @@ -865,37 +1083,63 @@ def attention_decode_forward_triton_impl( # setup grid seqlen_q_ceil = (seqlen_q + BLOCK_M - 1) // BLOCK_M * BLOCK_M - grid = lambda META: (triton.cdiv(seqlen_q, META['BLOCK_M']), batch_size * n_group_q * heads_per_group_q, split_k) - + grid = lambda META: ( + triton.cdiv(seqlen_q, META["BLOCK_M"]), + batch_size * n_group_q * heads_per_group_q, + split_k, + ) + # create intermediate tensors - out_splitk = torch.empty([batch_size * n_group_q * heads_per_group_q, split_k, seqlen_q_ceil, dim_kc], dtype=torch.float32, device=q.device) - metadata = torch.empty([batch_size * n_group_q * heads_per_group_q, 2, split_k, seqlen_q_ceil], dtype=torch.float32, device=q.device) - lse = torch.empty((batch_size * n_group_q * heads_per_group_q, seqlen_q), dtype=torch.float32, device=q.device) - + out_splitk = torch.empty( + [batch_size * n_group_q * heads_per_group_q, split_k, seqlen_q_ceil, dim_kc], + dtype=torch.float32, + device=q.device, + ) + metadata = torch.empty( + [batch_size * n_group_q * heads_per_group_q, 2, split_k, seqlen_q_ceil], + dtype=torch.float32, + device=q.device, + ) + lse = torch.empty( + (batch_size * n_group_q * heads_per_group_q, seqlen_q), + dtype=torch.float32, + device=q.device, + ) + # get intermediate tensor strides stride_osk_zhg, stride_osk_s, stride_osk_m, stride_osk_d = out_splitk.stride() stride_mzhg, stride_m2, stride_ms, stride_mm = metadata.stride() stride_lse_zhg, stride_lse_m = lse.stride() - + # Block table strides if use_block_table: stride_bt_b, stride_bt_s = block_table.stride() else: stride_bt_b, stride_bt_s = 0, 0 - + # FP8 support IS_FP8 = is_fp8(q) if IS_FP8: if (q_descale is None) or (k_descale is None) or (v_descale is None): import warnings - warnings.warn("FP8 tensors detected but descale factors not provided. Using default scale of 1.0", UserWarning) + + warnings.warn( + "FP8 tensors detected but descale factors not provided. 
Using default scale of 1.0", + UserWarning, + ) # Create default descale tensors if not provided if q_descale is None: - q_descale = torch.ones(batch_size, nheads_q, dtype=torch.float32, device=q.device) + q_descale = torch.ones( + batch_size, nheads_q, dtype=torch.float32, device=q.device + ) if k_descale is None: - k_descale = torch.ones(batch_size, nheads_kc, dtype=torch.float32, device=q.device) + k_descale = torch.ones( + batch_size, nheads_kc, dtype=torch.float32, device=q.device + ) if v_descale is None: - v_descale = torch.ones(batch_size, nheads_vc, dtype=torch.float32, device=q.device) + v_descale = torch.ones( + batch_size, nheads_vc, dtype=torch.float32, device=q.device + ) stride_q_descale_z, stride_q_descale_h = q_descale.stride() stride_k_descale_z, stride_k_descale_h = k_descale.stride() stride_v_descale_z, stride_v_descale_h = v_descale.stride() @@ -911,18 +1155,45 @@ def attention_decode_forward_triton_impl( stride_v_descale_h = 0 if DEBUG: - print("batch_size, seqlen_q, nheads_q, dim_q", (batch_size, seqlen_q, nheads_q, dim_q)) + print( + "batch_size, seqlen_q, nheads_q, dim_q", + (batch_size, seqlen_q, nheads_q, dim_q), + ) print("_, seqlen_kc, nheads_kc, dim_kc", (_, seqlen_kc, nheads_kc, dim_kc)) print("dim_padded:", dim_padded) - print("stride_qz, stride_qm, stride_qg, stride_qh, stride_qd", (stride_qz, stride_qm, stride_qg, stride_qh, stride_qd)) - print("stride_kc_z, stride_kc_n, stride_kc_g, stride_kc_h, stride_kc_d", (stride_kc_z, stride_kc_n, stride_kc_g, stride_kc_h, stride_kc_d)) - print("stride_vc_z, stride_vc_n, stride_vc_g, stride_vc_h, stride_vc_d", (stride_vc_z, stride_vc_n, stride_vc_g, stride_vc_h, stride_vc_d)) + print( + "stride_qz, stride_qm, stride_qg, stride_qh, stride_qd", + (stride_qz, stride_qm, stride_qg, stride_qh, stride_qd), + ) + print( + "stride_kc_z, stride_kc_n, stride_kc_g, stride_kc_h, stride_kc_d", + (stride_kc_z, stride_kc_n, stride_kc_g, stride_kc_h, stride_kc_d), + ) + print( + "stride_vc_z, stride_vc_n, stride_vc_g, stride_vc_h, stride_vc_d", + (stride_vc_z, stride_vc_n, stride_vc_g, stride_vc_h, stride_vc_d), + ) if is_new_kv: - print("stride_kn_z, stride_kn_n, stride_kn_g, stride_kn_h, stride_kn_d", (stride_kn_z, stride_kn_n, stride_kn_g, stride_kn_h, stride_kn_d)) - print("stride_vn_z, stride_vn_n, stride_vn_g, stride_vn_h, stride_vn_d", (stride_vn_z, stride_vn_n, stride_vn_g, stride_vn_h, stride_vn_d)) - print("stride_oz, stride_om, stride_og, stride_oh, stride_od", (stride_oz, stride_om, stride_og, stride_oh, stride_od)) - print("stride_osk_zhg, stride_osk_s, stride_osk_m, stride_osk_d", (stride_osk_zhg, stride_osk_s, stride_osk_m, stride_osk_d)) - print("stride_mzhg, stride_m2, stride_ms, stride_mm", (stride_mzhg, stride_m2, stride_ms, stride_mm)) + print( + "stride_kn_z, stride_kn_n, stride_kn_g, stride_kn_h, stride_kn_d", + (stride_kn_z, stride_kn_n, stride_kn_g, stride_kn_h, stride_kn_d), + ) + print( + "stride_vn_z, stride_vn_n, stride_vn_g, stride_vn_h, stride_vn_d", + (stride_vn_z, stride_vn_n, stride_vn_g, stride_vn_h, stride_vn_d), + ) + print( + "stride_oz, stride_om, stride_og, stride_oh, stride_od", + (stride_oz, stride_om, stride_og, stride_oh, stride_od), + ) + print( + "stride_osk_zhg, stride_osk_s, stride_osk_m, stride_osk_d", + (stride_osk_zhg, stride_osk_s, stride_osk_m, stride_osk_d), + ) + print( + "stride_mzhg, stride_m2, stride_ms, stride_mm", + (stride_mzhg, stride_m2, stride_ms, stride_mm), + ) print("stride_lse_zhg, stride_lse_m", (stride_lse_zhg, stride_lse_m)) _fwd_kernel_splitK[grid]( @@ -1042,7 
+1313,6 @@ def attention_decode_forward_triton_impl( k_block_size = dim_padded // k_block_num grid = (batch_size * n_group_q * heads_per_group_q, seqlen_q, k_block_num) - if DEBUG: print("splitK_pow2:", splitK_pow2) print("k_block_num:", k_block_num) @@ -1050,10 +1320,10 @@ def attention_decode_forward_triton_impl( print("grid:", grid) _splitK_reduce[grid]( - out_splitk, - metadata, - out, - lse, + out_splitk, + metadata, + out, + lse, # Split-K output strides stride_osk_zhg=stride_osk_zhg, stride_osk_s=stride_osk_s, @@ -1076,13 +1346,14 @@ def attention_decode_forward_triton_impl( K_BLOCK_SIZE=k_block_size, BLOCK_DMODEL=dim_padded, ACTUAL_BLOCK_DMODEL=dim_kc, - G=n_group_q, + G=n_group_q, H=heads_per_group_q, # TODO: Tune num_warps - split_k=split_k, - splitK_pow2=splitK_pow2, + split_k=split_k, + splitK_pow2=splitK_pow2, MASK_SPLITK=mask_split_k, PADDED_HEAD=is_padded_head, - num_warps=num_warps_reduce) + num_warps=num_warps_reduce, + ) - return lse \ No newline at end of file + return lse.view(batch_size, n_group_q * heads_per_group_q, seqlen_q) diff --git a/flash_attn/flash_attn_triton_amd/fwd_prefill.py b/flash_attn/flash_attn_triton_amd/fwd_prefill.py index bb0301c7700..d1036f98c3f 100755 --- a/flash_attn/flash_attn_triton_amd/fwd_prefill.py +++ b/flash_attn/flash_attn_triton_amd/fwd_prefill.py @@ -2,54 +2,130 @@ import torch import triton import triton.language as tl -from typing import Literal, Optional, Union -from .utils import DROPOUT_USE_PYTORCH, DROPOUT_DUMP, AUTOTUNE, compute_alibi_block, compute_fp8_scaling_factors, get_arch, is_cdna, is_fp8, is_rdna, create_dropout_mask, DEBUG +from typing import Literal, Optional +from .utils import ( + DEBUG, + AUTOTUNE, + DROPOUT_USE_PYTORCH, + DROPOUT_DUMP, + compute_alibi_block, + compute_fp8_scaling_factors, + get_arch, + is_cdna, + is_fp8, + is_rdna, + create_dropout_mask, + apply_rotary, +) # NOTE: triton fails to import tl.constexprs so create them here for the file tl_DROPOUT_USE_PYTORCH: tl.constexpr = triton.language.constexpr(DROPOUT_USE_PYTORCH) tl_DROPOUT_DUMP: tl.constexpr = triton.language.constexpr(DROPOUT_DUMP) + # ------------------------------- # Autotune # ------------------------------- def get_fwd_prefill_cdna_autotune_configs(): return [ - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 3, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - triton.Config({'BLOCK_M': 64, 'BLOCK_N': 64, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 128, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 3, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 128, "BLOCK_N": 32, "waves_per_eu": 2, 
"PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + triton.Config( + {"BLOCK_M": 64, "BLOCK_N": 64, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), # Fall-back config. - triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=4), - ], ['IS_CAUSAL', 'dropout_p', 'MAX_SEQLENS_Q', 'MAX_SEQLENS_K', 'ACTUAL_BLOCK_DMODEL_QK', 'ACTUAL_BLOCK_DMODEL_V', 'IS_VARLEN', 'HQ', 'HK'] + triton.Config( + {"BLOCK_M": 16, "BLOCK_N": 16, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=4, + ), + ], [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_BLOCK_DMODEL_QK", + "ACTUAL_BLOCK_DMODEL_V", + "IS_VARLEN", + "HQ", + "HK", + ] def get_fwd_prefill_rdna_autotune_configs(): return [ - triton.Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - triton.Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 4, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 2, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), + triton.Config( + {"BLOCK_M": 32, "BLOCK_N": 32, "waves_per_eu": 4, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + triton.Config( + {"BLOCK_M": 32, "BLOCK_N": 32, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + triton.Config( + {"BLOCK_M": 32, "BLOCK_N": 16, "waves_per_eu": 4, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + triton.Config( + {"BLOCK_M": 32, "BLOCK_N": 16, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + triton.Config( + {"BLOCK_M": 16, "BLOCK_N": 16, "waves_per_eu": 4, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + triton.Config( + {"BLOCK_M": 16, "BLOCK_N": 16, "waves_per_eu": 2, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), # Fall-back config. - triton.Config({'BLOCK_M': 16, 'BLOCK_N': 16, 'waves_per_eu': 1, 'PRE_LOAD_V': False}, num_stages=1, - num_warps=2), - ], ['IS_CAUSAL', 'dropout_p', 'MAX_SEQLENS_Q', 'MAX_SEQLENS_K', 'ACTUAL_BLOCK_DMODEL_QK', 'ACTUAL_BLOCK_DMODEL_V', 'IS_VARLEN', 'HQ', 'HK'] + triton.Config( + {"BLOCK_M": 16, "BLOCK_N": 16, "waves_per_eu": 1, "PRE_LOAD_V": False}, + num_stages=1, + num_warps=2, + ), + ], [ + "IS_CAUSAL", + "dropout_p", + "MAX_SEQLENS_Q", + "MAX_SEQLENS_K", + "ACTUAL_BLOCK_DMODEL_QK", + "ACTUAL_BLOCK_DMODEL_V", + "IS_VARLEN", + "HQ", + "HK", + ] def get_fwd_prefill_autotune_configs(): @@ -64,11 +140,18 @@ def get_fwd_prefill_autotune_configs(): arch = get_arch() if arch == "gfx950": default_config = triton.Config( - {"BLOCK_M": 128, "BLOCK_N": 128, "waves_per_eu": 2, "PRE_LOAD_V": False}, + { + "BLOCK_M": 128, + "BLOCK_N": 128, + "waves_per_eu": 2, + "PRE_LOAD_V": False, + }, num_stages=1, num_warps=4, ) - elif arch == "gfx942" and False: # Disabled due shared mem oom in CI when using triton==3.3.0 when using top of tree everything seems fine. + elif ( + arch == "gfx942" and False + ): # Disabled due shared mem oom in CI when using triton==3.3.0 when using top of tree everything seems fine. 
default_config = triton.Config( {"BLOCK_M": 128, "BLOCK_N": 64, "waves_per_eu": 2, "PRE_LOAD_V": False}, num_stages=1, @@ -80,10 +163,8 @@ def get_fwd_prefill_autotune_configs(): num_stages=1, num_warps=4, ) - - return [ - default_config - ], [ + + return [default_config], [ "IS_CAUSAL", "dropout_p", "MAX_SEQLENS_Q", @@ -96,27 +177,64 @@ def get_fwd_prefill_autotune_configs(): ] -fwd_prefill_autotune_configs, fwd_prefill_autotune_keys = get_fwd_prefill_autotune_configs() +fwd_prefill_autotune_configs, fwd_prefill_autotune_keys = ( + get_fwd_prefill_autotune_configs() +) + @triton.jit -def _attn_fwd_no_mask(acc, l_i, m_i, - q, k_base_ptrs, v_base_ptrs, bias_base_ptrs, - stride_kn, stride_vk, stride_bn, stride_sn, - start_m, seqlen_k, seqlen_q, - dropout_p, philox_seed, philox_base_ptrs, - sd_mask_base_ptrs, dropout_mask_base_ptrs, - offs_m, offs_n, offs_d_qk, offs_d_v, - block_min, block_max, alibi_slope, - q_descale, k_descale, v_descale, IS_FP8: tl.constexpr, FP8_MAX: tl.constexpr, FP8_P_DESCALE: tl.constexpr, - BLOCK_M: tl.constexpr, BLOCK_DMODEL_QK: tl.constexpr, BLOCK_DMODEL_V: tl.constexpr, BLOCK_N: tl.constexpr, - PRE_LOAD_V: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD_QK: tl.constexpr, PADDED_HEAD_V: tl.constexpr, - ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, ACTUAL_BLOCK_DMODEL_V: tl.constexpr, - SM_SCALE: tl.constexpr, USE_ALIBI: tl.constexpr, USE_EXP2: tl.constexpr, - RETURN_SCORES: tl.constexpr, ACCUMULATOR_TYPE): +def _attn_fwd_no_mask( + acc, + l_i, + m_i, + q, + k_base_ptrs, + v_base_ptrs, + bias_base_ptrs, + stride_kn, + stride_vk, + stride_bn, + stride_sn, + start_m, + seqlen_k, + seqlen_q, + dropout_p, + philox_seed, + philox_base_ptrs, + sd_mask_base_ptrs, + dropout_mask_base_ptrs, + offs_m, + offs_n, + offs_d_qk, + offs_d_v, + block_min, + block_max, + alibi_slope, + q_descale, + k_descale, + v_descale, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + FP8_P_DESCALE: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL_QK: tl.constexpr, + BLOCK_DMODEL_V: tl.constexpr, + BLOCK_N: tl.constexpr, + PRE_LOAD_V: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + PADDED_HEAD_QK: tl.constexpr, + PADDED_HEAD_V: tl.constexpr, + ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, + ACTUAL_BLOCK_DMODEL_V: tl.constexpr, + SM_SCALE: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + RETURN_SCORES: tl.constexpr, + ACCUMULATOR_TYPE, +): if USE_EXP2: RCP_LN2: tl.constexpr = 1.4426950408889634 - + # loop over k, v, and update accumulator for start_n in range(block_min, block_max, BLOCK_N): # get ptrs @@ -128,37 +246,46 @@ def _attn_fwd_no_mask(acc, l_i, m_i, k_mask, k_mask_other = (offs_d_qk[:, None] < ACTUAL_BLOCK_DMODEL_QK), 0.0 else: k_mask, k_mask_other = None, None - + if PADDED_HEAD_V: v_mask, v_mask_other = (offs_d_v[None, :] < ACTUAL_BLOCK_DMODEL_V), 0.0 else: v_mask, v_mask_other = None, None - + # load k and if preload_v then v - k = tl.load(k_ptrs, mask=k_mask, other=k_mask_other) if PADDED_HEAD_QK else tl.load(k_ptrs) + k = ( + tl.load(k_ptrs, mask=k_mask, other=k_mask_other) + if PADDED_HEAD_QK + else tl.load(k_ptrs) + ) if PRE_LOAD_V: - v = tl.load(v_ptrs, mask=v_mask, other=v_mask_other) if PADDED_HEAD_V else tl.load(v_ptrs) - + v = ( + tl.load(v_ptrs, mask=v_mask, other=v_mask_other) + if PADDED_HEAD_V + else tl.load(v_ptrs) + ) + # setup qk accumlator qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=ACCUMULATOR_TYPE) # -- compute qk ---- - if IS_FP8 : - qk += (tl.dot(q, k) * q_descale * k_descale) + if IS_FP8: + qk += tl.dot(q, k) * q_descale * k_descale else: qk += 
tl.dot(q, k) - qk_scaled = qk * SM_SCALE + qk_scaled = qk * SM_SCALE if USE_ALIBI: # compute the global position of each token within the sequence q_offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) - alibi_block = compute_alibi_block(alibi_slope, seqlen_q, seqlen_k, q_offs_m, - kv_offs_n) + alibi_block = compute_alibi_block( + alibi_slope, seqlen_q, seqlen_k, q_offs_m, kv_offs_n + ) qk_scaled += alibi_block # compute qk mask qk_mask = (offs_m[:, None] < seqlen_q) & (kv_offs_n[None, :] < seqlen_k) - + # compute bias if bias_base_ptrs is not None: bias_ptrs = bias_base_ptrs + start_n * stride_bn @@ -169,10 +296,10 @@ def _attn_fwd_no_mask(acc, l_i, m_i, m_ij = tl.maximum(m_i, tl.max(qk_scaled, 1)) # scale and subtract max - q_shifted = tl.where(m_ij[:, None] == float("-inf"), - float("-inf"), - qk_scaled - m_ij[:, None]) - + q_shifted = tl.where( + m_ij[:, None] == float("-inf"), float("-inf"), qk_scaled - m_ij[:, None] + ) + # Compute scaled QK and softmax probabilities if USE_EXP2: p = tl.math.exp2(q_shifted * RCP_LN2) @@ -188,7 +315,9 @@ def _attn_fwd_no_mask(acc, l_i, m_i, if tl_DROPOUT_USE_PYTORCH: dropout_mask = tl.load(dropout_mask_ptrs, mask=qk_mask) else: - rng_output = tl.rand(philox_seed, philox_ptrs) # TODO: use tl.randint for better performance + rng_output = tl.rand( + philox_seed, philox_ptrs + ) # TODO: use tl.randint for better performance dropout_mask = rng_output > dropout_p if tl_DROPOUT_DUMP: tl.store(dropout_mask_ptrs, dropout_mask, mask=qk_mask) @@ -203,21 +332,23 @@ def _attn_fwd_no_mask(acc, l_i, m_i, # NOTE: the returned score is not the same as the reference because we need to adjust as we find new maxes per block. We are not doing that sd_mask_ptrs = sd_mask_base_ptrs + start_n * stride_sn tl.store(sd_mask_ptrs, p, mask=qk_mask) - + # -- update output accumulator -- # alpha is an adjustment factor for acc and li as we loop and find new maxes # store the diff in maxes to adjust acc and li as we discover new maxes - m_diff = tl.where(m_ij == float("-inf"), - float("-inf"), - m_i - m_ij) + m_diff = tl.where(m_ij == float("-inf"), float("-inf"), m_i - m_ij) if USE_EXP2: alpha = tl.math.exp2(m_diff * RCP_LN2) else: alpha = tl.math.exp(m_diff) acc = acc * alpha[:, None] if not PRE_LOAD_V: - v = tl.load(v_ptrs, mask=v_mask, other=v_mask_other) if PADDED_HEAD_V else tl.load(v_ptrs) - + v = ( + tl.load(v_ptrs, mask=v_mask, other=v_mask_other) + if PADDED_HEAD_V + else tl.load(v_ptrs) + ) + # -- update m_i and l_i l_i = l_i * alpha + l_ij m_i = m_ij @@ -225,38 +356,80 @@ def _attn_fwd_no_mask(acc, l_i, m_i, if IS_FP8: if FP8_P_DESCALE: scale_p, descale_p = compute_fp8_scaling_factors(p, FP8_MAX) - acc += (tl.dot((p * scale_p).to(v.type.element_ty), v) * descale_p * v_descale) + acc += ( + tl.dot((p * scale_p).to(v.type.element_ty), v) + * descale_p + * v_descale + ) else: acc += tl.dot(p.to(v.type.element_ty), v) * v_descale else: acc += tl.dot(p.to(v.type.element_ty), v) - + return acc, l_i, m_i + @triton.jit -def _attn_fwd_mask(acc, l_i, m_i, - q, k_base_ptrs, v_base_ptrs, bias_base_ptrs, - stride_kn, stride_vk, stride_bn, stride_sn, start_m, - seqlen_k, seqlen_q, - dropout_p, philox_seed, philox_base_ptrs, - sd_mask_base_ptrs, dropout_mask_base_ptrs, - offs_m, offs_n, offs_d_qk, offs_d_v, - block_min, block_max, n_extra_tokens, alibi_slope, - q_descale, k_descale, v_descale, IS_FP8: tl.constexpr, FP8_MAX: tl.constexpr, FP8_P_DESCALE: tl.constexpr, - IS_CAUSAL: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL_QK: tl.constexpr, BLOCK_DMODEL_V: tl.constexpr, BLOCK_N: 
tl.constexpr, - PRE_LOAD_V: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, PADDED_HEAD_QK: tl.constexpr, PADDED_HEAD_V: tl.constexpr, - ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, ACTUAL_BLOCK_DMODEL_V: tl.constexpr, - SM_SCALE: tl.constexpr, USE_ALIBI: tl.constexpr, USE_EXP2: tl.constexpr, - RETURN_SCORES: tl.constexpr, - USE_SLIDING_WINDOW: tl.constexpr, WINDOW_SIZE_LEFT: tl.constexpr, WINDOW_SIZE_RIGHT: tl.constexpr, - ACCUMULATOR_TYPE): +def _attn_fwd_mask( + acc, + l_i, + m_i, + q, + k_base_ptrs, + v_base_ptrs, + bias_base_ptrs, + stride_kn, + stride_vk, + stride_bn, + stride_sn, + start_m, + seqlen_k, + seqlen_q, + dropout_p, + philox_seed, + philox_base_ptrs, + sd_mask_base_ptrs, + dropout_mask_base_ptrs, + offs_m, + offs_n, + offs_d_qk, + offs_d_v, + block_min, + block_max, + n_extra_tokens, + alibi_slope, + q_descale, + k_descale, + v_descale, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + FP8_P_DESCALE: tl.constexpr, + IS_CAUSAL: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL_QK: tl.constexpr, + BLOCK_DMODEL_V: tl.constexpr, + BLOCK_N: tl.constexpr, + PRE_LOAD_V: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + PADDED_HEAD_QK: tl.constexpr, + PADDED_HEAD_V: tl.constexpr, + ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, + ACTUAL_BLOCK_DMODEL_V: tl.constexpr, + SM_SCALE: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + RETURN_SCORES: tl.constexpr, + USE_SLIDING_WINDOW: tl.constexpr, + WINDOW_SIZE_LEFT: tl.constexpr, + WINDOW_SIZE_RIGHT: tl.constexpr, + ACCUMULATOR_TYPE, +): if USE_EXP2: RCP_LN2: tl.constexpr = 1.4426950408889634 # seqlen diff seqlen_delta_qk = seqlen_k - seqlen_q - + # loop over k, v, and update accumulator for start_n in range(block_min, block_max, BLOCK_N): # get ptrs @@ -266,21 +439,21 @@ def _attn_fwd_mask(acc, l_i, m_i, # For padded blocks, we will overrun the tensor size if # we load all BLOCK_N. For others, the blocks are all within range. kv_offs_n = start_n + tl.arange(0, BLOCK_N) - k_mask = (kv_offs_n[None, :] < seqlen_k) - v_mask = (kv_offs_n[:, None] < seqlen_k) + k_mask = kv_offs_n[None, :] < seqlen_k + v_mask = kv_offs_n[:, None] < seqlen_k if PADDED_HEAD_QK: k_mask = k_mask & (offs_d_qk[:, None] < ACTUAL_BLOCK_DMODEL_QK) if PADDED_HEAD_V: v_mask = v_mask & (offs_d_v[None, :] < ACTUAL_BLOCK_DMODEL_V) - + # load k and if preload_v then v - k = tl.load(k_ptrs, mask=k_mask, other = 0.0) + k = tl.load(k_ptrs, mask=k_mask, other=0.0) if PRE_LOAD_V: v = tl.load(v_ptrs, mask=v_mask, other=0.0) - + # setup qk accumlator qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=ACCUMULATOR_TYPE) - + # We start from end of seqlen_k so only the first iteration would need # to be checked for padding if it is not a multiple of block_n # TODO: This can be optimized to only be true for the padded block. 
@@ -296,17 +469,18 @@ def _attn_fwd_mask(acc, l_i, m_i, qk = tl.where(mask, qk, float("-inf")) # -- compute qk ---- - if IS_FP8 : - qk += (tl.dot(q, k) * q_descale * k_descale) + if IS_FP8: + qk += tl.dot(q, k) * q_descale * k_descale else: qk += tl.dot(q, k) - qk_scaled = qk * SM_SCALE + qk_scaled = qk * SM_SCALE if USE_ALIBI: # compute the global position of each token within the sequence q_offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) - alibi_block = compute_alibi_block(alibi_slope, seqlen_q, seqlen_k, q_offs_m, - kv_offs_n) + alibi_block = compute_alibi_block( + alibi_slope, seqlen_q, seqlen_k, q_offs_m, kv_offs_n + ) qk_scaled += alibi_block if USE_SLIDING_WINDOW: @@ -315,35 +489,39 @@ def _attn_fwd_mask(acc, l_i, m_i, # For causal sliding window, we need to apply both constraints: # 1. Causal: col_idx <= row_idx + (seqlen_k - seqlen_q) # 2. Sliding window: row_idx - window_left <= col_idx <= row_idx + window_right - + # Get positions row_idx = offs_m # Query positions col_idx = kv_offs_n # Key positions - + # Expand for broadcasting row_idx_expanded = row_idx[:, None] # [BLOCK_M, 1] col_idx_expanded = col_idx[None, :] # [1, BLOCK_N] - + # Apply causal constraint: can only attend to positions before or at the diagonal causal_offset = seqlen_k - seqlen_q causal_mask = col_idx_expanded > (row_idx_expanded + causal_offset) - + # Apply sliding window constraint if WINDOW_SIZE_LEFT < 0: # Only right window constraint - window_mask = col_idx_expanded > (row_idx_expanded + causal_offset + WINDOW_SIZE_RIGHT) + window_mask = col_idx_expanded > ( + row_idx_expanded + causal_offset + WINDOW_SIZE_RIGHT + ) else: # Both left and right window constraints # Adjust window bounds by causal offset left_bound = row_idx_expanded + causal_offset - WINDOW_SIZE_LEFT right_bound = row_idx_expanded + causal_offset + WINDOW_SIZE_RIGHT - + # Can't attend to positions outside the window - window_mask = (col_idx_expanded < left_bound) | (col_idx_expanded > right_bound) - + window_mask = (col_idx_expanded < left_bound) | ( + col_idx_expanded > right_bound + ) + # Final mask is the union of both constraints (True = cannot attend) mask = causal_mask | window_mask - + # Apply mask qk_scaled = tl.where(mask, float("-inf"), qk_scaled) else: @@ -351,25 +529,27 @@ def _attn_fwd_mask(acc, l_i, m_i, # Exactly matching reference construct_local_mask: # row_idx = query positions, col_idx = key positions # sk = seqlen_k, sq = seqlen_q - - # Get positions + + # Get positions row_idx = offs_m # Query positions col_idx = kv_offs_n # Key positions - + # sk and sq from reference (no padding masks in this test) sk = seqlen_k sq = seqlen_q - + # Expand for broadcasting row_idx_expanded = row_idx[:, None] # [BLOCK_M, 1] col_idx_expanded = col_idx[None, :] # [1, BLOCK_N] - + # Reference logic for mask computation if WINDOW_SIZE_LEFT < 0: # Reference: return col_idx > row_idx + sk - sq + window_size[1] - mask = col_idx_expanded > (row_idx_expanded + sk - sq + WINDOW_SIZE_RIGHT) + mask = col_idx_expanded > ( + row_idx_expanded + sk - sq + WINDOW_SIZE_RIGHT + ) else: - # Reference: + # Reference: # sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk # return torch.logical_or( # col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk), @@ -378,15 +558,17 @@ def _attn_fwd_mask(acc, l_i, m_i, # Create sk tensor with proper shape for broadcasting # sk represents the key sequence length, which should be compared per column sk_full = tl.full((1, BLOCK_N), sk, dtype=tl.int32) - + # Compute boundaries 
right_bound_val = row_idx_expanded + sk - sq + WINDOW_SIZE_RIGHT right_bound = tl.minimum(right_bound_val, sk_full) left_bound = row_idx_expanded + sk - sq - WINDOW_SIZE_LEFT - + # Mask where True = cannot attend (matching reference) - mask = (col_idx_expanded > right_bound) | (col_idx_expanded < left_bound) - + mask = (col_idx_expanded > right_bound) | ( + col_idx_expanded < left_bound + ) + # Apply mask (set to -inf where mask is True) qk_scaled = tl.where(mask, float("-inf"), qk_scaled) else: @@ -394,7 +576,7 @@ def _attn_fwd_mask(acc, l_i, m_i, causal_boundary = start_n + offs_n - seqlen_delta_qk causal_mask = offs_m[:, None] >= causal_boundary[None, :] qk_scaled = tl.where(causal_mask, qk_scaled, float("-inf")) - + # compute qk mask qk_mask = (offs_m[:, None] < seqlen_q) & (kv_offs_n[None, :] < seqlen_k) @@ -414,12 +596,12 @@ def _attn_fwd_mask(acc, l_i, m_i, if USE_SLIDING_WINDOW: # Check if this block has any valid values (m_ij != -inf) # For rows where everything is -inf, set q_shifted to -inf (not NaN) - q_shifted = tl.where(m_ij[:, None] == float("-inf"), - float("-inf"), - qk_scaled - m_ij[:, None]) + q_shifted = tl.where( + m_ij[:, None] == float("-inf"), float("-inf"), qk_scaled - m_ij[:, None] + ) else: q_shifted = qk_scaled - m_ij[:, None] - + # Compute scaled QK and softmax probabilities if USE_EXP2: p = tl.math.exp2(q_shifted * RCP_LN2) @@ -435,7 +617,9 @@ def _attn_fwd_mask(acc, l_i, m_i, if tl_DROPOUT_USE_PYTORCH: dropout_mask = tl.load(dropout_mask_ptrs, mask=qk_mask) else: - rng_output = tl.rand(philox_seed, philox_ptrs) # TODO: use tl.randint for better performance + rng_output = tl.rand( + philox_seed, philox_ptrs + ) # TODO: use tl.randint for better performance dropout_mask = rng_output > dropout_p if tl_DROPOUT_DUMP: tl.store(dropout_mask_ptrs, dropout_mask, mask=qk_mask) @@ -450,13 +634,11 @@ def _attn_fwd_mask(acc, l_i, m_i, # NOTE: the returned score is not the same as the reference because we need to adjust as we find new maxes per block. 
We are not doing that sd_mask_ptrs = sd_mask_base_ptrs + start_n * stride_sn tl.store(sd_mask_ptrs, p, mask=qk_mask) - + # -- update output accumulator -- # alpha is an adjustment factor for acc and li as we loop and find new maxes # store the diff in maxes to adjust acc and li as we discover new maxes - m_diff = tl.where(m_ij == float("-inf"), - float("-inf"), - m_i - m_ij) + m_diff = tl.where(m_ij == float("-inf"), float("-inf"), m_i - m_ij) if USE_EXP2: alpha = tl.math.exp2(m_diff * RCP_LN2) else: @@ -464,7 +646,7 @@ def _attn_fwd_mask(acc, l_i, m_i, acc = acc * alpha[:, None] if not PRE_LOAD_V: v = tl.load(v_ptrs, mask=v_mask, other=0.0) - + # -- update m_i and l_i l_i = l_i * alpha + l_ij m_i = m_ij @@ -472,20 +654,29 @@ def _attn_fwd_mask(acc, l_i, m_i, if IS_FP8: if FP8_P_DESCALE: scale_p, descale_p = compute_fp8_scaling_factors(p, FP8_MAX) - acc += (tl.dot((p * scale_p).to(v.type.element_ty), v) * descale_p * v_descale) + acc += ( + tl.dot((p * scale_p).to(v.type.element_ty), v) + * descale_p + * v_descale + ) else: acc += tl.dot(p.to(v.type.element_ty), v) * v_descale else: acc += tl.dot(p.to(v.type.element_ty), v) - + return acc, l_i, m_i @triton.jit -def compute_window_bounds(q_start, q_end, diag, seqlen_k, - WINDOW_SIZE_LEFT: tl.constexpr, - WINDOW_SIZE_RIGHT: tl.constexpr, - IS_CAUSAL: tl.constexpr): +def compute_window_bounds( + q_start, + q_end, + diag, + seqlen_k, + WINDOW_SIZE_LEFT: tl.constexpr, + WINDOW_SIZE_RIGHT: tl.constexpr, + IS_CAUSAL: tl.constexpr, +): """Calculate the window boundaries for a query block.""" # Left boundary if WINDOW_SIZE_LEFT < 0: @@ -494,8 +685,8 @@ def compute_window_bounds(q_start, q_end, diag, seqlen_k, else: left_min = tl.maximum(0, q_start + diag - WINDOW_SIZE_LEFT) left_max = tl.maximum(0, q_end + diag - WINDOW_SIZE_LEFT) - - # Right boundary + + # Right boundary if IS_CAUSAL: # Causal cap: col ≤ row + diag right_min = tl.minimum(seqlen_k - 1, q_start + diag) @@ -508,41 +699,54 @@ def compute_window_bounds(q_start, q_end, diag, seqlen_k, # Non-causal doesn't have the diagonal constraint right_min = tl.minimum(seqlen_k - 1, q_start + diag + WINDOW_SIZE_RIGHT) right_max = tl.minimum(seqlen_k - 1, q_end + diag + WINDOW_SIZE_RIGHT) - + return left_min, left_max, right_min, right_max + @triton.jit -def classify_window_blocks(left_min, left_max, right_min, right_max, - BLOCK_N: tl.constexpr): +def classify_window_blocks( + left_min, left_max, right_min, right_max, BLOCK_N: tl.constexpr +): """Classify blocks based on window boundaries.""" # First and last blocks that have ANY overlap with window first_block = left_min // BLOCK_N last_block = right_max // BLOCK_N - + # First block that is FULLY visible for all rows in Q block full_left_block = left_max // BLOCK_N + (left_max % BLOCK_N != 0) clipped_left = tl.minimum(full_left_block, last_block + 1) - + # Last block that is FULLY visible for all rows in Q block last_full_block_candidate = right_min // BLOCK_N if (last_full_block_candidate + 1) * BLOCK_N - 1 > right_min: last_full_block_candidate -= 1 full_right_block = tl.maximum(last_full_block_candidate, clipped_left - 1) - + # Calculate counts n_front_skip_blocks = first_block n_front_masked_blocks = tl.maximum(0, clipped_left - first_block) n_full_blocks = tl.maximum(0, full_right_block - clipped_left + 1) n_back_masked_blocks = tl.maximum(0, last_block - full_right_block) - - return (n_front_skip_blocks, n_front_masked_blocks, - n_full_blocks, n_back_masked_blocks, - clipped_left) # Return clipped_left for padded block handling + + return ( + 
n_front_skip_blocks, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + clipped_left, + ) # Return clipped_left for padded block handling + @triton.jit -def handle_padded_last_block(n_extra_tokens, last_block, total_k_blocks, - clipped_left, n_front_masked_blocks, - n_full_blocks, n_back_masked_blocks): +def handle_padded_last_block( + n_extra_tokens, + last_block, + total_k_blocks, + clipped_left, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, +): """Ensure a padded last K-block is never classified as 'full'. We move the padded last block (if visible) into the back-masked bucket. @@ -570,6 +774,7 @@ def handle_padded_last_block(n_extra_tokens, last_block, total_k_blocks, return n_front_masked_blocks, n_full_blocks, n_back_masked_blocks + @triton.jit def compute_padding_info(seqlen_k, BLOCK_N: tl.constexpr): """Calculate padding information for the last K block.""" @@ -589,14 +794,22 @@ def compute_padding_info(seqlen_k, BLOCK_N: tl.constexpr): n_extra_tokens = 0 return n_extra_tokens + @triton.jit -def compute_block_masking(seqlen_k, seqlen_q, start_m, - IS_CAUSAL: tl.constexpr, USE_SLIDING_WINDOW: tl.constexpr, - WINDOW_SIZE_LEFT: tl.constexpr, WINDOW_SIZE_RIGHT: tl.constexpr, - BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): +def compute_block_masking( + seqlen_k, + seqlen_q, + start_m, + IS_CAUSAL: tl.constexpr, + USE_SLIDING_WINDOW: tl.constexpr, + WINDOW_SIZE_LEFT: tl.constexpr, + WINDOW_SIZE_RIGHT: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_N: tl.constexpr, +): """ Classify K blocks for attention computation with sliding window support. - + Returns: - n_front_skip_blocks: Blocks completely before the window - n_front_masked_blocks: Blocks partially overlapping window front @@ -607,39 +820,57 @@ def compute_block_masking(seqlen_k, seqlen_q, start_m, # common q_start = start_m * BLOCK_M - q_end = tl.minimum((start_m + 1) * BLOCK_M - 1, seqlen_q - 1) - diag = seqlen_k - seqlen_q + q_end = tl.minimum((start_m + 1) * BLOCK_M - 1, seqlen_q - 1) + diag = seqlen_k - seqlen_q total_k_blocks = tl.cdiv(seqlen_k, BLOCK_N) n_extra_tokens = compute_padding_info(seqlen_k, BLOCK_N) - + if USE_SLIDING_WINDOW: # get window bounds left_min, left_max, right_min, right_max = compute_window_bounds( - q_start, q_end, diag, seqlen_k, - WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT, IS_CAUSAL + q_start, + q_end, + diag, + seqlen_k, + WINDOW_SIZE_LEFT, + WINDOW_SIZE_RIGHT, + IS_CAUSAL, ) # window vanishes → early exit if right_max < left_min: return 0, 0, 0, 0, n_extra_tokens - + # classify blocks - (n_front_skip_blocks, n_front_masked_blocks, - n_full_blocks, n_back_masked_blocks, - clipped_left) = classify_window_blocks( - left_min, left_max, right_min, right_max, BLOCK_N - ) - + ( + n_front_skip_blocks, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + clipped_left, + ) = classify_window_blocks(left_min, left_max, right_min, right_max, BLOCK_N) + # handle padded last block if needed if n_extra_tokens != 0: last_block = right_max // BLOCK_N - n_front_masked_blocks, n_full_blocks, n_back_masked_blocks = handle_padded_last_block( - n_extra_tokens, last_block, total_k_blocks, - clipped_left, n_front_masked_blocks, - n_full_blocks, n_back_masked_blocks + n_front_masked_blocks, n_full_blocks, n_back_masked_blocks = ( + handle_padded_last_block( + n_extra_tokens, + last_block, + total_k_blocks, + clipped_left, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + ) ) - return (n_front_skip_blocks, n_front_masked_blocks, - n_full_blocks, 
n_back_masked_blocks, n_extra_tokens) + return ( + n_front_skip_blocks, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + n_extra_tokens, + ) else: if IS_CAUSAL: # ========== CAUSAL MODE: Classify K Blocks ========== @@ -660,19 +891,19 @@ def compute_block_masking(seqlen_k, seqlen_q, start_m, # 1. figure out, in tokens, the right-most K position # this Q-block may attend to # ------------------------------------------------------------ - k_max_token = q_end + diag # last visible K index + k_max_token = q_end + diag # last visible K index # this Q-block is entirely above the diagonal ⇒ nothing to do if k_max_token < 0: return 0, 0, 0, 0, n_extra_tokens - k_max_token = tl.minimum(k_max_token, seqlen_k - 1) + k_max_token = tl.minimum(k_max_token, seqlen_k - 1) # ------------------------------------------------------------ # 2. translate token indices into K-block indices # ------------------------------------------------------------ last_visible_k_block = k_max_token // BLOCK_N - n_visible_k_blocks = tl.minimum(last_visible_k_block + 1, total_k_blocks) + n_visible_k_blocks = tl.minimum(last_visible_k_block + 1, total_k_blocks) # ------------------------------------------------------------ # 3. classify those visible blocks @@ -685,14 +916,14 @@ def compute_block_masking(seqlen_k, seqlen_q, start_m, # middle of a K-block or the last K-block is padded # ------------------------------------------------------------ padded_last_k = n_extra_tokens != 0 - is_modulo_mn = (not padded_last_k) & (seqlen_q % BLOCK_M == 0) + is_modulo_mn = (not padded_last_k) & (seqlen_q % BLOCK_M == 0) n_back_masked_blocks = BLOCK_M // BLOCK_N + tl.where(is_modulo_mn, 0, 1) n_back_masked_blocks = tl.minimum(n_back_masked_blocks, n_visible_k_blocks) - n_front_skip_blocks = 0 # causal never skips the left side - n_front_masked_blocks = 0 # ditto - n_full_blocks = n_visible_k_blocks - n_back_masked_blocks + n_front_skip_blocks = 0 # causal never skips the left side + n_front_masked_blocks = 0 # ditto + n_full_blocks = n_visible_k_blocks - n_back_masked_blocks else: # ========== NON-CAUSAL MODE ========== # Without causal mask, all positions can attend to all positions @@ -707,17 +938,24 @@ def compute_block_masking(seqlen_k, seqlen_q, start_m, # [ 1 1 1 1] [ 1 1 1 1] [ 1 1 -∞ -∞] # [ 1 1 1 1] [ 1 1 1 1] [ 1 1 -∞ -∞] # [ 1 1 1 1] [ 1 1 1 1] [ 1 1 -∞ -∞] - - n_front_skip_blocks = 0 # never skips the left side - n_front_masked_blocks = 0 # ditto + + n_front_skip_blocks = 0 # never skips the left side + n_front_masked_blocks = 0 # ditto if n_extra_tokens != 0: n_back_masked_blocks = 1 # Last block needs padding mask n_full_blocks = total_k_blocks - 1 else: n_back_masked_blocks = 0 # All blocks are aligned n_full_blocks = total_k_blocks - - return n_front_skip_blocks, n_front_masked_blocks, n_full_blocks, n_back_masked_blocks, n_extra_tokens + + return ( + n_front_skip_blocks, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + n_extra_tokens, + ) + @triton.autotune( configs=fwd_prefill_autotune_configs, @@ -725,20 +963,86 @@ def compute_block_masking(seqlen_k, seqlen_q, start_m, use_cuda_graph=True, ) @triton.jit -def attn_fwd(Q, K, V, bias, - Q_Descale, K_Descale, V_Descale, stride_q_descale_z, stride_k_descale_z, stride_v_descale_z, - SM_SCALE: tl.constexpr, LSE, Out, stride_qz, stride_qh, stride_qm, stride_qk, - stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, - stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, 
stride_az, stride_ah, - stride_sz, stride_sh, stride_sm, stride_sn, stride_lse_z, stride_lse_h, stride_lse_m, cu_seqlens_q, cu_seqlens_k, - seqused_q, seqused_k, # Add seqused parameters - dropout_p, philox_seed, philox_offset_base, sd_mask, dropout_mask, alibi_slopes, HQ: tl.constexpr, - HK: tl.constexpr, ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, ACTUAL_BLOCK_DMODEL_V: tl.constexpr, MAX_SEQLENS_Q: tl.constexpr, - MAX_SEQLENS_K: tl.constexpr, IS_VARLEN: tl.constexpr, IS_CAUSAL: tl.constexpr, - USE_SLIDING_WINDOW: tl.constexpr, WINDOW_SIZE_LEFT: tl.constexpr, WINDOW_SIZE_RIGHT: tl.constexpr, BLOCK_M: tl.constexpr, - BLOCK_DMODEL_QK: tl.constexpr, BLOCK_DMODEL_V: tl.constexpr, BLOCK_N: tl.constexpr, PRE_LOAD_V: tl.constexpr, USE_BIAS: tl.constexpr, - ENABLE_DROPOUT: tl.constexpr, RETURN_SCORES: tl.constexpr, NEEDS_SDMASK : tl.constexpr, USE_ALIBI: tl.constexpr, USE_EXP2: tl.constexpr, - IS_FP8: tl.constexpr, FP8_MAX: tl.constexpr, FP8_P_DESCALE: tl.constexpr, USE_SEQUSED: tl.constexpr): +def attn_fwd( + Q, + K, + V, + bias, + Q_Descale, + K_Descale, + V_Descale, + stride_q_descale_z, + stride_k_descale_z, + stride_v_descale_z, + SM_SCALE: tl.constexpr, + LSE, + Out, + stride_qz, + stride_qh, + stride_qm, + stride_qk, + stride_kz, + stride_kh, + stride_kn, + stride_kk, + stride_vz, + stride_vh, + stride_vk, + stride_vn, + stride_oz, + stride_oh, + stride_om, + stride_on, + stride_bz, + stride_bh, + stride_bm, + stride_bn, + stride_az, + stride_ah, + stride_sz, + stride_sh, + stride_sm, + stride_sn, + stride_lse_z, + stride_lse_h, + stride_lse_m, + cu_seqlens_q, + cu_seqlens_k, + seqused_q, + seqused_k, # Add seqused parameters + dropout_p, + philox_seed, + philox_offset_base, + sd_mask, + dropout_mask, + alibi_slopes, + HQ: tl.constexpr, + HK: tl.constexpr, + ACTUAL_BLOCK_DMODEL_QK: tl.constexpr, + ACTUAL_BLOCK_DMODEL_V: tl.constexpr, + MAX_SEQLENS_Q: tl.constexpr, + MAX_SEQLENS_K: tl.constexpr, + IS_VARLEN: tl.constexpr, + IS_CAUSAL: tl.constexpr, + USE_SLIDING_WINDOW: tl.constexpr, + WINDOW_SIZE_LEFT: tl.constexpr, + WINDOW_SIZE_RIGHT: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL_QK: tl.constexpr, + BLOCK_DMODEL_V: tl.constexpr, + BLOCK_N: tl.constexpr, + PRE_LOAD_V: tl.constexpr, + USE_BIAS: tl.constexpr, + ENABLE_DROPOUT: tl.constexpr, + RETURN_SCORES: tl.constexpr, + NEEDS_SDMASK: tl.constexpr, + USE_ALIBI: tl.constexpr, + USE_EXP2: tl.constexpr, + IS_FP8: tl.constexpr, + FP8_MAX: tl.constexpr, + FP8_P_DESCALE: tl.constexpr, + USE_SEQUSED: tl.constexpr, +): # set params ACCUMULATOR_TYPE = tl.float32 @@ -753,8 +1057,8 @@ def attn_fwd(Q, K, V, bias, else: off_h_k = off_h_q # Determine if we need to mask the heads - PADDED_HEAD_QK: tl.constexpr = (ACTUAL_BLOCK_DMODEL_QK != BLOCK_DMODEL_QK) - PADDED_HEAD_V: tl.constexpr = (ACTUAL_BLOCK_DMODEL_V != BLOCK_DMODEL_V) + PADDED_HEAD_QK: tl.constexpr = ACTUAL_BLOCK_DMODEL_QK != BLOCK_DMODEL_QK + PADDED_HEAD_V: tl.constexpr = ACTUAL_BLOCK_DMODEL_V != BLOCK_DMODEL_V offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) @@ -765,24 +1069,36 @@ def attn_fwd(Q, K, V, bias, if IS_VARLEN: cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z) cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1) - + # If seqused is provided, use it to limit the actual sequence length if USE_SEQUSED: - actual_seqlen_q = tl.load(seqused_q + off_z) if seqused_q is not None else cu_seqlens_q_end - cu_seqlens_q_start - seqlen_q = tl.minimum(actual_seqlen_q, cu_seqlens_q_end - cu_seqlens_q_start) + actual_seqlen_q = ( + tl.load(seqused_q + off_z) + if 
seqused_q is not None + else cu_seqlens_q_end - cu_seqlens_q_start + ) + seqlen_q = tl.minimum( + actual_seqlen_q, cu_seqlens_q_end - cu_seqlens_q_start + ) else: seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start - + # we have a one-size-fits-all grid in id(0). Some seqlens might be too small for all start_m so for those we return early. if start_m * BLOCK_M > seqlen_q: return cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z) cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1) - + # If seqused is provided, use it to limit the actual sequence length for keys if USE_SEQUSED: - actual_seqlen_k = tl.load(seqused_k + off_z) if seqused_k is not None else cu_seqlens_k_end - cu_seqlens_k_start - seqlen_k = tl.minimum(actual_seqlen_k, cu_seqlens_k_end - cu_seqlens_k_start) + actual_seqlen_k = ( + tl.load(seqused_k + off_z) + if seqused_k is not None + else cu_seqlens_k_end - cu_seqlens_k_start + ) + seqlen_k = tl.minimum( + actual_seqlen_k, cu_seqlens_k_end - cu_seqlens_k_start + ) else: seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start else: @@ -796,19 +1112,35 @@ def attn_fwd(Q, K, V, bias, # For MQA/GQA (GROUP_SIZE != 1), q_descale uses the same indexing as k/v (off_h_k) # For MHA (GROUP_SIZE == 1), q_descale uses off_h_q (same as off_h_k) if GROUP_SIZE != 1: - q_descale = tl.load(Q_Descale + off_z * stride_q_descale_z + off_h_k) # MQA/GQA: broadcast using k/v head index + q_descale = tl.load( + Q_Descale + off_z * stride_q_descale_z + off_h_k + ) # MQA/GQA: broadcast using k/v head index else: - q_descale = tl.load(Q_Descale + off_z * stride_q_descale_z + off_h_q) # MHA: use q head index + q_descale = tl.load( + Q_Descale + off_z * stride_q_descale_z + off_h_q + ) # MHA: use q head index k_descale = tl.load(K_Descale + off_z * stride_k_descale_z + off_h_k) v_descale = tl.load(V_Descale + off_z * stride_v_descale_z + off_h_k) else: q_descale, k_descale, v_descale = 1.0, 1.0, 1.0 - # figure out masking pattern - n_front_skip_blocks, n_front_masked_blocks, n_full_blocks, n_back_masked_blocks, n_extra_tokens = compute_block_masking( - seqlen_k, seqlen_q, start_m, IS_CAUSAL, USE_SLIDING_WINDOW, - WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT, BLOCK_M, BLOCK_N + ( + n_front_skip_blocks, + n_front_masked_blocks, + n_full_blocks, + n_back_masked_blocks, + n_extra_tokens, + ) = compute_block_masking( + seqlen_k, + seqlen_q, + start_m, + IS_CAUSAL, + USE_SLIDING_WINDOW, + WINDOW_SIZE_LEFT, + WINDOW_SIZE_RIGHT, + BLOCK_M, + BLOCK_N, ) # ============================================================ @@ -820,18 +1152,33 @@ def attn_fwd(Q, K, V, bias, No K blocks visible - write zeros and exit. 
""" # Write zeros to output - o_offset = Out + off_z * stride_oz + off_h_q * stride_oh + cu_seqlens_q_start * stride_om + o_offset = ( + Out + + off_z * stride_oz + + off_h_q * stride_oh + + cu_seqlens_q_start * stride_om + ) o_ptrs = o_offset + offs_m[:, None] * stride_om + offs_d_v[None, :] * stride_on - o_mask = (offs_m[:, None] < seqlen_q) + o_mask = offs_m[:, None] < seqlen_q if PADDED_HEAD_V: o_mask = o_mask & (offs_d_v[None, :] < ACTUAL_BLOCK_DMODEL_V) - tl.store(o_ptrs, tl.zeros([BLOCK_M, BLOCK_DMODEL_V], dtype=Out.type.element_ty), mask=o_mask) - + tl.store( + o_ptrs, + tl.zeros([BLOCK_M, BLOCK_DMODEL_V], dtype=Out.type.element_ty), + mask=o_mask, + ) + # Write zeros to LSE - l_ptrs = LSE + off_z * stride_lse_z + off_h_q * stride_lse_h + cu_seqlens_q_start * stride_lse_m + offs_m * stride_lse_m + l_ptrs = ( + LSE + + off_z * stride_lse_z + + off_h_q * stride_lse_h + + cu_seqlens_q_start * stride_lse_m + + offs_m * stride_lse_m + ) tl.store(l_ptrs, tl.zeros([BLOCK_M], dtype=tl.float32), mask=offs_m < seqlen_q) return - + # ============================================================ # NORMAL PROCESSING (Some K Blocks Visible) # ============================================================ @@ -839,19 +1186,30 @@ def attn_fwd(Q, K, V, bias, This program has visible K blocks to process. We'll use two calls to handle different block types efficiently. """ - + # Initialize for processing # Compute pointers for all the tensors used in this kernel. - q_offset = Q + off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm + q_offset = ( + Q + off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm + ) q_ptrs = q_offset + offs_m[:, None] * stride_qm + offs_d_qk[None, :] * stride_qk - k_offset = K + off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn + k_offset = ( + K + off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn + ) k_ptrs = k_offset + offs_d_qk[:, None] * stride_kk + offs_n[None, :] * stride_kn - v_offset = V + off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk + v_offset = ( + V + off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk + ) v_ptrs = v_offset + offs_n[:, None] * stride_vk + offs_d_v[None, :] * stride_vn if USE_BIAS: # Note: this might get large enough to overflow on some configs bias_offset = off_h_q * stride_bh - bias_ptrs = bias + bias_offset + offs_m[:, None] * stride_bm + offs_n[None, :] * stride_bn + bias_ptrs = ( + bias + + bias_offset + + offs_m[:, None] * stride_bm + + offs_n[None, :] * stride_bn + ) else: bias_ptrs = None @@ -862,16 +1220,32 @@ def attn_fwd(Q, K, V, bias, alibi_slope = None if NEEDS_SDMASK: - sd_mask_offset = sd_mask + off_z * stride_sz + off_h_q * stride_sh #+ cu_seqlens_q_start * stride_sm - sd_mask_ptrs = sd_mask_offset + offs_m[:, None] * stride_sm + offs_n[None, :] * stride_sn + sd_mask_offset = ( + sd_mask + off_z * stride_sz + off_h_q * stride_sh + ) # + cu_seqlens_q_start * stride_sm + sd_mask_ptrs = ( + sd_mask_offset + offs_m[:, None] * stride_sm + offs_n[None, :] * stride_sn + ) else: sd_mask_ptrs = None if ENABLE_DROPOUT: - dropout_mask_offset = dropout_mask + off_z * stride_sz + off_h_q * stride_sh #+ cu_seqlens_q_start * stride_sm - dropout_mask_ptrs = dropout_mask_offset + offs_m[:, None] * stride_sm + offs_n[None, :] * stride_sn - batch_philox_offset = philox_offset_base + off_z * stride_sz + off_h_q * stride_sh #+ cu_seqlens_q_start * stride_sm - philox_ptrs = batch_philox_offset + offs_m[:, None] * stride_sm + 
offs_n[None, :] * stride_sn + dropout_mask_offset = ( + dropout_mask + off_z * stride_sz + off_h_q * stride_sh + ) # + cu_seqlens_q_start * stride_sm + dropout_mask_ptrs = ( + dropout_mask_offset + + offs_m[:, None] * stride_sm + + offs_n[None, :] * stride_sn + ) + batch_philox_offset = ( + philox_offset_base + off_z * stride_sz + off_h_q * stride_sh + ) # + cu_seqlens_q_start * stride_sm + philox_ptrs = ( + batch_philox_offset + + offs_m[:, None] * stride_sm + + offs_n[None, :] * stride_sn + ) else: dropout_mask_ptrs = None philox_ptrs = 0 @@ -887,100 +1261,196 @@ def attn_fwd(Q, K, V, bias, q_ptrs_mask = q_ptrs_mask & (offs_d_qk[None, :] < ACTUAL_BLOCK_DMODEL_QK) q = tl.load(q_ptrs, mask=q_ptrs_mask, other=0.0) - # ========== Process MASKED K Blocks in the front ========== # NOTE: we use USE_SLIDING_WINDOW as guard because the compiler will crash other wise. front masking is only for sliding window so that is fine. if n_front_masked_blocks > 0 and USE_SLIDING_WINDOW: block_min = n_front_skip_blocks * BLOCK_N block_max = (n_front_skip_blocks + n_front_masked_blocks) * BLOCK_N - + acc, l_i, m_i = _attn_fwd_mask( - acc, l_i, m_i, - q, k_ptrs, v_ptrs, bias_ptrs, - stride_kn, stride_vk, stride_bn, stride_sn, - start_m, seqlen_k, seqlen_q, - dropout_p, philox_seed, philox_ptrs, - sd_mask_ptrs, dropout_mask_ptrs, - offs_m, offs_n, offs_d_qk, offs_d_v, - block_min, # Start of front masked blocks - block_max, # End of front masked blocks - 0, # n_extra_tokens (0 for front blocks, only relevant for last block) - alibi_slope, - q_descale, k_descale, v_descale, IS_FP8, FP8_MAX, FP8_P_DESCALE, + acc, + l_i, + m_i, + q, + k_ptrs, + v_ptrs, + bias_ptrs, + stride_kn, + stride_vk, + stride_bn, + stride_sn, + start_m, + seqlen_k, + seqlen_q, + dropout_p, + philox_seed, + philox_ptrs, + sd_mask_ptrs, + dropout_mask_ptrs, + offs_m, + offs_n, + offs_d_qk, + offs_d_v, + block_min, # Start of front masked blocks + block_max, # End of front masked blocks + 0, # n_extra_tokens (0 for front blocks, only relevant for last block) + alibi_slope, + q_descale, + k_descale, + v_descale, + IS_FP8, + FP8_MAX, + FP8_P_DESCALE, IS_CAUSAL, - BLOCK_M, BLOCK_DMODEL_QK, BLOCK_DMODEL_V, BLOCK_N, + BLOCK_M, + BLOCK_DMODEL_QK, + BLOCK_DMODEL_V, + BLOCK_N, PRE_LOAD_V, - ENABLE_DROPOUT, PADDED_HEAD_QK, PADDED_HEAD_V, - ACTUAL_BLOCK_DMODEL_QK, ACTUAL_BLOCK_DMODEL_V, SM_SCALE, - USE_ALIBI=USE_ALIBI, USE_EXP2=USE_EXP2, - RETURN_SCORES=RETURN_SCORES, - USE_SLIDING_WINDOW=USE_SLIDING_WINDOW, - WINDOW_SIZE_LEFT=WINDOW_SIZE_LEFT, + ENABLE_DROPOUT, + PADDED_HEAD_QK, + PADDED_HEAD_V, + ACTUAL_BLOCK_DMODEL_QK, + ACTUAL_BLOCK_DMODEL_V, + SM_SCALE, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + RETURN_SCORES=RETURN_SCORES, + USE_SLIDING_WINDOW=USE_SLIDING_WINDOW, + WINDOW_SIZE_LEFT=WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT=WINDOW_SIZE_RIGHT, - ACCUMULATOR_TYPE=ACCUMULATOR_TYPE + ACCUMULATOR_TYPE=ACCUMULATOR_TYPE, ) - + # ========== Process FULL K Blocks (Fast Path) ========== if n_full_blocks > 0: block_min = (n_front_skip_blocks + n_front_masked_blocks) * BLOCK_N - block_max = (n_front_skip_blocks + n_front_masked_blocks + n_full_blocks) * BLOCK_N - + block_max = ( + n_front_skip_blocks + n_front_masked_blocks + n_full_blocks + ) * BLOCK_N + acc, l_i, m_i = _attn_fwd_no_mask( - acc, l_i, m_i, - q, k_ptrs, v_ptrs, bias_ptrs, - stride_kn, stride_vk, stride_bn, stride_sn, - start_m, seqlen_k, seqlen_q, - dropout_p, philox_seed, philox_ptrs, - sd_mask_ptrs, dropout_mask_ptrs, - offs_m, offs_n, offs_d_qk, offs_d_v, - block_min, # Start of range: 0 - 
block_max, # End of range: n_full_blocks * BLOCK_N + acc, + l_i, + m_i, + q, + k_ptrs, + v_ptrs, + bias_ptrs, + stride_kn, + stride_vk, + stride_bn, + stride_sn, + start_m, + seqlen_k, + seqlen_q, + dropout_p, + philox_seed, + philox_ptrs, + sd_mask_ptrs, + dropout_mask_ptrs, + offs_m, + offs_n, + offs_d_qk, + offs_d_v, + block_min, # Start of range: 0 + block_max, # End of range: n_full_blocks * BLOCK_N alibi_slope, - q_descale, k_descale, v_descale, IS_FP8, FP8_MAX, FP8_P_DESCALE, - BLOCK_M, BLOCK_DMODEL_QK, BLOCK_DMODEL_V, BLOCK_N, + q_descale, + k_descale, + v_descale, + IS_FP8, + FP8_MAX, + FP8_P_DESCALE, + BLOCK_M, + BLOCK_DMODEL_QK, + BLOCK_DMODEL_V, + BLOCK_N, PRE_LOAD_V, - ENABLE_DROPOUT, PADDED_HEAD_QK, PADDED_HEAD_V, - ACTUAL_BLOCK_DMODEL_QK, ACTUAL_BLOCK_DMODEL_V, SM_SCALE, - USE_ALIBI=USE_ALIBI, USE_EXP2=USE_EXP2, - RETURN_SCORES=RETURN_SCORES, ACCUMULATOR_TYPE=ACCUMULATOR_TYPE + ENABLE_DROPOUT, + PADDED_HEAD_QK, + PADDED_HEAD_V, + ACTUAL_BLOCK_DMODEL_QK, + ACTUAL_BLOCK_DMODEL_V, + SM_SCALE, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + RETURN_SCORES=RETURN_SCORES, + ACCUMULATOR_TYPE=ACCUMULATOR_TYPE, ) - + # ========== Process MASKED K Blocks in the back ========== if n_back_masked_blocks > 0: - block_min = (n_front_skip_blocks + n_front_masked_blocks + n_full_blocks) * BLOCK_N - block_max = (n_front_skip_blocks + n_front_masked_blocks + n_full_blocks + n_back_masked_blocks) * BLOCK_N - + block_min = ( + n_front_skip_blocks + n_front_masked_blocks + n_full_blocks + ) * BLOCK_N + block_max = ( + n_front_skip_blocks + + n_front_masked_blocks + + n_full_blocks + + n_back_masked_blocks + ) * BLOCK_N + acc, l_i, m_i = _attn_fwd_mask( - acc, l_i, m_i, - q, k_ptrs, v_ptrs, bias_ptrs, - stride_kn, stride_vk, stride_bn, stride_sn, - start_m, seqlen_k, seqlen_q, - dropout_p, philox_seed, philox_ptrs, - sd_mask_ptrs, dropout_mask_ptrs, - offs_m, offs_n, offs_d_qk, offs_d_v, - block_min, # Start of range: n_full_blocks * BLOCK_N - block_max, # End of range: n_visible_k_blocks * BLOCK_N - n_extra_tokens, # Padding tokens in last block - alibi_slope, - q_descale, k_descale, v_descale, IS_FP8, FP8_MAX, FP8_P_DESCALE, - IS_CAUSAL, # Use actual causal flag - BLOCK_M, BLOCK_DMODEL_QK, BLOCK_DMODEL_V, BLOCK_N, + acc, + l_i, + m_i, + q, + k_ptrs, + v_ptrs, + bias_ptrs, + stride_kn, + stride_vk, + stride_bn, + stride_sn, + start_m, + seqlen_k, + seqlen_q, + dropout_p, + philox_seed, + philox_ptrs, + sd_mask_ptrs, + dropout_mask_ptrs, + offs_m, + offs_n, + offs_d_qk, + offs_d_v, + block_min, # Start of range: n_full_blocks * BLOCK_N + block_max, # End of range: n_visible_k_blocks * BLOCK_N + n_extra_tokens, # Padding tokens in last block + alibi_slope, + q_descale, + k_descale, + v_descale, + IS_FP8, + FP8_MAX, + FP8_P_DESCALE, + IS_CAUSAL, # Use actual causal flag + BLOCK_M, + BLOCK_DMODEL_QK, + BLOCK_DMODEL_V, + BLOCK_N, PRE_LOAD_V, - ENABLE_DROPOUT, PADDED_HEAD_QK, PADDED_HEAD_V, - ACTUAL_BLOCK_DMODEL_QK, ACTUAL_BLOCK_DMODEL_V, SM_SCALE, - USE_ALIBI=USE_ALIBI, USE_EXP2=USE_EXP2, - RETURN_SCORES=RETURN_SCORES, - USE_SLIDING_WINDOW=USE_SLIDING_WINDOW, - WINDOW_SIZE_LEFT=WINDOW_SIZE_LEFT, + ENABLE_DROPOUT, + PADDED_HEAD_QK, + PADDED_HEAD_V, + ACTUAL_BLOCK_DMODEL_QK, + ACTUAL_BLOCK_DMODEL_V, + SM_SCALE, + USE_ALIBI=USE_ALIBI, + USE_EXP2=USE_EXP2, + RETURN_SCORES=RETURN_SCORES, + USE_SLIDING_WINDOW=USE_SLIDING_WINDOW, + WINDOW_SIZE_LEFT=WINDOW_SIZE_LEFT, WINDOW_SIZE_RIGHT=WINDOW_SIZE_RIGHT, - ACCUMULATOR_TYPE=ACCUMULATOR_TYPE + ACCUMULATOR_TYPE=ACCUMULATOR_TYPE, ) # 
============================================================ # EPILOGUE # ============================================================ # This helps the compiler do Newton Raphson on l_i vs on acc which is much larger. - # Instead of directly computing 1/l_i which can be inf, + # Instead of directly computing 1/l_i which can be inf, # we check for the invalid case first if USE_SLIDING_WINDOW: # For rows where m_i is still -inf, no keys were valid @@ -1036,33 +1506,40 @@ def attn_fwd(Q, K, V, bias, # Causal mask (X = can attend, . = cannot): # K0 K1 K2 K3 # Q0 . . . . <- All masked, would give NaN - # Q1 . . . . <- All masked, would give NaN + # Q1 . . . . <- All masked, would give NaN # Q2 X . . . <- First valid row # Q3 X X . . # Q4 X X X . # Q5 X X X X causal_start_idx = seqlen_q - seqlen_k start_m_idx = start_m * BLOCK_M - + # Create mask for rows that need zeroing row_indices = start_m_idx + tl.arange(0, BLOCK_M) causal_mask = row_indices < causal_start_idx - + # Zero out both acc and LSE for these rows if causal_start_idx > start_m_idx: end_m_idx = (start_m + 1) * BLOCK_M if causal_start_idx < end_m_idx: # This block contains the boundary - need to mask acc - out_mask_boundary = tl.full((BLOCK_DMODEL_V, ), causal_start_idx, dtype=tl.int32) + out_mask_boundary = tl.full( + (BLOCK_DMODEL_V,), causal_start_idx, dtype=tl.int32 + ) out_ptrs_mask = row_indices[:, None] >= out_mask_boundary[None, :] z = 0.0 acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty)) - + # Zero out LSE for rows above diagonal softmax_lse = tl.where(causal_mask, 0.0, softmax_lse) # write back LSE(Log Sum Exponents), the log of the normalization constant - l_offset = LSE + off_z * stride_lse_z + off_h_q * stride_lse_h + cu_seqlens_q_start * stride_lse_m + l_offset = ( + LSE + + off_z * stride_lse_z + + off_h_q * stride_lse_h + + cu_seqlens_q_start * stride_lse_m + ) l_ptrs = l_offset + offs_m * stride_lse_m # If seqlen_q not multiple of BLOCK_M, we need to mask out the last few rows. 
@@ -1070,14 +1547,16 @@ def attn_fwd(Q, K, V, bias, end_m_idx = (start_m + 1) * BLOCK_M overflow_size = end_m_idx - seqlen_q if overflow_size > 0: - boundary = tl.full((BLOCK_M, ), BLOCK_M - overflow_size, dtype=tl.int32) + boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=tl.int32) l_ptrs_mask = tl.arange(0, BLOCK_M) < boundary tl.store(l_ptrs, softmax_lse, mask=l_ptrs_mask) else: tl.store(l_ptrs, softmax_lse) # write back O - o_offset = Out + off_z * stride_oz + off_h_q * stride_oh + cu_seqlens_q_start * stride_om + o_offset = ( + Out + off_z * stride_oz + off_h_q * stride_oh + cu_seqlens_q_start * stride_om + ) o_ptrs = o_offset + offs_m[:, None] * stride_om + offs_d_v[None, :] * stride_on o_ptrs_mask = tl.full([BLOCK_M, BLOCK_DMODEL_V], 1, dtype=tl.int1) if overflow_size > 0: @@ -1087,127 +1566,246 @@ def attn_fwd(Q, K, V, bias, tl.store(o_ptrs, acc.to(Out.dtype.element_ty), mask=o_ptrs_mask) -def attention_prefill_forward_triton_impl( - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - o: torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - window_size_left: int, - window_size_right: int, - bias: Optional[torch.Tensor], - layout: Literal["bshd", "bhsd", "thd"], - # varlen - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - max_seqlens_q: int, - max_seqlens_k: int, - # dropout - dropout_p: float, - philox_seed: Optional[int], - philox_offset: Optional[int], - # misc - return_softmax: bool, - use_exp2: bool, - # fp8 - q_descale: Optional[torch.Tensor], - k_descale: Optional[torch.Tensor], - v_descale: Optional[torch.Tensor], - # seqused for FA v3 - seqused_q: Optional[torch.Tensor] = None, - seqused_k: Optional[torch.Tensor] = None, + +def attention_forward_prefill_triton_impl( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + o: torch.Tensor, + sm_scale: float, + alibi_slopes: Optional[torch.Tensor], + causal: bool, + window_size_left: int, + window_size_right: int, + bias: Optional[torch.Tensor], + layout: Literal["bshd", "bhsd", "thd"], + # varlen + cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + max_seqlens_q: int, + max_seqlens_k: int, + # dropout + dropout_p: float, + philox_seed: Optional[int], + philox_offset: Optional[int], + # misc + return_softmax: bool, + use_exp2: bool, + # fp8 + q_descale: Optional[torch.Tensor], + k_descale: Optional[torch.Tensor], + v_descale: Optional[torch.Tensor], + # seqused for FA v3 + seqused_q: Optional[torch.Tensor] = None, + seqused_k: Optional[torch.Tensor] = None, + # rotary (optional) + rotary_cos: Optional[torch.Tensor] = None, + rotary_sin: Optional[torch.Tensor] = None, + rotary_interleaved: bool = False, + seqlens_rotary: Optional[torch.Tensor] = None, ): # get params, strides and shape IS_VARLEN = layout == "thd" # common assertions - assert 0.0 <= dropout_p <= 1.0, f"dropout_p must be between 0 and 1, got {dropout_p}" - assert q.device == k.device == v.device == o.device, \ - f"All tensors must be on the same device. Got: q={q.device}, k={k.device}, v={v.device}, o={o.device}" + assert ( + 0.0 <= dropout_p <= 1.0 + ), f"dropout_p must be between 0 and 1, got {dropout_p}" + assert ( + q.device == k.device == v.device == o.device + ), f"All tensors must be on the same device. 
Got: q={q.device}, k={k.device}, v={v.device}, o={o.device}" assert q.dtype == k.dtype == v.dtype, "q, k, v must have the same dtype" current_device = torch.cuda.current_device() - assert q.is_cuda and q.device.index == current_device, f"Device mismatch: Kernel will launch on cuda:{current_device}, but tensors are on {q.device}" - + assert ( + q.is_cuda and q.device.index == current_device + ), f"Device mismatch: Kernel will launch on cuda:{current_device}, but tensors are on {q.device}" + # get shapes and strides if IS_VARLEN: # shape total_seqlen_q, nheads_q, head_size_q = q.shape total_seqlen_k, nheads_k, head_size_k = k.shape total_seqlen_v, nheads_v, head_size_v = v.shape - + # assert shapes - assert cu_seqlens_q is not None, "cu_seqlens_q must be provided for varlen layout" - assert cu_seqlens_k is not None, "cu_seqlens_k must be provided for varlen layout" - assert max_seqlens_q is not None and max_seqlens_q > 0, "max_seqlens_q must be provided and positive for varlen layout" - assert max_seqlens_k is not None and max_seqlens_k > 0, "max_seqlens_k must be provided and positive for varlen layout" - + assert ( + cu_seqlens_q is not None + ), "cu_seqlens_q must be provided for varlen layout" + assert ( + cu_seqlens_k is not None + ), "cu_seqlens_k must be provided for varlen layout" + assert ( + max_seqlens_q is not None and max_seqlens_q > 0 + ), "max_seqlens_q must be provided and positive for varlen layout" + assert ( + max_seqlens_k is not None and max_seqlens_k > 0 + ), "max_seqlens_k must be provided and positive for varlen layout" + # assert head dimensions - assert head_size_q == head_size_k, f"head sizes must match: q={head_size_q}, k={head_size_k}" - assert nheads_k == nheads_v, f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" - assert nheads_q % nheads_k == 0, f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" - + assert ( + head_size_q == head_size_k + ), f"head sizes must match: q={head_size_q}, k={head_size_k}" + assert ( + nheads_k == nheads_v + ), f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" + assert ( + nheads_q % nheads_k == 0 + ), f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" + # assert output shapes - assert o.shape == (total_seqlen_q, nheads_q, head_size_v), f"o shape {o.shape} != expected {(total_seqlen_q, nheads_q, head_size_v)}" - + assert o.shape == ( + total_seqlen_q, + nheads_q, + head_size_v, + ), f"o shape {o.shape} != expected {(total_seqlen_q, nheads_q, head_size_v)}" + # assert cu_seqlens - assert cu_seqlens_q.dtype == torch.int32, f"cu_seqlens_q must be int32, got {cu_seqlens_q.dtype}" - assert cu_seqlens_k.dtype == torch.int32, f"cu_seqlens_k must be int32, got {cu_seqlens_k.dtype}" + assert ( + cu_seqlens_q.dtype == torch.int32 + ), f"cu_seqlens_q must be int32, got {cu_seqlens_q.dtype}" + assert ( + cu_seqlens_k.dtype == torch.int32 + ), f"cu_seqlens_k must be int32, got {cu_seqlens_k.dtype}" assert cu_seqlens_q[0] == 0, "cu_seqlens_q must start with 0" assert cu_seqlens_k[0] == 0, "cu_seqlens_k must start with 0" - assert cu_seqlens_q[-1] == total_seqlen_q, f"cu_seqlens_q[-1] {cu_seqlens_q[-1]} != total_seqlen_q {total_seqlen_q}" - assert cu_seqlens_k[-1] == total_seqlen_k, f"cu_seqlens_k[-1] {cu_seqlens_k[-1]} != total_seqlen_k {total_seqlen_k}" - + assert ( + cu_seqlens_q[-1] == total_seqlen_q + ), f"cu_seqlens_q[-1] {cu_seqlens_q[-1]} != total_seqlen_q {total_seqlen_q}" + assert ( + cu_seqlens_k[-1] == total_seqlen_k + ), f"cu_seqlens_k[-1] 
{cu_seqlens_k[-1]} != total_seqlen_k {total_seqlen_k}" + # set vars batch = len(cu_seqlens_q) - 1 head_size_qk = head_size_q - + # softmax_lse shape - softmax_lse = torch.zeros((nheads_q, total_seqlen_q), device=q.device, dtype=torch.float32) - + softmax_lse = torch.zeros( + (nheads_q, total_seqlen_q), device=q.device, dtype=torch.float32 + ) + # strides - stride_qb, stride_qh, stride_qm, stride_qd = 0, q.stride(1), q.stride(0), q.stride(2) - stride_kb, stride_kh, stride_kn, stride_kd = 0, k.stride(1), k.stride(0), k.stride(2) - stride_vb, stride_vh, stride_vn, stride_vd = 0, v.stride(1), v.stride(0), v.stride(2) - stride_ob, stride_oh, stride_om, stride_od = 0, o.stride(1), o.stride(0), o.stride(2) - stride_lse_z, stride_lse_h, stride_lse_m = 0, softmax_lse.stride(0), softmax_lse.stride(1) + stride_qb, stride_qh, stride_qm, stride_qd = ( + 0, + q.stride(1), + q.stride(0), + q.stride(2), + ) + stride_kb, stride_kh, stride_kn, stride_kd = ( + 0, + k.stride(1), + k.stride(0), + k.stride(2), + ) + stride_vb, stride_vh, stride_vn, stride_vd = ( + 0, + v.stride(1), + v.stride(0), + v.stride(2), + ) + stride_ob, stride_oh, stride_om, stride_od = ( + 0, + o.stride(1), + o.stride(0), + o.stride(2), + ) + stride_lse_z, stride_lse_h, stride_lse_m = ( + 0, + softmax_lse.stride(0), + softmax_lse.stride(1), + ) else: # shapes batch_q, seqlen_q, nheads_q, head_size_q = q.shape batch_k, seqlen_k, nheads_k, head_size_k = k.shape batch_v, seqlen_v, nheads_v, head_size_v = v.shape - + # assert batch dimensions - assert batch_q == batch_k == batch_v, f"batch sizes must match: q={batch_q}, k={batch_k}, v={batch_v}" - + assert ( + batch_q == batch_k == batch_v + ), f"batch sizes must match: q={batch_q}, k={batch_k}, v={batch_v}" + # assert head dimensions - assert head_size_q == head_size_k, f"head sizes must match: q={head_size_q}, k={head_size_k}" - assert nheads_k == nheads_v, f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" - assert nheads_q % nheads_k == 0, f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" - + assert ( + head_size_q == head_size_k + ), f"head sizes must match: q={head_size_q}, k={head_size_k}" + assert ( + nheads_k == nheads_v + ), f"k and v must have same number of heads: k={nheads_k}, v={nheads_v}" + assert ( + nheads_q % nheads_k == 0 + ), f"nheads_q {nheads_q} must be divisible by nheads_k {nheads_k} for GQA/MQA" + # assert sequence lengths - assert seqlen_k == seqlen_v, f"k and v sequence lengths must match: k={seqlen_k}, v={seqlen_v}" - + assert ( + seqlen_k == seqlen_v + ), f"k and v sequence lengths must match: k={seqlen_k}, v={seqlen_v}" + # assert output shapes - assert o.shape == (batch_q, seqlen_q, nheads_q, head_size_v), f"o shape {o.shape} != expected {(batch_q, seqlen_q, nheads_q, head_size_v)}" - + assert o.shape == ( + batch_q, + seqlen_q, + nheads_q, + head_size_v, + ), f"o shape {o.shape} != expected {(batch_q, seqlen_q, nheads_q, head_size_v)}" + # set vars batch = batch_q head_size_qk = head_size_q max_seqlens_q = seqlen_q max_seqlens_k = seqlen_k - + # softmax_lse shape - softmax_lse = torch.zeros((batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32) - + softmax_lse = torch.zeros( + (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32 + ) + # strides - stride_qb, stride_qh, stride_qm, stride_qd = q.stride(0), q.stride(2), q.stride(1), q.stride(3) - stride_kb, stride_kh, stride_kn, stride_kd = k.stride(0), k.stride(2), k.stride(1), k.stride(3) - stride_vb, stride_vh, stride_vn, stride_vd = 
v.stride(0), v.stride(2), v.stride(1), v.stride(3)
-    stride_ob, stride_oh, stride_om, stride_od = o.stride(0), o.stride(2), o.stride(1), o.stride(3)
+    stride_qb, stride_qh, stride_qm, stride_qd = (
+        q.stride(0),
+        q.stride(2),
+        q.stride(1),
+        q.stride(3),
+    )
+    stride_kb, stride_kh, stride_kn, stride_kd = (
+        k.stride(0),
+        k.stride(2),
+        k.stride(1),
+        k.stride(3),
+    )
+    stride_vb, stride_vh, stride_vn, stride_vd = (
+        v.stride(0),
+        v.stride(2),
+        v.stride(1),
+        v.stride(3),
+    )
+    stride_ob, stride_oh, stride_om, stride_od = (
+        o.stride(0),
+        o.stride(2),
+        o.stride(1),
+        o.stride(3),
+    )
     stride_lse_z, stride_lse_h, stride_lse_m = softmax_lse.stride()
+    # apply rotary embeddings
+    if rotary_cos is not None and rotary_sin is not None:
+        if IS_VARLEN:
+            raise NotImplementedError(
+                "Rotary embeddings with varlen (thd layout) prefill are not implemented yet."
+            )
+        seqlen_offsets = seqlens_rotary if seqlens_rotary is not None else 0
+        local = (window_size_left != -1) or (window_size_right != -1)
+        q, _ = apply_rotary(
+            q,
+            None,
+            rotary_cos,
+            rotary_sin,
+            causal=causal,
+            local=local,
+            interleaved=rotary_interleaved,
+            seqlen_offsets=seqlen_offsets,
+        )
+
     # fp8 setup and assertions
     IS_FP8 = is_fp8(q)
     if IS_FP8:
@@ -1218,39 +1816,56 @@ def attention_prefill_forward_triton_impl(
         # Check and create default descale tensors if not provided
         if (q_descale is None) or (k_descale is None) or (v_descale is None):
             import warnings
-            warnings.warn("FP8 tensors detected but descale factors not provided. Using default scale of 1.0", UserWarning)
+
+            warnings.warn(
+                "FP8 tensors detected but descale factors not provided. Using default scale of 1.0",
+                UserWarning,
+            )
         # Create default descale tensors if not provided
         if q_descale is None:
-            q_descale = torch.ones(batch, nheads_q, dtype=torch.float32, device=q.device)
+            q_descale = torch.ones(
+                batch, nheads_q, dtype=torch.float32, device=q.device
+            )
         if k_descale is None:
-            k_descale = torch.ones(batch, nheads_k, dtype=torch.float32, device=q.device)
+            k_descale = torch.ones(
+                batch, nheads_k, dtype=torch.float32, device=q.device
+            )
         if v_descale is None:
-            v_descale = torch.ones(batch, nheads_k, dtype=torch.float32, device=q.device)
-
+            v_descale = torch.ones(
+                batch, nheads_k, dtype=torch.float32, device=q.device
+            )
+
         # o should be fp32 or fp16/bf16
-        assert o.dtype in [torch.float16, torch.bfloat16, torch.float32], \
-            f"Output tensor o must be fp16, bf16, or fp32 when using fp8, got {o.dtype}"
+        assert o.dtype in [
+            torch.float16,
+            torch.bfloat16,
+            torch.float32,
+        ], f"Output tensor o must be fp16, bf16, or fp32 when using fp8, got {o.dtype}"
         stride_q_descale_z = q_descale.stride(0) if q_descale is not None else 0
         stride_k_descale_z = k_descale.stride(0) if k_descale is not None else 0
         stride_v_descale_z = v_descale.stride(0) if v_descale is not None else 0
-
+
         if DEBUG:
             print(f"FP8 path triggered in fwd_prefill.py")
     else:
         FP8_MAX = None
         q_descale = k_descale = v_descale = None
         stride_q_descale_z = stride_k_descale_z = stride_v_descale_z = None
-
+
         # check output dtype matches input dtype when not using fp8
-        assert o.dtype == q.dtype, f"Output dtype {o.dtype} must match input dtype {q.dtype} when not using fp8"
-
+        assert (
+            o.dtype == q.dtype
+        ), f"Output dtype {o.dtype} must match input dtype {q.dtype} when not using fp8"
+
     # check features
-    use_sliding_window = window_size_left != -1 or window_size_right!= -1
-    use_alibi, (stride_az, stride_ah) = (True, alibi_slopes.stride()) if alibi_slopes is not None else (False, (0, 0))
+    use_sliding_window = window_size_left != -1 or window_size_right != -1
+    use_alibi, (stride_az, stride_ah) = (
+        (True, alibi_slopes.stride()) if alibi_slopes is not None else (False, (0, 0))
+    )
     # NOTE: a large bias tensor leads to overflow during pointer arithmetic
-    if (bias is not None):
-        assert (bias.numel() < 2**31)
+    if bias is not None:
+        assert bias.numel() < 2**31
     # Get closest power of 2 over or equal to 32 for both QK and V dimensions
     padded_d_model_qk = 1 << (head_size_qk - 1).bit_length()
@@ -1266,48 +1881,122 @@ def attention_prefill_forward_triton_impl(
     # only. This return holds no useful output aside from debugging.
     NEEDS_SDMASK = (dropout_p > 0.0) or return_softmax
     if NEEDS_SDMASK:
-        sd_mask = torch.zeros((batch, nheads_q, max_seqlens_q, max_seqlens_k), device=q.device,
-                              dtype=torch.float32)
+        sd_mask = torch.zeros(
+            (batch, nheads_q, max_seqlens_q, max_seqlens_k),
+            device=q.device,
+            dtype=torch.float32,
+        )
         if DROPOUT_USE_PYTORCH:
-            dropout_mask = create_dropout_mask(dropout_p, (batch, nheads_q, max_seqlens_q, max_seqlens_k), seed = philox_seed)
+            dropout_mask = create_dropout_mask(
+                dropout_p,
+                (batch, nheads_q, max_seqlens_q, max_seqlens_k),
+                seed=philox_seed,
+            )
         else:
-            dropout_mask = torch.zeros((batch, nheads_q, max_seqlens_q, max_seqlens_k), device=q.device,
-                                       dtype=torch.float32)
-        stride_sz, stride_sh, stride_sm, stride_sn = (sd_mask.stride(0), sd_mask.stride(1), sd_mask.stride(2), sd_mask.stride(3))
+            dropout_mask = torch.zeros(
+                (batch, nheads_q, max_seqlens_q, max_seqlens_k),
+                device=q.device,
+                dtype=torch.float32,
+            )
+        stride_sz, stride_sh, stride_sm, stride_sn = (
+            sd_mask.stride(0),
+            sd_mask.stride(1),
+            sd_mask.stride(2),
+            sd_mask.stride(3),
+        )
     else:
         sd_mask = None
         dropout_mask = None
         stride_sz, stride_sh, stride_sm, stride_sn = (0, 0, 0, 0)
     if bias is not None:
-        stride_bz, stride_bh, stride_bm, stride_bn = (bias.stride(0), bias.stride(1),bias.stride(2),
-                                                      bias.stride(3))
+        stride_bz, stride_bh, stride_bm, stride_bn = (
+            bias.stride(0),
+            bias.stride(1),
+            bias.stride(2),
+            bias.stride(3),
+        )
     else:
         stride_bz, stride_bh, stride_bm, stride_bn = (0, 0, 0, 0)
     # launch kernel
-    grid = lambda META: (batch, nheads_q, triton.cdiv(max_seqlens_q, META['BLOCK_M']))
-    attn_fwd[grid](q, k, v, bias,
-                   q_descale, k_descale, v_descale, stride_q_descale_z, stride_k_descale_z, stride_v_descale_z,
-                   sm_scale, softmax_lse, o,
-                   stride_qb, stride_qh, stride_qm, stride_qd,
-                   stride_kb, stride_kh, stride_kn, stride_kd,
-                   stride_vb, stride_vh, stride_vn, stride_vd,
-                   stride_ob, stride_oh, stride_om, stride_od,
-                   stride_bz, stride_bh, stride_bm, stride_bn,
-                   stride_az, stride_ah,
-                   stride_sz, stride_sh, stride_sm, stride_sn,
-                   stride_lse_z, stride_lse_h, stride_lse_m,
-                   cu_seqlens_q, cu_seqlens_k,
-                   seqused_q, seqused_k,  # Pass seqused tensors
-                   dropout_p=dropout_p, philox_seed=philox_seed, philox_offset_base=philox_offset, sd_mask=sd_mask, dropout_mask=dropout_mask, alibi_slopes=alibi_slopes,
-                   HQ=nheads_q, HK=nheads_k, ACTUAL_BLOCK_DMODEL_QK=head_size_qk, ACTUAL_BLOCK_DMODEL_V=head_size_v, MAX_SEQLENS_Q=max_seqlens_q,
-                   MAX_SEQLENS_K=max_seqlens_k, IS_CAUSAL=causal,
-                   USE_SLIDING_WINDOW=use_sliding_window, WINDOW_SIZE_LEFT=window_size_left, WINDOW_SIZE_RIGHT=window_size_right,
-                   IS_VARLEN=IS_VARLEN,
-                   BLOCK_DMODEL_QK=padded_d_model_qk, BLOCK_DMODEL_V=padded_d_model_v, USE_BIAS=False if bias is None else True,
-                   USE_ALIBI=use_alibi, ENABLE_DROPOUT=dropout_p > 0.0, USE_EXP2=use_exp2, RETURN_SCORES=return_softmax, NEEDS_SDMASK=NEEDS_SDMASK,
-                   IS_FP8=IS_FP8, FP8_MAX=FP8_MAX, FP8_P_DESCALE=False,
-                   USE_SEQUSED=(seqused_q is not None or seqused_k is not None))  # Add flag for seqused
-
-    return softmax_lse, sd_mask if return_softmax else None
\ No newline at end of file
+    grid = lambda META: (batch, nheads_q, triton.cdiv(max_seqlens_q, META["BLOCK_M"]))
+    attn_fwd[grid](
+        q,
+        k,
+        v,
+        bias,
+        q_descale,
+        k_descale,
+        v_descale,
+        stride_q_descale_z,
+        stride_k_descale_z,
+        stride_v_descale_z,
+        sm_scale,
+        softmax_lse,
+        o,
+        stride_qb,
+        stride_qh,
+        stride_qm,
+        stride_qd,
+        stride_kb,
+        stride_kh,
+        stride_kn,
+        stride_kd,
+        stride_vb,
+        stride_vh,
+        stride_vn,
+        stride_vd,
+        stride_ob,
+        stride_oh,
+        stride_om,
+        stride_od,
+        stride_bz,
+        stride_bh,
+        stride_bm,
+        stride_bn,
+        stride_az,
+        stride_ah,
+        stride_sz,
+        stride_sh,
+        stride_sm,
+        stride_sn,
+        stride_lse_z,
+        stride_lse_h,
+        stride_lse_m,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        seqused_q,
+        seqused_k,  # Pass seqused tensors
+        dropout_p=dropout_p,
+        philox_seed=philox_seed,
+        philox_offset_base=philox_offset,
+        sd_mask=sd_mask,
+        dropout_mask=dropout_mask,
+        alibi_slopes=alibi_slopes,
+        HQ=nheads_q,
+        HK=nheads_k,
+        ACTUAL_BLOCK_DMODEL_QK=head_size_qk,
+        ACTUAL_BLOCK_DMODEL_V=head_size_v,
+        MAX_SEQLENS_Q=max_seqlens_q,
+        MAX_SEQLENS_K=max_seqlens_k,
+        IS_CAUSAL=causal,
+        USE_SLIDING_WINDOW=use_sliding_window,
+        WINDOW_SIZE_LEFT=window_size_left,
+        WINDOW_SIZE_RIGHT=window_size_right,
+        IS_VARLEN=IS_VARLEN,
+        BLOCK_DMODEL_QK=padded_d_model_qk,
+        BLOCK_DMODEL_V=padded_d_model_v,
+        USE_BIAS=False if bias is None else True,
+        USE_ALIBI=use_alibi,
+        ENABLE_DROPOUT=dropout_p > 0.0,
+        USE_EXP2=use_exp2,
+        RETURN_SCORES=return_softmax,
+        NEEDS_SDMASK=NEEDS_SDMASK,
+        IS_FP8=IS_FP8,
+        FP8_MAX=FP8_MAX,
+        FP8_P_DESCALE=False,
+        USE_SEQUSED=(seqused_q is not None or seqused_k is not None),
+    )  # Add flag for seqused
+
+    return softmax_lse, sd_mask if return_softmax else None
diff --git a/flash_attn/flash_attn_triton_amd/fwd_ref.py b/flash_attn/flash_attn_triton_amd/fwd_ref.py
deleted file mode 100755
index a8ca54a7ec3..00000000000
--- a/flash_attn/flash_attn_triton_amd/fwd_ref.py
+++ /dev/null
@@ -1,889 +0,0 @@
-import torch
-import math
-from typing import Literal, Optional, Union
-from .utils import compute_alibi_tensor_ref
-
-DEBUG = False
-DEBUG_CORE = False
-
-def attention_forward_core_ref_impl(
-    q, k, v, sm_scale, causal, window_size_left, window_size_right,
-    dropout_p, philox_seed, philox_offset, alibi_slopes, use_exp2,
-    cache_seqlens=None, block_table=None, paged_kv_block_size=None
-):
-    if DEBUG_CORE:
-        print()
-        print("attention_forward_core_ref_impl")
-        print("q:", q, q.shape)
-        print("k:", k, k.shape)
-        print("v:", v, v.shape)
-        print("sm_scale:", sm_scale)
-        print("causal:", causal)
-        print("window_size_left:", window_size_left)
-        print("window_size_right:", window_size_right)
-        print("dropout_p:", dropout_p)
-        print("philox_seed:", philox_seed)
-        print("philox_offset:", philox_offset)
-        print("use_exp2:", use_exp2)
-        print("cache_seqlens:", cache_seqlens)
-        print("block_table:", block_table)
-        print("paged_kv_block_size:", paged_kv_block_size)
-
-    # cast to float32
-    q = q.to(torch.float32)
-
-    # Check if we're in paged KV mode
-    is_paged = block_table is not None and paged_kv_block_size is not None
-
-    if False: # Debug paged attention (disabled for production)
-        print(f"\n=== attention_forward_core_ref_impl DEBUG ===")
-        print(f"is_paged: {is_paged}")
-        print(f"block_table: {block_table.shape if block_table is not None else None}")
-        print(f"paged_kv_block_size:
{paged_kv_block_size}") - if is_paged: - print(f"k shape (paged): {k.shape}") - print(f"v shape (paged): {v.shape}") - print(f"cache_seqlens: {cache_seqlens}") - - if is_paged: - # In paged mode, k and v are [num_blocks, block_size, nheads_k, head_dim] - # We'll compute attention on-the-fly without reconstructing - nheads_q = q.shape[0] - L_q = q.shape[1] - head_dim = q.shape[2] - - # Get number of KV heads from the cache - nheads_k = k.shape[2] # k shape: [num_blocks, block_size, nheads_k, head_dim] - - # Handle MQA/GQA - assert nheads_q % nheads_k == 0, f"nheads_q ({nheads_q}) must be divisible by nheads_k ({nheads_k})" - group_size = nheads_q // nheads_k - - # Determine the actual KV sequence length from cache_seqlens - L_k = cache_seqlens if isinstance(cache_seqlens, int) else cache_seqlens.item() - - if False: # Debug disabled - print(f"L_q: {L_q}, L_k: {L_k}, nheads_q: {nheads_q}, nheads_k: {nheads_k}, group_size: {group_size}, head_dim: {head_dim}") - print(f"block_table contents: {block_table if block_table is not None else 'None'}") - - # Initialize attention scores - attention_scores = torch.zeros((nheads_q, L_q, L_k), dtype=torch.float32, device=q.device) - - # Compute attention scores on-the-fly by accessing blocks directly - for kv_pos in range(L_k): - # Calculate which block and position within block - block_idx = kv_pos // paged_kv_block_size - within_block_idx = kv_pos % paged_kv_block_size - - # Get the physical block number from block_table - # block_table is [1, num_blocks] for single batch in core function - if block_table.dim() == 2: - physical_block = block_table[0, block_idx].item() - else: - physical_block = block_table[block_idx].item() - - # Debug output disabled - # if kv_pos == 0: - # print(f"First KV access: block_idx={block_idx}, within_block={within_block_idx}, physical_block={physical_block}") - # print(f"k_vec shape will be: {k[physical_block, within_block_idx, :, :].shape}") - - # Access k values directly from paged cache - # k shape: [num_blocks, block_size, nheads_k, head_dim] - k_vec = k[physical_block, within_block_idx, :, :].to(torch.float32) # [nheads_k, head_dim] - - # For GQA/MQA, we need to repeat k_vec for each group - if group_size > 1: - # Expand k_vec to match query heads - # k_vec: [nheads_k, head_dim] -> [nheads_q, head_dim] - k_vec = k_vec.repeat_interleave(group_size, dim=0) - - # Compute dot product with all query positions - # q is [nheads_q, L_q, head_dim], k_vec is [nheads_q, head_dim] - # Result should be [nheads_q, L_q] for this kv_pos - attention_scores[:, :, kv_pos] = torch.sum(q * k_vec.unsqueeze(1), dim=-1) - - # Keep k and v in original format for later v computation - k_paged = k - v_paged = v - - # Debug output disabled - # print(f"attention_scores computed shape: {attention_scores.shape}") - # print(f"attention_scores sample values: {attention_scores[0, 0, :5]}") - else: - # Standard non-paged mode - k = k.to(torch.float32) - v = v.to(torch.float32) - - # get seqlens - L_q, L_k = q.shape[1], k.shape[1] - - # Compute attention scores - attention_scores = torch.matmul(q, k.transpose(-2, -1)) - if DEBUG_CORE: - print("attention_scores:", attention_scores, attention_scores.shape) - - # Scale scores - attention_scaled_scores = sm_scale * attention_scores - if DEBUG_CORE: - print("attention_scaled_scores:", attention_scaled_scores, attention_scaled_scores.shape) - - # Apply ALiBi if slopes are provided - if alibi_slopes is not None: - if cache_seqlens is not None: - # DECODE MODE: Special ALiBi handling - # In decode mode, k has 
shape [nheads, max_cache_len, head_dim] - # but only cache_seqlens positions are valid - - # The test's attn_bias_from_alibi_slopes uses this formula: - # relative_pos = torch.abs(row_idx + sk - sq - col_idx) - # where sk = actual valid key length, sq = query length - - row_idx = torch.arange(L_q, device=q.device, dtype=torch.float32).unsqueeze(1) - col_idx = torch.arange(L_k, device=q.device, dtype=torch.float32).unsqueeze(0) - - # Compute relative positions - # cache_seqlens is the actual number of valid keys (sk in the test) - # L_q is the query sequence length (sq in the test) - relative_pos = torch.abs(row_idx + cache_seqlens - L_q - col_idx) - - # Apply slopes - if alibi_slopes.dim() == 1: - # Shape: [nheads] -> [nheads, 1, 1] - alibi_slopes_expanded = alibi_slopes.view(-1, 1, 1) - else: - # Already has batch dimension - alibi_slopes_expanded = alibi_slopes - - alibi_bias = -alibi_slopes_expanded * relative_pos - - if DEBUG_CORE: - print(f"Decode ALiBi: cache_seqlens={cache_seqlens}, L_q={L_q}, L_k={L_k}") - print(f"relative_pos shape: {relative_pos.shape}") - print(f"alibi_bias shape: {alibi_bias.shape}") - else: - if DEBUG_CORE: - print("alibi_slopes:", alibi_slopes, alibi_slopes.shape) - alibi_bias = compute_alibi_tensor_ref(alibi_slopes, L_q, L_k) - if DEBUG_CORE: - print("alibi_bias:", alibi_bias, alibi_bias.shape) - alibi_bias = alibi_bias.reshape(-1, L_q, L_k) - if DEBUG_CORE: - print("alibi_bias_flat:", alibi_bias, alibi_bias.shape) - - attention_scaled_scores = attention_scaled_scores + alibi_bias - if DEBUG_CORE: - print("attention_scaled_scores after alibi:", attention_scaled_scores, attention_scaled_scores.shape) - - # Apply masks - row_idx = torch.arange(L_q, device=q.device).unsqueeze(1) - col_idx = torch.arange(L_k, device=q.device).unsqueeze(0) - - if cache_seqlens is not None: - # We're in decode mode with a KV cache - # k and v are full allocated size, but only cache_seqlens positions are valid - - # Create a mask for valid cache positions - cache_mask = col_idx < cache_seqlens - - # Use cache_seqlens for offset calculation to match test's construct_local_mask - # which uses key_padding_mask.sum() as the sequence length - col_offset = cache_seqlens - L_q - - if DEBUG_CORE: - print(f"Cache mode: valid_len={cache_seqlens}, L_k={L_k}") - print(f"Using col_offset={col_offset} based on valid cache length") - else: - # Calculate offset for when seqlen_q != seqlen_k - # This offset aligns query positions to key positions - # When L_q < L_k, offset is positive, meaning query i maps to key position (i + offset) - # This is consistent with construct_local_mask in the tests which uses (sk - sq) - col_offset = L_k - L_q - cache_mask = None - - mask_applied = False - if causal and (window_size_left, window_size_right) == (-1, -1): - # Pure causal: ensure query doesn't attend to future keys - # With offset, query i can attend to keys up to position (i + col_offset) - mask = row_idx >= (col_idx - col_offset) - mask_applied = True - if DEBUG_CORE: - print("causal_mask:", mask) - elif (window_size_left, window_size_right) != (-1, -1): - # Handle the case where window sizes exceed sequence length - if window_size_left >= L_k: - window_size_left = -1 # No left limit - if window_size_right >= L_k: - window_size_right = -1 # No right limit - - if causal: - # Causal + sliding window: ensure we don't attend to future - window_size_right = min(window_size_right, 0) if window_size_right != -1 else 0 - - # Create sliding window mask - # Each query at position i attends to keys in [i + offset 
- left, i + offset + right] - if window_size_left == -1 and window_size_right == -1: - # No window restriction - mask = torch.ones((L_q, L_k), dtype=torch.bool, device=q.device) - else: - mask = torch.ones((L_q, L_k), dtype=torch.bool, device=q.device) - if window_size_left != -1: - # Each query at position i attends to keys from position (i - left) accounting for offset - mask = mask & (col_idx >= (row_idx + col_offset - window_size_left)) - if window_size_right != -1: - # Each query at position i attends to keys up to position (i + right) accounting for offset - mask = mask & (col_idx <= (row_idx + col_offset + window_size_right)) - - # Apply causal constraint - if causal: - causal_mask = row_idx >= (col_idx - col_offset) - mask = mask & causal_mask - - mask_applied = True - if DEBUG_CORE: - print(f"sliding_window_mask (left={window_size_left}, right={window_size_right}):", mask) - - # Apply cache mask if needed - if cache_mask is not None: - if mask_applied: - mask = mask & cache_mask - else: - mask = cache_mask - mask_applied = True - - # Apply the mask if created - if mask_applied: - attention_scaled_scores = attention_scaled_scores.masked_fill( - torch.logical_not(mask.unsqueeze(0)), float('-inf') - ) - if DEBUG_CORE: - print("attention_scaled_scores after masking:", attention_scaled_scores, attention_scaled_scores.shape) - - # Compute max for numerical stability - max_scores = torch.max(attention_scaled_scores, dim=-1, keepdim=True)[0] - if DEBUG_CORE: - print("max_scores:", max_scores, max_scores.shape) - if mask_applied: - # Replace -inf in max_scores with zeros to avoid NaN in subtraction - max_scores = torch.where( - torch.isinf(max_scores), torch.zeros_like(max_scores), max_scores - ) - if DEBUG_CORE: - print("max_scores after mask handling:", max_scores, max_scores.shape) - - # Shift scores - attention_shifted_scaled_scores = attention_scaled_scores - max_scores - if DEBUG_CORE: - print("attention_shifted_scaled_scores:", attention_shifted_scaled_scores, attention_shifted_scaled_scores.shape) - - # Exponentiate - if use_exp2: - RCP_LN = 1 / math.log(2) - exp_scores = torch.exp2(RCP_LN * attention_shifted_scaled_scores) - else: - exp_scores = torch.exp(attention_shifted_scaled_scores) - - if DEBUG_CORE: - print("exp_scores:", exp_scores, exp_scores.shape) - - # Sum of exponentials - sum_exp_scores = torch.sum(exp_scores, dim=-1, keepdim=True) - if DEBUG_CORE: - print("sum_exp_scores:", sum_exp_scores, sum_exp_scores.shape) - if mask_applied: - # if sum of exp scores is 0.0 it means scores where -inf, we cannot compute softmax and softmax_lse. 
Setting to 1 deals with -inf case cleanly - sum_exp_scores = torch.where( - sum_exp_scores == 0, - torch.ones_like(sum_exp_scores), - sum_exp_scores - ) - if DEBUG_CORE: - print("sum_exp_scores:", sum_exp_scores, sum_exp_scores.shape) - - # Compute softmax probabilities - p = exp_scores / sum_exp_scores - - if DEBUG_CORE: - print("softmax:", p, p.shape) - - # apply dropout if specified - if dropout_p > 0.0: - rand_vals = torch.rand(p.shape, generator=torch.Generator(device=p.device).manual_seed(philox_seed), device=p.device, dtype=p.dtype) - dropout_mask, dropout_scale = rand_vals > dropout_p, (1.0 / (1 - dropout_p)) - if DEBUG_CORE: - print("dropout_scale:", dropout_scale) - print("dropout_mask:", dropout_mask) - # Apply dropout mask and scale - # Set -1 for dropped positions and 1 for kept positions in exp_scores - sd_mask = torch.where(dropout_mask, exp_scores, -exp_scores) - p = torch.where(dropout_mask, p , torch.zeros_like(p)) * dropout_scale - if DEBUG_CORE: - print("softmax after dropout:", p) - print("sd_mask:", sd_mask) - else: - sd_mask = exp_scores - - # Compute log-sum-exp - if use_exp2: - LN2 = math.log(2) - RCP_LN = 1 / math.log(2) - max_scores_base2 = max_scores * RCP_LN - softmax_lse_base2 = max_scores_base2 + torch.log2(sum_exp_scores) - softmax_lse = softmax_lse_base2 * LN2 - softmax_lse.squeeze_(-1) - else: - softmax_lse = max_scores + torch.log(sum_exp_scores) - softmax_lse = softmax_lse.squeeze(-1) - - if DEBUG_CORE: - print("softmax_lse:", softmax_lse, softmax_lse.shape) - - # Compute output - if is_paged: - # Compute output on-the-fly using paged v cache - nheads_q = p.shape[0] - L_q = p.shape[1] - nheads_v = v_paged.shape[2] # [num_blocks, block_size, nheads_v, head_dim] - head_dim = v_paged.shape[3] - - # Handle MQA/GQA for v - assert nheads_q % nheads_v == 0, f"nheads_q ({nheads_q}) must be divisible by nheads_v ({nheads_v})" - v_group_size = nheads_q // nheads_v - - o = torch.zeros((nheads_q, L_q, head_dim), dtype=torch.float32, device=p.device) - - # Accumulate weighted v values - for kv_pos in range(L_k): - # Calculate which block and position within block - block_idx = kv_pos // paged_kv_block_size - within_block_idx = kv_pos % paged_kv_block_size - - # Get the physical block number from block_table - if block_table.dim() == 2: - physical_block = block_table[0, block_idx].item() - else: - physical_block = block_table[block_idx].item() - - # Access v values directly from paged cache - # v_paged shape: [num_blocks, block_size, nheads_v, head_dim] - v_vec = v_paged[physical_block, within_block_idx, :, :].to(torch.float32) # [nheads_v, head_dim] - - # For GQA/MQA, we need to repeat v_vec for each group - if v_group_size > 1: - # Expand v_vec to match query heads - # v_vec: [nheads_v, head_dim] -> [nheads_q, head_dim] - v_vec = v_vec.repeat_interleave(v_group_size, dim=0) - - # Weight by attention probabilities - # p is [nheads_q, L_q, L_k], we need p[:, :, kv_pos] which is [nheads_q, L_q] - # v_vec is [nheads_q, head_dim] - # We want to add p[:, :, kv_pos] * v_vec to each query position - weights = p[:, :, kv_pos].unsqueeze(-1) # [nheads_q, L_q, 1] - o += weights * v_vec.unsqueeze(1) # [nheads_q, L_q, head_dim] - else: - o = torch.matmul(p, v) - - # Debug output disabled - # if False: - # print(f"Output o shape: {o.shape}") - # print(f"Output o sample values: {o[0, 0, :5]}") - - if DEBUG_CORE: - print("o:", o, o.shape) - - # cast back to original dtype - o = o.to(torch.float16) - # softmax_lse = softmax_lse.to(torch.float16) # NOTE: if you cast lse to fp16 it 
cause accuracy issues. keep fp32 - sd_mask = sd_mask.to(torch.float16) - - return o, softmax_lse, sd_mask - -def attention_vanilla_forward_pytorch_ref_impl(q, k, v, sm_scale, causal, window_size_left, window_size_right, layout, dropout_p, philox_seed, philox_offset, alibi_slopes, use_exp2): - """Compute reference output and softmax_lse using PyTorch's built-in function""" - - # Ensure the layout is 'bhsd' - if layout == "bshd": - q = q.transpose(1, 2).contiguous() - k = k.transpose(1, 2).contiguous() - v = v.transpose(1, 2).contiguous() - elif layout != "bhsd": - raise ValueError(f"Unknown layout {layout}") - - # Prepare tensors - batch_size, nheads_q, seq_len_q, head_dim = q.shape - batch_size, nheads_k, seq_len_k, head_dim = k.shape - group_size = nheads_q // nheads_k - if nheads_q % nheads_k != 0: - raise ValueError("nheads_q must be divisible by nheads_k") - - if group_size != 1: - # MQA or GQA case - # Reshape q to [batch_size, nheads_k, group_size, seq_len_q, head_dim] - q = q.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - # Expand k and v to match group_size - k = k.unsqueeze(2).expand(-1, -1, group_size, -1, -1) - v = v.unsqueeze(2).expand(-1, -1, group_size, -1, -1) - # Flatten the first three dimensions for computation - q = q.reshape(batch_size * nheads_k * group_size, seq_len_q, head_dim) - k = k.reshape(batch_size * nheads_k * group_size, seq_len_k, head_dim) - v = v.reshape(batch_size * nheads_k * group_size, seq_len_k, head_dim) - else: - q = q.reshape(batch_size * nheads_q, seq_len_q, head_dim) - k = k.reshape(batch_size * nheads_k, seq_len_k, head_dim) - v = v.reshape(batch_size * nheads_k, seq_len_k, head_dim) - - # Call the core attention function - o, softmax_lse, sd_mask = attention_forward_core_ref_impl( - q, k, v, sm_scale, causal, window_size_left, window_size_right, dropout_p, philox_seed, philox_offset, alibi_slopes, use_exp2 - ) - - if group_size != 1: - # Reshape outputs back to original dimensions - o = o.reshape(batch_size, nheads_k, group_size, seq_len_q, head_dim) - o = o.reshape(batch_size, nheads_q, seq_len_q, head_dim) - softmax_lse = softmax_lse.reshape(batch_size, nheads_k, group_size, seq_len_q) - softmax_lse = softmax_lse.reshape(batch_size, nheads_q, seq_len_q) - sd_mask = sd_mask.reshape(batch_size, nheads_k, group_size, seq_len_q, seq_len_k) - sd_mask = sd_mask.reshape(batch_size, nheads_q, seq_len_q, seq_len_k) - else: - # Standard case - o = o.reshape(batch_size, nheads_q, seq_len_q, head_dim) - softmax_lse = softmax_lse.reshape(batch_size, nheads_q, seq_len_q) - sd_mask = sd_mask.reshape(batch_size, nheads_q, seq_len_q, seq_len_k) - - # Restore original layout if necessary - if layout == "bshd": - o = o.transpose(1, 2) - - return o, softmax_lse, sd_mask - - -def attention_varlen_forward_pytorch_ref_impl( - q, - k, - v, - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2 -): - # Ensure the layout is 'thd' - if layout != 'thd': - raise ValueError(f"Unsupported layout {layout}. 
Expected 'thd'.") - - batch_size = cu_seqlens_q.shape[0] - 1 - nheads_q, nheads_k = q.shape[1], k.shape[1] - head_dim = q.shape[2] - - # Pre-allocate outputs - total_L_q = q.shape[0] - total_L_k = k.shape[0] - - o = torch.zeros((total_L_q, nheads_q, head_dim), dtype=q.dtype, device=q.device) - softmax_lse = torch.zeros((nheads_q, total_L_q), dtype=torch.float32, device=q.device) - sd_mask = torch.zeros((batch_size, nheads_q, max_seqlen_q, max_seqlen_k), dtype=torch.float32, device=q.device) - - # Compute group_size for MQA/GQA handling - group_size = nheads_q // nheads_k - if nheads_q % nheads_k != 0: - raise ValueError("nheads_q must be divisible by nheads_k") - - for i in range(batch_size): - # Get the start and end indices for the current sequence - start_q = cu_seqlens_q[i].item() - end_q = cu_seqlens_q[i + 1].item() - start_k = cu_seqlens_k[i].item() - end_k = cu_seqlens_k[i + 1].item() - - seqlen_q = end_q - start_q - seqlen_k = end_k - start_k - - if DEBUG: - print(f"Batch {i} with seqlen_q = {seqlen_q}, seqlen_k = {seqlen_k}, Hq= {nheads_q}, Hk = {nheads_k}") - - # Extract q_i, k_i, v_i - q_i = q[start_q:end_q, :, :] # [L_q_i, nheads_q, head_dim] - k_i = k[start_k:end_k, :, :] # [L_k_i, nheads_k, head_dim] - v_i = v[start_k:end_k, :, :] # [L_k_i, nheads_k, head_dim] - - # Permute to [nheads, L_q_i, head_dim] - q_i = q_i.permute(1, 0, 2) - k_i = k_i.permute(1, 0, 2) - v_i = v_i.permute(1, 0, 2) - - # Handle MQA/GQA by adjusting shapes based on group_size - if group_size != 1: - # Reshape q_i to [nheads_k, group_size, L_q_i, head_dim] - q_i = q_i.reshape(nheads_k, group_size, seqlen_q, head_dim) - # Expand k_i and v_i to match group_size - k_i = k_i.unsqueeze(1).expand(-1, group_size, -1, -1) - v_i = v_i.unsqueeze(1).expand(-1, group_size, -1, -1) - # Flatten the first two dimensions for computation - q_i = q_i.reshape(nheads_k * group_size, seqlen_q, head_dim) - k_i = k_i.reshape(nheads_k * group_size, seqlen_k, head_dim) - v_i = v_i.reshape(nheads_k * group_size, seqlen_k, head_dim) - else: - # Standard case - q_i = q_i.reshape(nheads_q, seqlen_q, head_dim) - k_i = k_i.reshape(nheads_k, seqlen_k, head_dim) - v_i = v_i.reshape(nheads_k, seqlen_k, head_dim) - - if alibi_slopes is not None: - alibi_slopes_i = alibi_slopes[i] - else: - alibi_slopes_i = None - - # Call the core attention function for this sequence - o_i, softmax_lse_i, sd_mask_i = attention_forward_core_ref_impl(q_i, k_i, v_i, sm_scale, causal, window_size_left, window_size_right, dropout_p, philox_seed, philox_offset, alibi_slopes_i, use_exp2) - - # Reshape outputs back to original dimensions - if group_size != 1: - # Reshape outputs to [nheads_k, group_size, seqlen_q, head_dim] - o_i = o_i.reshape(nheads_k, group_size, seqlen_q, head_dim) - # Combine the first two dimensions back to nheads_q - o_i = o_i.reshape(nheads_q, seqlen_q, head_dim) - # Reshape softmax_lse_i similarly - softmax_lse_i = softmax_lse_i.reshape(nheads_k, group_size, seqlen_q) - softmax_lse_i = softmax_lse_i.reshape(nheads_q, seqlen_q) - else: - # Outputs are already in the correct shape - pass - - # Convert back to 'thd' layout - o_i = o_i.permute(1, 0, 2) # [L_q_i, nheads_q, head_dim] - sd_mask_i = sd_mask_i # [nheads_q, L_q_i, L_k_i] - - # Place outputs in pre-allocated tensors - o[start_q:end_q, :, :] = o_i - softmax_lse[:, start_q:end_q] = softmax_lse_i - sd_mask[i, :, :seqlen_q, :seqlen_k] = sd_mask_i - - return o, softmax_lse, sd_mask - -def attention_prefill_forward_ref_impl( - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: 
torch.Tensor, - sm_scale: float, - alibi_slopes: Optional[torch.Tensor], - causal: bool, - window_size_left: int, - window_size_right: int, - layout: Literal["bshd", "bhsd", "thd"], - cu_seqlens_q: torch.Tensor, - cu_seqlens_k: torch.Tensor, - max_seqlen_q: int, - max_seqlen_k: int, - dropout_p: float, - philox_seed: Optional[int], - philox_offset: Optional[int], - use_exp2: bool -): - # compute reference - if layout == "thd": - o_ref, softmax_lse_ref, sd_mask_ref = attention_varlen_forward_pytorch_ref_impl( - q.clone(), - k.clone(), - v.clone(), - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2, - ) - else: - o_ref, softmax_lse_ref, sd_mask_ref = attention_vanilla_forward_pytorch_ref_impl( - q.clone(), - k.clone(), - v.clone(), - sm_scale, - causal, - window_size_left, - window_size_right, - layout, - dropout_p, - philox_seed, - philox_offset, - alibi_slopes, - use_exp2) - - # copy back to ouput tensor - out.copy_(o_ref.to(out.dtype)) - - return softmax_lse_ref, sd_mask_ref - -def attention_decode_forward_ref_impl( - q: torch.Tensor, - k_cache: torch.Tensor, - v_cache: torch.Tensor, - k_new: Optional[torch.Tensor], - v_new: Optional[torch.Tensor], - out: torch.Tensor, - sm_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - alibi_slopes: Optional[torch.Tensor], - layout: Literal["bshd"], - cache_seqlens: Optional[torch.Tensor], - cache_batch_idx: Optional[torch.Tensor], - block_table: Optional[torch.Tensor] = None, - q_descale: Optional[torch.Tensor] = None, - k_descale: Optional[torch.Tensor] = None, - v_descale: Optional[torch.Tensor] = None, -): - """Compute reference output for decode attention using PyTorch's built-in functions""" - - if False: # Permanently disabled old debug output - pass - # print("\n========== attention_decode_forward_ref_impl inputs ==========") - # print(f"q shape: {q.shape}, dtype: {q.dtype}, device: {q.device}") - print(f"q values:\n{q}") - print(f"\nk_cache shape: {k_cache.shape}, dtype: {k_cache.dtype}, device: {k_cache.device}") - print(f"k_cache values:\n{k_cache}") - print(f"\nv_cache shape: {v_cache.shape}, dtype: {v_cache.dtype}, device: {v_cache.device}") - print(f"v_cache values:\n{v_cache}") - print(f"\nk_new: {k_new.shape if k_new is not None else None}, dtype: {k_new.dtype if k_new is not None else None}") - if k_new is not None: - print(f"k_new values:\n{k_new}") - print(f"\nv_new: {v_new.shape if v_new is not None else None}, dtype: {v_new.dtype if v_new is not None else None}") - if v_new is not None: - print(f"v_new values:\n{v_new}") - print(f"\nout shape: {out.shape}, dtype: {out.dtype}, device: {out.device}") - print(f"out values:\n{out}") - print(f"\nsm_scale: {sm_scale}") - print(f"causal: {causal}") - print(f"window_size_left: {window_size_left}") - print(f"window_size_right: {window_size_right}") - print(f"\nalibi_slopes: {alibi_slopes.shape if alibi_slopes is not None else None}, dtype: {alibi_slopes.dtype if alibi_slopes is not None else None}") - if alibi_slopes is not None: - print(f"alibi_slopes values:\n{alibi_slopes}") - print(f"\nlayout: {layout}") - print(f"cache_seqlens: {cache_seqlens}") - if cache_seqlens is not None and torch.is_tensor(cache_seqlens): - print(f"cache_seqlens values: {cache_seqlens}") - print(f"cache_batch_idx: {cache_batch_idx}") - if cache_batch_idx is not None: - print(f"cache_batch_idx values: {cache_batch_idx}") - 
print(f"\nblock_table: {block_table.shape if block_table is not None else None}, dtype: {block_table.dtype if block_table is not None else None}") - if block_table is not None: - print(f"block_table values:\n{block_table}") - print("=" * 60) - - # get batch size before any layout conversion - batch_size = q.shape[0] - - # Determine if we're in paged KV mode - is_paged = block_table is not None - if is_paged: - # Infer block size from cache shape - # k_cache shape for paged: [num_blocks, block_size, nheads, head_dim] - paged_kv_block_size = k_cache.shape[1] - else: - paged_kv_block_size = None - - # handle cache_batch_idx - if cache_batch_idx is not None: - # remap batch indices for cache access - batch_indices = cache_batch_idx - else: - batch_indices = torch.arange(batch_size, device=q.device) - - # copy new keys and values into cache if provided (before any layout conversion) - if k_new is not None and v_new is not None: - if is_paged: - # For paged KV cache, we need to update the blocks with new k/v values - _, seq_len_new, _, _ = k_new.shape # shape is [batch, seq_len, nheads, head_dim] for bshd layout - - for b in range(batch_size): - # Determine where to place new k/v in cache - if cache_seqlens is not None: - if torch.is_tensor(cache_seqlens): - start_pos = cache_seqlens[b].item() - else: - start_pos = cache_seqlens - else: - start_pos = 0 - - # For each new position, find the corresponding block and update it - for pos_offset in range(seq_len_new): - kv_pos = start_pos + pos_offset - - # Calculate which block and position within block - block_idx = kv_pos // paged_kv_block_size - within_block_idx = kv_pos % paged_kv_block_size - - # Get the physical block number from block_table - physical_block = block_table[b, block_idx].item() - - # Update the k and v values in the paged cache - # k_cache shape: [num_blocks, block_size, nheads, head_dim] - # k_new shape: [batch, seq_len, nheads, head_dim] - k_cache[physical_block, within_block_idx, :, :] = k_new[b, pos_offset, :, :] - v_cache[physical_block, within_block_idx, :, :] = v_new[b, pos_offset, :, :] - else: - _, seq_len_new, _, _ = k_new.shape # shape is [batch, seq_len, nheads, head_dim] for bshd layout - - for b in range(batch_size): - cache_idx = batch_indices[b].item() if torch.is_tensor(batch_indices) else batch_indices - - # determine where to place new k/v in cache - if cache_seqlens is not None: - if torch.is_tensor(cache_seqlens): - start_pos = cache_seqlens[b].item() - else: - start_pos = cache_seqlens - else: - # if no cache_seqlens, assume we're filling from the beginning - start_pos = 0 - - end_pos = start_pos + seq_len_new - - # copy new keys and values into cache (both are in bshd layout) - k_cache[cache_idx, start_pos:end_pos, :, :] = k_new[b, :, :, :] - v_cache[cache_idx, start_pos:end_pos, :, :] = v_new[b, :, :, :] - - # ensure the layout is 'bhsd' - if layout == "bshd": - q = q.transpose(1, 2).contiguous() - if not is_paged: - k_cache = k_cache.transpose(1, 2).contiguous() - v_cache = v_cache.transpose(1, 2).contiguous() - elif layout != "bhsd": - raise ValueError(f"Unknown layout {layout}") - - # prepare tensors - batch_size_q, nheads_q, seq_len_q, head_dim = q.shape - - if is_paged: - # For paged cache: [num_blocks, block_size, nheads, head_dim] - num_blocks, block_size, nheads_k, head_dim_k = k_cache.shape - _, _, nheads_v, head_dim_v = v_cache.shape - max_cache_len = None # Not directly available in paged mode - batch_size_cache = None # Not applicable in paged mode - else: - batch_size_cache, nheads_k, 
max_cache_len, head_dim_k = k_cache.shape - _, nheads_v, _, head_dim_v = v_cache.shape - - # validate dimensions - assert head_dim == head_dim_k == head_dim_v, f"Head dimensions must match: {head_dim}, {head_dim_k}, {head_dim_v}" - - # handle MQA/GQA - group_size = nheads_q // nheads_k - if nheads_q % nheads_k != 0: - raise ValueError("nheads_q must be divisible by nheads_k") - - # handle cache_batch_idx - if cache_batch_idx is not None: - # remap batch indices for cache access - batch_indices = cache_batch_idx - else: - batch_indices = torch.arange(batch_size, device=q.device) - - # prepare outputs - o = torch.zeros_like(q) - softmax_lse = torch.zeros((batch_size, nheads_q, seq_len_q), dtype=torch.float32, device=q.device) - - # process each batch element - for b in range(batch_size): - if not is_paged: - cache_idx = batch_indices[b].item() if torch.is_tensor(batch_indices) else batch_indices - - # determine valid cache length for this batch element - if cache_seqlens is not None: - if torch.is_tensor(cache_seqlens): - cache_len = cache_seqlens[b].item() - if k_new is not None: - _, seq_len_new, _, _ = k_new.shape - cache_len += seq_len_new - else: - cache_len = cache_seqlens - if k_new is not None: - _, seq_len_new, _, _ = k_new.shape - cache_len += seq_len_new - else: - if is_paged: - # For paged mode, we need cache_seqlens to know the valid length - raise ValueError("cache_seqlens must be provided for paged KV cache") - else: - cache_len = max_cache_len - - if is_paged: - # For paged KV cache, pass the cache and block table directly - # Extract block table for this batch element - block_table_b = block_table[b:b+1, :] # [1, num_blocks] - k_b = k_cache # Pass entire paged cache - v_b = v_cache # Pass entire paged cache - q_b = q[b:b+1, :, :, :] # [1, nheads_q, seq_len_q, head_dim] - - # For paged mode with MQA/GQA, we handle expansion in the core function - # Just reshape q for now - q_b = q_b.reshape(nheads_q, seq_len_q, head_dim) - else: - # Standard non-paged mode - k_b = k_cache[cache_idx, :, :, :] # [nheads_k, max_cache_len, head_dim] - v_b = v_cache[cache_idx, :, :, :] # [nheads_v, max_cache_len, head_dim] - q_b = q[b:b+1, :, :, :] # [1, nheads_q, seq_len_q, head_dim] - block_table_b = None - - # handle MQA/GQA by expanding k and v - if group_size != 1: - # expand k and v to match q's number of heads - k_b = k_b.unsqueeze(1).expand(-1, group_size, -1, -1) - k_b = k_b.reshape(nheads_q, max_cache_len, head_dim) - - v_b = v_b.unsqueeze(1).expand(-1, group_size, -1, -1) - v_b = v_b.reshape(nheads_q, max_cache_len, head_dim) - - # reshape for attention_forward_core_ref_impl - q_b = q_b.reshape(nheads_q, seq_len_q, head_dim) - - # handle alibi slopes for this batch - alibi_slopes_b = None - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - alibi_slopes_b = alibi_slopes[b] - else: - alibi_slopes_b = alibi_slopes - - # call core attention function with cache information - o_b, softmax_lse_b, _ = attention_forward_core_ref_impl( - q_b, k_b, v_b, sm_scale, causal, window_size_left, window_size_right, - dropout_p=0.0, philox_seed=None, philox_offset=None, - alibi_slopes=alibi_slopes_b, use_exp2=True, - cache_seqlens=cache_len, # Pass valid cache length - block_table=block_table_b, # Pass block table for paged mode - paged_kv_block_size=paged_kv_block_size, # Pass block size for paged mode - ) - - # store outputs - o[b, :, :, :] = o_b.reshape(nheads_q, seq_len_q, head_dim) - softmax_lse[b, :, :] = softmax_lse_b.reshape(nheads_q, seq_len_q) - - # restore original layout if necessary 
- if layout == "bshd": - o = o.transpose(1, 2) - - # copy output to the provided tensor - out.copy_(o.to(out.dtype)) - - return softmax_lse \ No newline at end of file diff --git a/flash_attn/flash_attn_triton_amd/interface_fa.py b/flash_attn/flash_attn_triton_amd/interface_fa.py deleted file mode 100644 index 3dc443abf67..00000000000 --- a/flash_attn/flash_attn_triton_amd/interface_fa.py +++ /dev/null @@ -1,927 +0,0 @@ -import torch -import os -from .fwd_prefill import attention_prefill_forward_triton_impl -from .bwd_prefill_split import attention_prefill_backward_triton_split_impl -from .bwd_prefill_fused_atomics import attention_prefill_backward_triton_fused_atomics_impl -from .bwd_prefill_fused_no_atomics import attention_prefill_backward_triton_split_fused_no_atomics_impl -from .fwd_decode import attention_decode_forward_triton_impl -from .fwd_ref import attention_prefill_forward_ref_impl, attention_decode_forward_ref_impl -from .bwd_ref import attention_backward_pytorch_ref_impl -from .utils import DEBUG, USE_REF, MetaData, is_fp8 -from einops import rearrange, repeat -from flash_attn.layers.rotary import apply_rotary_emb -from typing import Literal, Optional, Union - - -USE_EXP2 = True -BWD_MODE = os.environ.get('BWD_MODE', 'fused_no_atomics').lower() - -def fwd(q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: Optional[torch.Tensor], - alibi_slopes: Optional[torch.Tensor], - dropout_p: float, - softmax_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - softcap: float, - return_softmax: bool, - gen_: Optional[torch.Tensor] = None, - descale_q: Optional[torch.Tensor] = None, - descale_k: Optional[torch.Tensor] = None, - descale_v: Optional[torch.Tensor] = None - ): - - if DEBUG: - print() - print("flash_attn_triton_amd.py::fwd inputs") - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("out:", out, out.shape if out is not None else None) - print("alibi_slopes:", alibi_slopes) - print("dropout_p:", dropout_p) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("softcap:", softcap) - print("return_softmax:", return_softmax) - print("descale_q:", descale_q, descale_q.shape if descale_q is not None else None) - print("descale_k:", descale_k, descale_k.shape if descale_k is not None else None) - print("descale_v:", descale_v, descale_v.shape if descale_v is not None else None) - - if is_fp8(q): - assert out is not None, "fp8 output tensor should be passed in." - if (descale_q is None) or (descale_k is None) or (descale_v is None): - import warnings - warnings.warn("FP8 tensors detected but descale factors not provided. Using default scale of 1.0", UserWarning) - else: - out = torch.zeros_like(q) if out is None else out.zero_() - - # Setup metadata - metadata = MetaData(sm_scale=softmax_scale) - metadata.max_seqlens_q = q.shape[1] - metadata.max_seqlens_k = k.shape[1] - metadata.layout = "bshd" - - # get shape - batch, _ , nheads_q, _= q.shape - - if causal: - metadata.need_causal(True) - - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError(f"Alibi can be (nheads,) or (batch_size, nheads). 
Given tensor with shape {alibi_slopes.shape}") - metadata.need_alibi(alibi_slopes, batch, nheads_q) - - # store rng state - metadata.need_dropout(dropout_p, return_softmax) - rng_state = torch.as_tensor([metadata.philox_seed, metadata.philox_offset]) # as_tensors uses the underlying data and doesnot cast - - # check arguments - metadata.check_args(q, k, v, out) - - # call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - softmax_lse_ref, sd_mask_ref = attention_prefill_forward_ref_impl( - q, - k, - v, - out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - metadata.layout, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - USE_EXP2) - softmax_lse=softmax_lse_ref - sd_mask=sd_mask_ref - else: - if DEBUG: - print("Using Triton implementation") - softmax_lse_triton, sd_mask_triton = attention_prefill_forward_triton_impl( - q, - k, - v, - out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - None, - metadata.layout, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - metadata.return_softmax, - USE_EXP2, - descale_q, - descale_k, - descale_v) - softmax_lse=softmax_lse_triton - sd_mask=sd_mask_triton - - if DEBUG: - print("flash_attn_triton_amd.py::fwd outputs") - print("o:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("sd_mask:", sd_mask, sd_mask.shape if sd_mask is not None else None ) - print("rng_state:", rng_state) - - return out, softmax_lse, sd_mask, rng_state - -def bwd( - dout: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: torch.Tensor, - softmax_lse: torch.Tensor, - dq: Optional[torch.Tensor], - dk: Optional[torch.Tensor], - dv: Optional[torch.Tensor], - alibi_slopes: Optional[torch.Tensor], - dropout_p: float, - softmax_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - softcap: float, - deterministic: bool, - gen_: Optional[torch.Tensor] = None, - rng_state:Optional[torch.Tensor] = None, - descale_q: Optional[torch.Tensor] = None, - descale_k: Optional[torch.Tensor] = None, - descale_v: Optional[torch.Tensor] = None, - descale_o: Optional[torch.Tensor] = None, - descale_do: Optional[torch.Tensor] = None, - descale_dq: Optional[torch.Tensor] = None, - descale_dk: Optional[torch.Tensor] = None, - descale_dv: Optional[torch.Tensor] = None, -): - if DEBUG: - print() - print("flash_attn_triton_amd.py::bwd inputs") - print("dout:", dout, dout.shape) - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("out:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("dq:", dq, dq.shape if dq is not None else None) - print("dk:", dk, dk.shape if dk is not None else None) - print("dv:", dv, dv.shape if dv is not None else None) - print("alibi_slopes:", alibi_slopes) - print("dropout_p:", dropout_p) - print("out:", out) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("deterministic:", deterministic) - print("gen_:", gen_) - print("rng_state:", rng_state) - print("descale_q:", descale_q, descale_q.shape if descale_q is not 
None else None) - print("descale_k:", descale_k, descale_k.shape if descale_k is not None else None) - print("descale_v:", descale_v, descale_v.shape if descale_v is not None else None) - print("descale_o:", descale_o, descale_o.shape if descale_o is not None else None) - print("descale_do:", descale_do, descale_do.shape if descale_do is not None else None) - print("descale_dq:", descale_dq, descale_dq.shape if descale_dq is not None else None) - print("descale_dk:", descale_dk, descale_dk.shape if descale_dk is not None else None) - print("descale_dv:", descale_dv, descale_dv.shape if descale_dv is not None else None) - - dq = torch.zeros_like(q) if dq is None else dq.zero_() - dk = torch.zeros_like(k) if dk is None else dk.zero_() - dv = torch.zeros_like(v) if dv is None else dv.zero_() - - # get shape - batch, _ , nheads_q, _= q.shape - - if dropout_p > 0.0: - assert rng_state is not None - philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item() - else: - philox_seed, philox_offset = None, None - - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") - - # call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - - delta_ref = attention_backward_pytorch_ref_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - window_size_left, - window_size_right, - "bshd", - None, - None, - None, - None, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - ) - delta = delta_ref - else: - if DEBUG: - print("Using Triton implementation") - if BWD_MODE == "split": - delta_triton = attention_prefill_backward_triton_split_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - "bshd", - None, - None, - None, - None, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, - descale_k, - descale_v, - descale_o, - descale_do, - descale_dq, - descale_dk, - descale_dv, - ) - delta = delta_triton - elif BWD_MODE == "fused_atomics": - delta_triton = attention_prefill_backward_triton_fused_atomics_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - None, - None, - q.shape[1], - k.shape[1], - dropout_p, - philox_seed, - philox_offset, - descale_q, - descale_k, - descale_v, - descale_o, - True, - ) - delta = delta_triton - elif BWD_MODE == "fused_no_atomics": - delta_triton = attention_prefill_backward_triton_split_fused_no_atomics_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - "bshd", - None, - None, - None, - None, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, - descale_k, - descale_v, - descale_o, - descale_do, - descale_dq, - descale_dk, - descale_dv, - ) - delta = delta_triton - else: - raise ValueError(f"Unknown bwd mode {BWD_MODE}") - - if DEBUG: - print("flash_attn_triton_amd.py::bwd outputs") - print("dv:", dv, dv.shape) - if is_fp8(dv): - print("descale_dv:", descale_dv, descale_dv.shape if descale_dv is not None else None) - print("dk:", dk, dk.shape) - if is_fp8(dk): - print("descale_dk:", descale_dk, descale_dk.shape if descale_dk is not None else None) - print("dq:", dq, dq.shape) - if is_fp8(dq): - print("descale_dq:", descale_dq, descale_dq.shape if descale_dq is not None else None) - 
return dq, dk, dv, delta - -def varlen_fwd( - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: Optional[torch.Tensor], - cu_seqlens_q: torch.Tensor, - cu_seqlens_k: torch.Tensor, - seqused_k: Optional[torch.Tensor], - leftpad_k: Optional[torch.Tensor], - block_table_: Optional[torch.Tensor], - alibi_slopes: Optional[torch.Tensor], - max_seqlen_q: int, - max_seqlen_k: int, - dropout_p: float, - softmax_scale: float, - zero_tensors: bool , - causal: bool , - window_size_left: int, - window_size_right: int, - softcap: float, - return_softmax: bool, - gen_: Optional[torch.Tensor] = None, - descale_q: Optional[torch.Tensor] = None, - descale_k: Optional[torch.Tensor] = None, - descale_v: Optional[torch.Tensor] = None - ): - - if DEBUG: - print() - print("flash_attn_triton_amd.py::varlen_fwd") - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape) - print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape) - print("alibi_slopes:", alibi_slopes) - print("max_seqlen_q:", max_seqlen_q) - print("max_seqlen_k:", max_seqlen_k) - print("dropout_p:", dropout_p) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("gen_:", gen_) - print("descale_q:", descale_q, descale_q.shape if descale_q is not None else None) - print("descale_k:", descale_k, descale_k.shape if descale_k is not None else None) - print("descale_v:", descale_v, descale_v.shape if descale_v is not None else None) - - if is_fp8(q): - assert out is not None, "fp8 output tensor should be passed in." - if (descale_q is None) or (descale_k is None) or (descale_v is None): - import warnings - warnings.warn("FP8 tensors detected but descale factors not provided. 
Using default scale of 1.0", UserWarning) - else: - out = torch.zeros_like(q) if out is None else out.zero_() - - # Setup metadata - metadata = MetaData(sm_scale=softmax_scale) - metadata.set_varlen_params(cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k) # set layout to "thd" and other metdata - assert metadata.layout is not None - - # get shape - batch = len(cu_seqlens_q) - 1 - _, nheads_q, _= q.shape - - if causal: - metadata.need_causal(True) - - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") - metadata.need_alibi(alibi_slopes, batch, nheads_q) - - # store rng state - metadata.need_dropout(dropout_p, return_softmax) - rng_state = torch.as_tensor([metadata.philox_seed, metadata.philox_offset]) # as_tensors uses the underlying data and doesnot cast - - # Check arguments - metadata.check_args(q, k, v, out) - - # call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - softmax_lse_ref, sd_mask_ref = attention_prefill_forward_ref_impl( - q, - k, - v, - out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - metadata.layout, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - USE_EXP2) - softmax_lse=softmax_lse_ref - sd_mask=sd_mask_ref - else: - if DEBUG: - print("Using Triton implementation") - softmax_lse_triton, sd_mask_triton = attention_prefill_forward_triton_impl( - q, - k, - v, - out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - None, - metadata.layout, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - metadata.return_softmax, - USE_EXP2, - descale_q, - descale_k, - descale_v) - softmax_lse=softmax_lse_triton - sd_mask=sd_mask_triton - - if DEBUG: - print("varlen_fwd outputs") - print("out:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("sd_mask:", sd_mask, sd_mask.shape if sd_mask is not None else None ) - - - return out, softmax_lse, sd_mask, rng_state - -def varlen_bwd( - dout: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: torch.Tensor, - softmax_lse: torch.Tensor, - dq: Optional[torch.Tensor], - dk: Optional[torch.Tensor], - dv: Optional[torch.Tensor], - cu_seqlens_q: torch.Tensor, - cu_seqlens_k: torch.Tensor, - alibi_slopes: Optional[torch.Tensor], - max_seqlen_q: int, - max_seqlen_k: int, - dropout_p: float, - softmax_scale: float, - zero_tensors: bool, - causal: bool, - window_size_left: int, - window_size_right: int, - softcap: float, - deterministic: bool, - gen_ : Optional[torch.Tensor] = None, - rng_state: Optional[torch.Tensor] = None, - descale_q: Optional[torch.Tensor] = None, - descale_k: Optional[torch.Tensor] = None, - descale_v: Optional[torch.Tensor] = None, - descale_o: Optional[torch.Tensor] = None, - descale_do: Optional[torch.Tensor] = None, - descale_dq: Optional[torch.Tensor] = None, - descale_dk: Optional[torch.Tensor] = None, - descale_dv: Optional[torch.Tensor] = None, -): - if DEBUG: - print() - print("varlen_bwd") - print("dout:", dout, dout.shape) - print("q:", q, q.shape) - print("k:", k, k.shape) 
- print("v:", v, v.shape) - print("out:", out) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("dq:", dq, dq.shape if dq is not None else None) - print("dk:", dk, dk.shape if dk is not None else None) - print("dv:", dv, dv.shape if dv is not None else None) - print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape) - print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape) - print("alibi_slopes:", alibi_slopes) - print("max_seqlen_q:", max_seqlen_q) - print("max_seqlen_k:", max_seqlen_k) - print("dropout_p:", dropout_p) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("deterministic:", deterministic) - print("gen_:", gen_) - print("rng_state:", rng_state) - print("descale_q:", descale_q, descale_q.shape if descale_q is not None else None) - print("descale_k:", descale_k, descale_k.shape if descale_k is not None else None) - print("descale_v:", descale_v, descale_v.shape if descale_v is not None else None) - print("descale_do:", descale_do, descale_do.shape if descale_do else None) - - dq = torch.zeros_like(q) if dq is None else dq.zero_() - dk = torch.zeros_like(k) if dk is None else dk.zero_() - dv = torch.zeros_like(v) if dv is None else dv.zero_() - - # get shape - batch = len(cu_seqlens_q) - 1 - _, nheads_q, _= q.shape - - if dropout_p > 0.0: - assert rng_state is not None - philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item() - else: - philox_seed, philox_offset = None, None - - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") - - # call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - delta_ref = attention_backward_pytorch_ref_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - window_size_left, - window_size_right, - "thd", - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - ) - delta = delta_ref - else: - if DEBUG: - print("Using Triton implementation") - if BWD_MODE == "split": - delta_triton = attention_prefill_backward_triton_split_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - "thd", - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, - descale_k, - descale_v, - descale_o, - descale_do, - descale_dq, - descale_dk, - descale_dv, - ) - delta = delta_triton - elif BWD_MODE == "fused_atomics": - delta_triton = attention_prefill_backward_triton_fused_atomics_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - descale_q, - descale_k, - descale_v, - descale_o, - True, - ) - delta = delta_triton - elif BWD_MODE == "fused_no_atomics": - delta_triton = attention_prefill_backward_triton_split_fused_no_atomics_impl( - dout, - q, - k, - v, - out, - softmax_lse, - dq, - dk, - dv, - softmax_scale, - alibi_slopes, - causal, - "thd", - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, - descale_k, 
- descale_v, - descale_o, - descale_do, - descale_dq, - descale_dk, - descale_dv, - ) - delta = delta_triton - else: - raise ValueError(f"Unknown bwd mode {BWD_MODE}") - - if DEBUG: - print("varlen_bwd outputs") - print("delta:", delta, delta.shape) - print("dv:", dv, dv.shape) - print("dk:", dk, dk.shape) - print("dq:", dq, dq.shape) - - return dq, dk, dv, delta - -def fwd_kvcache( - q: torch.Tensor, - k_cache: torch.Tensor, - v_cache: torch.Tensor, - k: Optional[torch.Tensor], - v: Optional[torch.Tensor], - cache_seqlens: Optional[Union[(int, torch.Tensor)]], - rotary_cos: Optional[torch.Tensor], - rotary_sin: Optional[torch.Tensor], - cache_batch_idx: Optional[torch.Tensor], - cache_leftpad: Optional[torch.Tensor], - block_table: Optional[torch.Tensor], - alibi_slopes: Optional[torch.Tensor], - out: Optional[torch.Tensor], - softmax_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - softcap: float, - rotary_interleaved: bool, - num_splits: int - ): - - if DEBUG: - print() - print("flash_attn_triton_amd.py::fwd_kvcache inputs") - print("q:", q, q.shape) - print("k_cache:", k_cache, k_cache.shape) - print("v_cache:", v_cache, v_cache.shape) - print("k:", k, k.shape if k is not None else None) - print("v:", v, v.shape if v is not None else None) - print("cache_seqlens:", cache_seqlens ) - print("rotary_cos:",rotary_cos ) - print("rotary_sin:",rotary_sin) - print("cache_batch_idx:", cache_batch_idx) - print("cache_leftpad:", cache_leftpad) - print("block_table:", block_table) - print("alibi_slopes:", alibi_slopes) - print("out:", out) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("softcap:", softcap) - print("rotary_interleaved:", rotary_interleaved) - print("num_splits:", num_splits) - - # output - out = torch.zeros_like(q) if out is None else out.zero_() - - # fill metadata - metadata = MetaData(sm_scale=softmax_scale) - metadata.layout = "bshd" - metadata.max_seqlens_q = q.shape[1] - metadata.max_seqlens_k = k_cache.shape[1] - metadata.cache_batch_idx = cache_batch_idx - if isinstance(cache_seqlens, int): - metadata.cache_seqlens = torch.tensor(cache_seqlens, device=q.device) - else: - metadata.cache_seqlens = cache_seqlens - - # window_size can be a tensor sometimes - if isinstance(window_size_left, torch.Tensor): - metadata.window_size_left = int(window_size_left.item()) - else: - metadata.window_size_left = window_size_left - if isinstance(window_size_right, torch.Tensor): - metadata.window_size_right = int(window_size_right.item()) - else: - metadata.window_size_right = window_size_right - - k_new = k - v_new = v - - # get shape - batch, _ , nheads_q, _= q.shape - - if causal: - metadata.need_causal(True) - - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") - metadata.need_alibi(alibi_slopes, batch, nheads_q) - - # rotary boolean - apply_rotary = torch.is_tensor(rotary_cos) and torch.is_tensor(rotary_sin) - if apply_rotary: - metadata.need_rotary(rotary_sin, rotary_cos, rotary_interleaved) - - # Rotary Embedding Implementation - if apply_rotary: - if metadata.causal or (window_size_left != -1 or window_size_right !=-1): # NOTE: when support is added. 
Add `or metadata.local` - q_ro = apply_rotary_emb( - q, - metadata.rotary_cos, - metadata.rotary_sin, - seqlen_offsets=metadata.cache_seqlens, - interleaved=metadata.rotary_interleaved, - ) - else: - q_ro = rearrange( - apply_rotary_emb( - rearrange(q, "b s h d -> b 1 (s h) d"), - metadata.rotary_cos, - metadata.rotary_sin, - seqlen_offsets=metadata.cache_seqlens, - interleaved=metadata.rotary_interleaved, - ), - "b 1 (s h) d -> b s h d", - s=metadata.max_seqlens_q, - ) - k_ro = apply_rotary_emb( - k_new, - metadata.rotary_cos, - metadata.rotary_sin, - seqlen_offsets=metadata.cache_seqlens, - interleaved=metadata.rotary_interleaved, - ) - - q, k_new = q_ro.to(q.dtype), k_ro.to(q.dtype) - - # launch kernel - if USE_REF: - if DEBUG: - print("Using reference implementation") - softmax_lse_ref = attention_decode_forward_ref_impl( - q, - k_cache, - v_cache, - k_new, - v_new, - out, - metadata.sm_scale, - metadata.causal, - metadata.window_size_left, - metadata.window_size_right, - metadata.alibi_slopes, - metadata.layout, - metadata.cache_seqlens, - metadata.cache_batch_idx, - block_table, - ) - softmax_lse=softmax_lse_ref - else: - if DEBUG: - print("Using Triton implementation") - softmax_lse_triton = attention_decode_forward_triton_impl( - q, - k_cache, - v_cache, - k_new, - v_new, - out, - metadata.sm_scale, - metadata.causal, - metadata.window_size_left, - metadata.window_size_right, - metadata.alibi_slopes, - metadata.layout, - metadata.cache_seqlens, - metadata.cache_batch_idx, - block_table, - ) - softmax_lse = softmax_lse_triton - - if DEBUG: - print("out:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - return out, softmax_lse diff --git a/flash_attn/flash_attn_triton_amd/interface_fa_v3.py b/flash_attn/flash_attn_triton_amd/interface_fa_v3.py deleted file mode 100755 index be8e2d3cbeb..00000000000 --- a/flash_attn/flash_attn_triton_amd/interface_fa_v3.py +++ /dev/null @@ -1,660 +0,0 @@ -import torch -import os -from .fwd_prefill import attention_prefill_forward_triton_impl -from .bwd_prefill_split import attention_prefill_backward_triton_split_impl -from .bwd_prefill_fused_atomics import attention_prefill_backward_triton_fused_atomics_impl -from .bwd_prefill_fused_no_atomics import attention_prefill_backward_triton_split_fused_no_atomics_impl -from .fwd_decode import attention_decode_forward_triton_impl -from .fwd_ref import attention_prefill_forward_ref_impl, attention_decode_forward_ref_impl -from .bwd_ref import attention_backward_pytorch_ref_impl -from .utils import DEBUG, USE_REF, MetaData, is_fp8 -from einops import rearrange, repeat -from flash_attn.layers.rotary import apply_rotary_emb -from typing import Optional, Union, Tuple - -USE_EXP2 = True -BWD_MODE = os.environ.get('BWD_MODE', 'fused_no_atomics').lower() -USE_DECODE_PATH = os.environ.get('FLASH_ATTENTION_V3_USE_DECODE', '0') == '1' - -def fwd( - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - k_new: Optional[torch.Tensor], - v_new: Optional[torch.Tensor], - qv: Optional[torch.Tensor], - out: Optional[torch.Tensor], - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - cu_seqlens_k_new: Optional[torch.Tensor], - seqused_q: Optional[torch.Tensor], - seqused_k: Optional[torch.Tensor], - max_seqlen_q: Optional[int], - max_seqlen_k: Optional[int], - page_table: Optional[torch.Tensor], - kv_batch_idx: Optional[torch.Tensor], - leftpad_k: Optional[torch.Tensor], - rotary_cos: Optional[torch.Tensor], - rotary_sin: Optional[torch.Tensor], - seqlens_rotary: 
Optional[torch.Tensor], - q_descale: Optional[torch.Tensor], - k_descale: Optional[torch.Tensor], - v_descale: Optional[torch.Tensor], - softmax_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - attention_chunk: int, - softcap: float, - rotary_interleaved: bool, - scheduler_metadata=None, - num_splits: int = 1, - pack_gqa=None, - sm_margin: int = 0, -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Flash Attention v3 forward pass compatible interface for AMD Triton implementation. - - This function maps v3 parameters to the existing AMD Triton implementation. - """ - - if DEBUG: - print() - print("interface_fa_v3.py::fwd inputs") - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("k_new:", k_new, k_new.shape if k_new is not None else None) - print("v_new:", v_new, v_new.shape if v_new is not None else None) - print("qv:", qv, qv.shape if qv is not None else None) - print("out:", out, out.shape if out is not None else None) - print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape if cu_seqlens_q is not None else None) - print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape if cu_seqlens_k is not None else None) - print("cu_seqlens_k_new:", cu_seqlens_k_new, cu_seqlens_k_new.shape if cu_seqlens_k_new is not None else None) - print("seqused_q:", seqused_q, seqused_q.shape if seqused_q is not None else None) - print("seqused_k:", seqused_k, seqused_k.shape if seqused_k is not None else None) - print("max_seqlen_q:", max_seqlen_q) - print("max_seqlen_k:", max_seqlen_k) - print("page_table:", page_table, page_table.shape if page_table is not None else None) - print("kv_batch_idx:", kv_batch_idx, kv_batch_idx.shape if kv_batch_idx is not None else None) - print("leftpad_k:", leftpad_k, leftpad_k.shape if leftpad_k is not None else None) - print("rotary_cos:", rotary_cos, rotary_cos.shape if rotary_cos is not None else None) - print("rotary_sin:", rotary_sin, rotary_sin.shape if rotary_sin is not None else None) - print("seqlens_rotary:", seqlens_rotary, seqlens_rotary.shape if seqlens_rotary is not None else None) - print("q_descale:", q_descale, q_descale.shape if q_descale is not None else None) - print("k_descale:", k_descale, k_descale.shape if k_descale is not None else None) - print("v_descale:", v_descale, v_descale.shape if v_descale is not None else None) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("attention_chunk:", attention_chunk) - print("softcap:", softcap) - print("rotary_interleaved:", rotary_interleaved) - print("scheduler_metadata:", scheduler_metadata) - print("num_splits:", num_splits) - print("pack_gqa:", pack_gqa) - print("sm_margin:", sm_margin) - - # Handle qv packed input - if qv is not None: - raise NotImplementedError("QV packed input is not yet supported in the AMD Triton backend") - - - # Handle softcap - if softcap != 0.0: - raise NotImplementedError(f"Softcap is not yet supported in the AMD Triton backend (got softcap={softcap}, expected 0.0)") - - # Handle attention_chunk - if attention_chunk != 0 and attention_chunk != 1: - raise NotImplementedError(f"attention_chunk is not yet supported in the AMD Triton backend (got attention_chunk={attention_chunk})") - - - # Handle scheduler metadata - if scheduler_metadata is not None: - raise NotImplementedError("Scheduler metadata is not yet supported in the AMD Triton backend") - - # Handle pack_gqa - if pack_gqa is not None 
and pack_gqa is not False: - raise NotImplementedError(f"pack_gqa is not yet supported in the AMD Triton backend (got pack_gqa={pack_gqa})") - - # Handle num_splits - if num_splits != 1: - raise NotImplementedError(f"Split attention (num_splits > 1) is not yet supported in the AMD Triton backend (got num_splits={num_splits})") - - # Handle sm_margin - if sm_margin != 0: - raise NotImplementedError(f"sm_margin is not yet supported in the AMD Triton backend (got sm_margin={sm_margin}, expected 0)") - - # Handle leftpad_k - if leftpad_k is not None: - raise NotImplementedError("Left padding (leftpad_k) is not yet supported in the AMD Triton backend") - - # Handle cu_seqlens_k_new - if cu_seqlens_k_new is not None: - raise NotImplementedError("cu_seqlens_k_new is not yet supported in the AMD Triton backend") - - # if seqlens_rotary is not None: - # raise NotImplementedError("seqlens_rotary is not yet supported in the AMD Triton backend") - - # Setup metadata - metadata = MetaData(sm_scale=softmax_scale) - - - # Handle variable length sequences first to determine layout - # Determine layout based on tensor dimensions and cu_seqlens presence - if cu_seqlens_q is not None: - # Q has variable length - check tensor dimensions to confirm - if len(q.shape) == 3: # [total_seqlen, nheads, head_dim] - metadata.layout = "thd" - metadata.varlen = True - metadata.cu_seqlens_q = cu_seqlens_q - metadata.max_seqlens_q = max_seqlen_q - - # K might be varlen or batch mode - if cu_seqlens_k is not None: - metadata.cu_seqlens_k = cu_seqlens_k - metadata.max_seqlens_k = max_seqlen_k - else: - # K is in batch mode while Q is varlen (KV cache scenario) - metadata.cu_seqlens_k = None - metadata.max_seqlens_k = k.shape[1] if len(k.shape) == 4 else max_seqlen_k - else: - raise ValueError(f"cu_seqlens_q provided but q has shape {q.shape}, expected 3D tensor for varlen") - else: - # Regular batch mode - metadata.layout = "bshd" - metadata.varlen = False - metadata.cu_seqlens_q = None - metadata.cu_seqlens_k = None - metadata.max_seqlens_q = q.shape[1] if max_seqlen_q is None else max_seqlen_q - metadata.max_seqlens_k = k.shape[1] if max_seqlen_k is None else max_seqlen_k - - # Now determine if we should use decode or prefill kernel - # Decode kernel should be used for KV cache scenarios where: - # 1. k_new/v_new are provided - incremental KV cache update (primary KV cache indicator) - # 2. kv_batch_idx is provided - KV cache batch indexing (primary KV cache indicator) - # 3. 
seqused_k without seqused_q - indicates KV cache fill levels (not varlen masking) - # Note: In varlen, both seqused_q and seqused_k are used for sequence masking - # In KV cache, only seqused_k is used to track cache fill levels - if USE_DECODE_PATH: - # Force decode path - use_decode = True - else: - # Detect KV cache scenarios: - # - Clear KV cache indicators (k_new, v_new, kv_batch_idx) - # - OR seqused_k without seqused_q (KV cache fill tracking, not varlen masking) - use_decode = ( - k_new is not None or # Have new KV to append (KV cache indicator) - v_new is not None or # Have new KV to append (KV cache indicator) - kv_batch_idx is not None or # Have KV cache batch indexing (KV cache indicator) - (seqused_k is not None and seqused_q is None) # KV cache fill levels (not varlen) - ) - - # Check for unsupported features with decode kernel - if use_decode: - if metadata.layout == "thd": - raise NotImplementedError("Varlen is not yet supported with the decode kernel in the AMD Triton backend") - if kv_batch_idx is not None: - raise NotImplementedError("kv_batch_idx is not yet supported with the decode kernel in the AMD Triton backend") - - - if out is None: - out_dtype = torch.float32 if is_fp8(q) else q.dtype - if metadata.layout == "bshd": - out = torch.zeros(q.shape[0], q.shape[1], q.shape[2], v.shape[-1], dtype=out_dtype, device=q.device) - elif metadata.layout == "thd": - out = torch.zeros(q.shape[0], q.shape[1], v.shape[-1], dtype=out_dtype, device=q.device) - else: - raise ValueError(f"Unsupported layout: {metadata.layout}. Only 'bshd' and 'thd' layouts are supported.") - else: - out = out.zero_() - - if is_fp8(q): - if (q_descale is None) or (k_descale is None) or (v_descale is None): - import warnings - warnings.warn("FP8 tensors detected but descale factors not provided. Using default scale of 1.0", UserWarning) - - # Get shape - if metadata.layout == "bshd": - batch, _, nheads_q, _ = q.shape - else: # "thd" layout for varlen - _, nheads_q, _ = q.shape - batch = len(cu_seqlens_q) - 1 if cu_seqlens_q is not None else 1 - - # Handle causal mask - if causal: - metadata.need_causal(True) - - # Handle alibi slopes (not directly supported in v3 interface, but we'll keep the logic) - alibi_slopes = None # V3 doesn't have alibi_slopes in the signature - if alibi_slopes is not None: - if alibi_slopes.dim() == 2: - pass - elif alibi_slopes.dim() == 1: - alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) - else: - raise ValueError(f"Alibi can be (nheads,) or (batch_size, nheads). 
Given tensor with shape {alibi_slopes.shape}") - metadata.need_alibi(alibi_slopes, batch, nheads_q) - - # Handle dropout (v3 doesn't have dropout in forward) - dropout_p = 0.0 - return_softmax = False - metadata.need_dropout(dropout_p, return_softmax) - - # Handle rotary embeddings - if rotary_cos is not None and rotary_sin is not None: - metadata.need_rotary(rotary_sin, rotary_cos, rotary_interleaved) - - # Apply rotary embeddings if provided - if metadata.causal or window_size_left != -1 or window_size_right != -1: - q_rot = apply_rotary_emb( - q, - rotary_cos, - rotary_sin, - seqlen_offsets=seqlens_rotary, - interleaved=rotary_interleaved, - ) - q = q_rot.to(q.dtype) - - if k_new is not None: - k_rot = apply_rotary_emb( - k_new, - rotary_cos, - rotary_sin, - seqlen_offsets=seqlens_rotary, - interleaved=rotary_interleaved, - ) - k_new = k_rot.to(k.dtype) - - # Store RNG state - rng_state = torch.as_tensor([metadata.philox_seed, metadata.philox_offset]) - - # Call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - - if use_decode: - if DEBUG: - print(f"Using decode reference implementation ( layout={metadata.layout}, cache_seqlens={seqused_k is not None}, k_new={k_new is not None}, v_new={v_new is not None}, kv_batch_idx={kv_batch_idx is not None})") - # Use decode reference implementation - softmax_lse = attention_decode_forward_ref_impl( - q, - k, # k_cache - v, # v_cache - k_new, - v_new, - out, - metadata.sm_scale, - metadata.causal, - window_size_left, - window_size_right, - metadata.alibi_slopes, - metadata.layout, - seqused_k, # cache_seqlens - kv_batch_idx, # cache_batch_idx - page_table, # block_table - q_descale, - k_descale, - v_descale, - ) - else: - if DEBUG: - print("Using prefill reference implementation") - # Use prefill reference implementation - softmax_lse_ref, sd_mask_ref = attention_prefill_forward_ref_impl( - q, k, v, out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - metadata.layout, - metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - USE_EXP2 - ) - softmax_lse = softmax_lse_ref - else: - if DEBUG: - print("Using Triton implementation") - - if use_decode: - if DEBUG: - print(f"Using Decode Triton implementation (cache_seqlens={seqused_k is not None}, k_new={k_new is not None}, v_new={v_new is not None}, kv_batch_idx={kv_batch_idx is not None})") - - # Use decode kernel for KV cache scenarios - # Note: seqused_k can serve as cache_seqlens in v3 - softmax_lse = attention_decode_forward_triton_impl( - q, - k, # k_cache in v2 terminology - v, # v_cache in v2 terminology - k_new, # New KV values to append to cache - v_new, # New KV values to append to cache - out, - metadata.sm_scale, - metadata.causal, - window_size_left, - window_size_right, - metadata.alibi_slopes, - metadata.layout, - seqused_k, # cache_seqlens - kv_batch_idx, # cache_batch_idx - page_table, # block_table for paged attention - q_descale, - k_descale, - v_descale, - ) - # Decode kernel returns only softmax_lse, not sd_mask - sd_mask_triton = None - else: - if DEBUG: - print("Using prefill Triton implementation") - # Use prefill kernel - softmax_lse_triton, sd_mask_triton = attention_prefill_forward_triton_impl( - q, k, v, out, - metadata.sm_scale, - metadata.alibi_slopes, - metadata.causal, - window_size_left, - window_size_right, - None, # block_table - metadata.layout, - 
metadata.cu_seqlens_q, - metadata.cu_seqlens_k, - metadata.max_seqlens_q, - metadata.max_seqlens_k, - metadata.dropout_p, - metadata.philox_seed, - metadata.philox_offset, - metadata.return_softmax, - USE_EXP2, - q_descale, - k_descale, - v_descale, - seqused_q, - seqused_k, - ) - softmax_lse = softmax_lse_triton - - if DEBUG: - print("interface_fa_v3.py::fwd outputs") - print("out:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - - # Return format compatible with v3 - # V3 returns (out, softmax_lse, *rest) where rest can be empty or contain additional outputs - return out, softmax_lse - - -def bwd( - dout: torch.Tensor, - q: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - out: torch.Tensor, - softmax_lse: torch.Tensor, - dq: Optional[torch.Tensor], - dk: Optional[torch.Tensor], - dv: Optional[torch.Tensor], - cu_seqlens_q: Optional[torch.Tensor], - cu_seqlens_k: Optional[torch.Tensor], - seqused_q: Optional[torch.Tensor], - seqused_k: Optional[torch.Tensor], - max_seqlen_q: Optional[int], - max_seqlen_k: Optional[int], - softmax_scale: float, - causal: bool, - window_size_left: int, - window_size_right: int, - softcap: float, - deterministic: bool, - sm_margin: int = 0, -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Flash Attention v3 backward pass compatible interface for AMD Triton implementation. - - This function maps v3 parameters to the existing AMD Triton implementation. - """ - - if DEBUG: - print() - print("interface_fa_v3.py::bwd inputs") - print("dout:", dout, dout.shape) - print("q:", q, q.shape) - print("k:", k, k.shape) - print("v:", v, v.shape) - print("out:", out, out.shape) - print("softmax_lse:", softmax_lse, softmax_lse.shape) - print("dq:", dq, dq.shape if dq is not None else None) - print("dk:", dk, dk.shape if dk is not None else None) - print("dv:", dv, dv.shape if dv is not None else None) - print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape if cu_seqlens_q is not None else None) - print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape if cu_seqlens_k is not None else None) - print("seqused_q:", seqused_q, seqused_q.shape if seqused_q is not None else None) - print("seqused_k:", seqused_k, seqused_k.shape if seqused_k is not None else None) - print("max_seqlen_q:", max_seqlen_q) - print("max_seqlen_k:", max_seqlen_k) - print("softmax_scale:", softmax_scale) - print("causal:", causal) - print("window_size_left:", window_size_left) - print("window_size_right:", window_size_right) - print("softcap:", softcap) - print("deterministic:", deterministic) - print("sm_margin:", sm_margin) - - # Check for unsupported features in backward pass - - # Handle softcap - if softcap != 0.0: - raise NotImplementedError(f"Softcap is not yet supported in the AMD Triton backend backward pass (got softcap={softcap}, expected 0.0)") - - # Handle sm_margin - if sm_margin != 0: - raise NotImplementedError(f"sm_margin is not yet supported in the AMD Triton backend backward pass (got sm_margin={sm_margin}, expected 0)") - - # Initialize gradient tensors if not provided - dq = torch.zeros_like(q) if dq is None else dq.zero_() - dk = torch.zeros_like(k) if dk is None else dk.zero_() - dv = torch.zeros_like(v) if dv is None else dv.zero_() - - # Determine layout based on cu_seqlens - if cu_seqlens_q is not None and cu_seqlens_k is not None: - # Variable length sequence mode - layout = "thd" - batch = len(cu_seqlens_q) - 1 - _, nheads_q, _ = q.shape - else: - # Regular batch mode - layout = "bshd" - batch, _, nheads_q, _ = 
q.shape - max_seqlen_q = q.shape[1] if max_seqlen_q is None else max_seqlen_q - max_seqlen_k = k.shape[1] if max_seqlen_k is None else max_seqlen_k - - # V3 backward doesn't have dropout or alibi slopes - dropout_p = 0.0 - philox_seed, philox_offset = None, None - alibi_slopes = None - - # For fp8, we would need descale factors, but v3 interface doesn't expose them - # So we'll pass None for now - descale_q = None - descale_k = None - descale_v = None - descale_o = None - descale_do = None - descale_dq = None - descale_dk = None - descale_dv = None - - # Call implementation - if USE_REF: - if DEBUG: - print("Using reference implementation") - delta_ref = attention_backward_pytorch_ref_impl( - dout, q, k, v, out, softmax_lse, - dq, dk, dv, - softmax_scale, - alibi_slopes, - causal, - window_size_left, - window_size_right, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - ) - delta = delta_ref - else: - if DEBUG: - print("Using Triton implementation") - - if BWD_MODE == "split": - delta_triton = attention_prefill_backward_triton_split_impl( - dout, q, k, v, out, softmax_lse, - dq, dk, dv, - softmax_scale, - alibi_slopes, - causal, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, descale_k, descale_v, descale_o, - descale_do, descale_dq, descale_dk, descale_dv, - seqused_q, seqused_k, - ) - delta = delta_triton - elif BWD_MODE == "fused_atomics": - delta_triton = attention_prefill_backward_triton_fused_atomics_impl( - dout, q, k, v, out, softmax_lse, - dq, dk, dv, - softmax_scale, - alibi_slopes, - causal, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - descale_q, descale_k, descale_v, descale_o, - True, - ) - delta = delta_triton - elif BWD_MODE == "fused_no_atomics": - delta_triton = attention_prefill_backward_triton_split_fused_no_atomics_impl( - dout, q, k, v, out, softmax_lse, - dq, dk, dv, - softmax_scale, - alibi_slopes, - causal, - layout, - cu_seqlens_q, - cu_seqlens_k, - max_seqlen_q, - max_seqlen_k, - dropout_p, - philox_seed, - philox_offset, - USE_EXP2, - descale_q, descale_k, descale_v, descale_o, - descale_do, descale_dq, descale_dk, descale_dv, - seqused_q, seqused_k, - ) - delta = delta_triton - else: - raise ValueError(f"Unknown bwd mode {BWD_MODE}") - - if DEBUG: - print("interface_fa_v3.py::bwd outputs") - print("dq:", dq, dq.shape) - print("dk:", dk, dk.shape) - print("dv:", dv, dv.shape) - print("delta:", delta, delta.shape if delta is not None else None) - - # V3 expects (dq, dk, dv, softmax_d, *rest) - # delta is the softmax_d in this case - return dq, dk, dv, delta - - -def fwd_combine( - out_partial: torch.Tensor, - lse_partial: torch.Tensor, - out: Optional[torch.Tensor] = None, - out_dtype: Optional[torch.dtype] = None, -) -> torch.Tensor: - """ - Combine partial outputs from split attention computation. - - This is used when num_splits > 1 to combine the partial results. 
- - Args: - out_partial: Partial output tensor from split computation - lse_partial: Partial log-sum-exp tensor - out: Optional output tensor to write to - out_dtype: Optional dtype for output - - Returns: - Combined output tensor - """ - raise NotImplementedError("fwd_combine is not yet implemented in the AMD Triton backend") - - -def get_scheduler_metadata( - batch_size: int, - max_seqlen_q: int, - max_seqlen_k: int, - num_heads_q: int, - num_heads_kv: int, - headdim: int, - headdim_v: int, - qkv_dtype: torch.dtype, - cache_seqlens: torch.Tensor, - cu_seqlens_q: Optional[torch.Tensor] = None, - cu_seqlens_k: Optional[torch.Tensor] = None, - cu_seqlens_k_new: Optional[torch.Tensor] = None, - seqused_q: Optional[torch.Tensor] = None, - cache_leftpad: Optional[torch.Tensor] = None, - page_size: Optional[int] = None, - max_seqlen_k_new: int = 0, - causal: bool = False, - window_size_left: int = -1, - window_size_right: int = -1, - attention_chunk: int = 0, - has_softcap: bool = False, - num_splits: int = 0, - pack_gqa: Optional[bool] = None, - sm_margin: int = 0, -): - """ - Get scheduler metadata for optimized kernel selection. - - This function is used to precompute metadata for kernel scheduling in FA3. - The AMD Triton backend currently doesn't use scheduler metadata, so this - raises an error. - - Args: - Various attention parameters used for scheduling decisions - - Returns: - None - scheduler metadata is not used in AMD Triton backend - """ - raise NotImplementedError("get_scheduler_metadata is not supported in the AMD Triton backend yet.") \ No newline at end of file diff --git a/flash_attn/flash_attn_triton_amd/interface_v2.py b/flash_attn/flash_attn_triton_amd/interface_v2.py new file mode 100644 index 00000000000..134c4a76c12 --- /dev/null +++ b/flash_attn/flash_attn_triton_amd/interface_v2.py @@ -0,0 +1,674 @@ +import torch +import os +from typing import Optional, Union +from .fwd_prefill import attention_forward_prefill_triton_impl +from .fwd_decode import attention_forward_decode_triton_impl +from .bwd import attention_backward_triton_impl +from .utils import DEBUG, USE_EXP2, BWD_MODE, PHILOX_SEED, PHILOX_OFFSET + + +def fwd( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + out: Optional[torch.Tensor], + alibi_slopes: Optional[torch.Tensor], + dropout_p: float, + softmax_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, + return_softmax: bool, + gen_: Optional[torch.Tensor] = None, +): + + # Reject FP8 tensors (FA2 AMD path does not support FP8) + if str(q.dtype).startswith("torch.float8"): + raise NotImplementedError( + "FP8 tensors are not supported in the AMD Triton FA2 interface. Use the FA3 path instead." + ) + + # Unsupported features assertions (keep behavior explicit like v3 shim) + if softcap != 0.0: + raise NotImplementedError( + "softcap is not supported in the AMD Triton FA2 interface (expected 0.0)." 
+ ) + + if DEBUG: + print() + print("flash_attn_triton_amd.py::fwd inputs") + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("out:", out, out.shape if out is not None else None) + print("alibi_slopes:", alibi_slopes) + print("dropout_p:", dropout_p) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("softcap:", softcap) + print("return_softmax:", return_softmax) + out = torch.zeros_like(q) if out is None else out.zero_() + + # Layout / shapes + layout = "bshd" + max_seqlen_q = q.shape[1] + max_seqlen_k = k.shape[1] + batch, _, nheads_q, _ = q.shape + + # Normalize / validate alibi + if alibi_slopes is not None: + if alibi_slopes.dim() == 1: + alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) + assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2 + assert alibi_slopes.shape == (batch, nheads_q) + + # Dropout + RNG seed + philox_seed, philox_offset = PHILOX_SEED, PHILOX_OFFSET + rng_state = torch.as_tensor([philox_seed, philox_offset]) + + # argument checks + assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4 + assert q.shape[-1] == k.shape[-1] == v.shape[-1] + assert q.dtype == k.dtype == v.dtype + assert out.shape[:-1] == q.shape[:-1] and out.shape[-1] == v.shape[-1] + nheads_k = k.shape[2] + assert (nheads_q % nheads_k) == 0 + + # call implementation + if DEBUG: + print("Using Triton implementation") + softmax_lse, sd_mask = attention_forward_prefill_triton_impl( + q, + k, + v, + out, + softmax_scale, + alibi_slopes, + causal, + window_size_left, + window_size_right, + None, + layout, + None, + None, + max_seqlen_q, + max_seqlen_k, + dropout_p, + philox_seed, + philox_offset, + return_softmax, + USE_EXP2, + None, + None, + None, + ) + + if DEBUG: + print("flash_attn_triton_amd.py::fwd outputs") + print("o:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + print("sd_mask:", sd_mask, sd_mask.shape if sd_mask is not None else None) + print("rng_state:", rng_state) + + # --- Assertions (shape + dtype contracts) --- + # out: (B, Sq, Hq, D) + assert out.shape == q.shape, f"[fwd] out shape {out.shape} != q shape {q.shape}" + # softmax_lse: (B, Hq, Sq) + expected_lse_shape = (q.shape[0], q.shape[2], q.shape[1]) + assert ( + softmax_lse.shape == expected_lse_shape + ), f"[fwd] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}" + assert ( + softmax_lse.dtype == torch.float32 + ), f"[fwd] softmax_lse dtype {softmax_lse.dtype} != torch.float32" + if return_softmax: + # sd_mask: (B, Hq, Sq, Sk) + assert sd_mask is not None, "[fwd] return_softmax=True but sd_mask is None" + assert sd_mask.dim() == 4, f"[fwd] sd_mask dim {sd_mask.dim()} != 4" + assert ( + sd_mask.shape[0] == q.shape[0] + and sd_mask.shape[1] == q.shape[2] + and sd_mask.shape[2] == q.shape[1] + ), f"[fwd] sd_mask leading dims {sd_mask.shape[:3]} mismatch (B,Hq,Sq) {(q.shape[0], q.shape[2], q.shape[1])}" + else: + assert sd_mask is None, "[fwd] return_softmax=False but sd_mask is not None" + + return out, softmax_lse, sd_mask, rng_state + + +def bwd( + dout: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + out: torch.Tensor, + softmax_lse: torch.Tensor, + dq: Optional[torch.Tensor], + dk: Optional[torch.Tensor], + dv: Optional[torch.Tensor], + alibi_slopes: Optional[torch.Tensor], + dropout_p: float, + softmax_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, 
+ deterministic: bool, + gen_: Optional[torch.Tensor] = None, + rng_state: Optional[torch.Tensor] = None, +): + if softcap != 0.0: + raise NotImplementedError( + "softcap is not supported in the AMD Triton FA2 interface (expected 0.0)." + ) + + if DEBUG: + print() + print("flash_attn_triton_amd.py::bwd inputs") + print("dout:", dout, dout.shape) + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("out:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + print("dq:", dq, dq.shape if dq is not None else None) + print("dk:", dk, dk.shape if dk is not None else None) + print("dv:", dv, dv.shape if dv is not None else None) + print("alibi_slopes:", alibi_slopes) + print("dropout_p:", dropout_p) + print("out:", out) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("deterministic:", deterministic) + print("gen_:", gen_) + print("rng_state:", rng_state) + + dq = torch.zeros_like(q) if dq is None else dq.zero_() + dk = torch.zeros_like(k) if dk is None else dk.zero_() + dv = torch.zeros_like(v) if dv is None else dv.zero_() + + # get shape + batch, _, nheads_q, _ = q.shape + + # Upstream change: base seeding logic on provided rng_state instead of dropout probability. + if rng_state is not None: + philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item() + else: + philox_seed, philox_offset = None, None + + if alibi_slopes is not None: + if alibi_slopes.dim() == 2: + pass + elif alibi_slopes.dim() == 1: + alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) + else: + raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") + + # call implementation + if DEBUG: + print("Using Triton implementation") + delta = attention_backward_triton_impl( + do=dout, + q=q, + k=k, + v=v, + o=out, + softmax_lse=softmax_lse, + dq=dq, + dk=dk, + dv=dv, + sm_scale=softmax_scale, + alibi_slopes=alibi_slopes, + causal=causal, + layout="bshd", + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=q.shape[1], + max_seqlen_k=k.shape[1], + seqused_q=None, + seqused_k=None, + dropout_p=dropout_p, + philox_seed=philox_seed, + philox_offset=philox_offset, + use_exp2=USE_EXP2, + mode=BWD_MODE, + ) + + if DEBUG: + print("flash_attn_triton_amd.py::bwd outputs") + print("dv:", dv, dv.shape) + print("dk:", dk, dk.shape) + print("dq:", dq, dq.shape) + # --- Assertions --- + assert dq.shape == q.shape, f"[bwd] dq shape {dq.shape} != q shape {q.shape}" + assert dk.shape == k.shape, f"[bwd] dk shape {dk.shape} != k shape {k.shape}" + assert dv.shape == v.shape, f"[bwd] dv shape {dv.shape} != v shape {v.shape}" + # delta (softmax_d) : (B, Hq, Sq) + expected_delta_shape = (q.shape[0], q.shape[2], q.shape[1]) + assert ( + delta.shape == expected_delta_shape + ), f"[bwd] delta shape {delta.shape} != {expected_delta_shape}" + return dq, dk, dv, delta + + +def varlen_fwd( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + out: Optional[torch.Tensor], + cu_seqlens_q: torch.Tensor, + cu_seqlens_k: torch.Tensor, + seqused_k: Optional[torch.Tensor], + leftpad_k: Optional[torch.Tensor], + block_table_: Optional[torch.Tensor], + alibi_slopes: Optional[torch.Tensor], + max_seqlen_q: int, + max_seqlen_k: int, + dropout_p: float, + softmax_scale: float, + zero_tensors: bool, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, + return_softmax: bool, + gen_: Optional[torch.Tensor] = None, +): + + if 
str(q.dtype).startswith("torch.float8"): + raise NotImplementedError( + "FP8 tensors are not supported in the AMD Triton FA2 interface (varlen_fwd). Use the FA3 path instead." + ) + + if softcap != 0.0: + raise NotImplementedError( + "softcap is not supported in varlen_fwd (expected 0.0)." + ) + if leftpad_k is not None: + raise NotImplementedError( + "leftpad_k is not supported in AMD Triton FA2 varlen_fwd." + ) + if block_table_ is not None: + raise NotImplementedError( + "block_table / paged attention is not supported in AMD Triton FA2 varlen_fwd." + ) + if seqused_k is not None: + raise NotImplementedError( + "seqused_k is not supported in AMD Triton FA2 varlen_fwd." + ) + + if DEBUG: + print() + print("flash_attn_triton_amd.py::varlen_fwd") + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape) + print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape) + print("alibi_slopes:", alibi_slopes) + print("max_seqlen_q:", max_seqlen_q) + print("max_seqlen_k:", max_seqlen_k) + print("dropout_p:", dropout_p) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("gen_:", gen_) + out = torch.zeros_like(q) if out is None else out.zero_() + + # Layout and basic info for varlen + layout = "thd" + batch = len(cu_seqlens_q) - 1 + _, nheads_q, _ = q.shape + + if alibi_slopes is not None: + if alibi_slopes.dim() == 1: + alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) + assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2 + assert alibi_slopes.shape == (batch, nheads_q) + + philox_seed, philox_offset = PHILOX_SEED, PHILOX_OFFSET + rng_state = torch.as_tensor([philox_seed, philox_offset]) + + # Inline checks (subset appropriate for varlen) + assert q.dim() == 3 and k.dim() == 3 and v.dim() == 3 + assert q.shape[-1] == k.shape[-1] == v.shape[-1] + assert q.dtype == k.dtype == v.dtype + assert out.shape == q.shape + nheads_k = k.shape[1] + assert (nheads_q % nheads_k) == 0 + + # call implementation + if DEBUG: + print("Using Triton implementation") + softmax_lse, sd_mask = attention_forward_prefill_triton_impl( + q, + k, + v, + out, + softmax_scale, + alibi_slopes, + causal, + window_size_left, + window_size_right, + None, + layout, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + dropout_p, + philox_seed, + philox_offset, + return_softmax, + USE_EXP2, + None, + None, + None, + ) + + if DEBUG: + print("varlen_fwd outputs") + print("out:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + print("sd_mask:", sd_mask, sd_mask.shape if sd_mask is not None else None) + # --- Assertions --- + # out: (Total_Q, Hq, D) + assert ( + out.shape == q.shape + ), f"[varlen_fwd] out shape {out.shape} != q shape {q.shape}" + # softmax_lse: (Hq, Total_Q) + expected_lse_shape = (q.shape[1], q.shape[0]) + assert ( + softmax_lse.shape == expected_lse_shape + ), f"[varlen_fwd] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}" + assert ( + softmax_lse.dtype == torch.float32 + ), f"[varlen_fwd] softmax_lse dtype {softmax_lse.dtype} != torch.float32" + if return_softmax: + # sd_mask expected: (B, Hq, max_seqlen_q, max_seqlen_k) + assert ( + sd_mask is not None + ), "[varlen_fwd] return_softmax=True but sd_mask is None" + assert sd_mask.dim() == 4, f"[varlen_fwd] sd_mask dim {sd_mask.dim()} != 4" + assert sd_mask.shape[0] == ( + len(cu_seqlens_q) - 1 + ), f"[varlen_fwd] 
sd_mask batch {sd_mask.shape[0]} != {len(cu_seqlens_q)-1}" + assert ( + sd_mask.shape[1] == q.shape[1] + ), f"[varlen_fwd] sd_mask nheads {sd_mask.shape[1]} != {q.shape[1]}" + else: + assert ( + sd_mask is None + ), "[varlen_fwd] return_softmax=False but sd_mask is not None" + return out, softmax_lse, sd_mask, rng_state + + +def varlen_bwd( + dout: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + out: torch.Tensor, + softmax_lse: torch.Tensor, + dq: Optional[torch.Tensor], + dk: Optional[torch.Tensor], + dv: Optional[torch.Tensor], + cu_seqlens_q: torch.Tensor, + cu_seqlens_k: torch.Tensor, + alibi_slopes: Optional[torch.Tensor], + max_seqlen_q: int, + max_seqlen_k: int, + dropout_p: float, + softmax_scale: float, + zero_tensors: bool, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, + deterministic: bool, + gen_: Optional[torch.Tensor] = None, + rng_state: Optional[torch.Tensor] = None, +): + if str(q.dtype).startswith("torch.float8"): + raise NotImplementedError( + "FP8 tensors are not supported in the AMD Triton FA2 interface (varlen_bwd). Use the FA3 path instead." + ) + if softcap != 0.0: + raise NotImplementedError( + "softcap is not supported in varlen_bwd (expected 0.0)." + ) + + if DEBUG: + print() + print("varlen_bwd") + print("dout:", dout, dout.shape) + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("out:", out) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + print("dq:", dq, dq.shape if dq is not None else None) + print("dk:", dk, dk.shape if dk is not None else None) + print("dv:", dv, dv.shape if dv is not None else None) + print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape) + print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape) + print("alibi_slopes:", alibi_slopes) + print("max_seqlen_q:", max_seqlen_q) + print("max_seqlen_k:", max_seqlen_k) + print("dropout_p:", dropout_p) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("deterministic:", deterministic) + print("gen_:", gen_) + print("rng_state:", rng_state) + + dq = torch.zeros_like(q) if dq is None else dq.zero_() + dk = torch.zeros_like(k) if dk is None else dk.zero_() + dv = torch.zeros_like(v) if dv is None else dv.zero_() + + # get shape + batch = len(cu_seqlens_q) - 1 + _, nheads_q, _ = q.shape + + # Upstream change: base seeding logic on provided rng_state instead of dropout probability. 
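+    # If rng_state is None, philox_seed / philox_offset stay None and are passed through
+    # unchanged to attention_backward_triton_impl below.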
+ if rng_state is not None: + philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item() + else: + philox_seed, philox_offset = None, None + + if alibi_slopes is not None: + if alibi_slopes.dim() == 2: + pass + elif alibi_slopes.dim() == 1: + alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) + else: + raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).") + + # call implementation + if DEBUG: + print("Using Triton implementation") + delta = attention_backward_triton_impl( + do=dout, + q=q, + k=k, + v=v, + o=out, + softmax_lse=softmax_lse, + dq=dq, + dk=dk, + dv=dv, + sm_scale=softmax_scale, + alibi_slopes=alibi_slopes, + causal=causal, + layout="thd", + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_q, + max_seqlen_k=max_seqlen_k, + seqused_q=None, + seqused_k=None, + dropout_p=dropout_p, + philox_seed=philox_seed, + philox_offset=philox_offset, + use_exp2=USE_EXP2, + mode=BWD_MODE, + ) + + if DEBUG: + print("varlen_bwd outputs") + print("delta:", delta, delta.shape) + print("dv:", dv, dv.shape) + print("dk:", dk, dk.shape) + print("dq:", dq, dq.shape) + # --- Assertions --- + assert dq.shape == q.shape, f"[varlen_bwd] dq shape {dq.shape} != q shape {q.shape}" + assert dk.shape == k.shape, f"[varlen_bwd] dk shape {dk.shape} != k shape {k.shape}" + assert dv.shape == v.shape, f"[varlen_bwd] dv shape {dv.shape} != v shape {v.shape}" + expected_delta_shape = (q.shape[1], q.shape[0]) # (Hq, Total_Q) + assert ( + delta.shape == expected_delta_shape + ), f"[varlen_bwd] delta shape {delta.shape} != {expected_delta_shape}" + return dq, dk, dv, delta + + +def fwd_kvcache( + q: torch.Tensor, + k_cache: torch.Tensor, + v_cache: torch.Tensor, + k: Optional[torch.Tensor], + v: Optional[torch.Tensor], + cache_seqlens: Optional[Union[(int, torch.Tensor)]], + rotary_cos: Optional[torch.Tensor], + rotary_sin: Optional[torch.Tensor], + cache_batch_idx: Optional[torch.Tensor], + cache_leftpad: Optional[torch.Tensor], + block_table: Optional[torch.Tensor], + alibi_slopes: Optional[torch.Tensor], + out: Optional[torch.Tensor], + softmax_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, + rotary_interleaved: bool, + num_splits: int, +): + + if softcap != 0.0: + raise NotImplementedError( + "softcap is not supported in fwd_kvcache (expected 0.0)." + ) + if num_splits not in (0, 1): + raise NotImplementedError( + "num_splits > 1 not supported in AMD Triton FA2 fwd_kvcache." 
+ ) + + if DEBUG: + print() + print("flash_attn_triton_amd.py::fwd_kvcache inputs") + print("q:", q, q.shape) + print("k_cache:", k_cache, k_cache.shape) + print("v_cache:", v_cache, v_cache.shape) + print("k:", k, k.shape if k is not None else None) + print("v:", v, v.shape if v is not None else None) + print("cache_seqlens:", cache_seqlens) + print("rotary_cos:", rotary_cos) + print("rotary_sin:", rotary_sin) + print("cache_batch_idx:", cache_batch_idx) + print("cache_leftpad:", cache_leftpad) + print("block_table:", block_table) + print("alibi_slopes:", alibi_slopes) + print("out:", out) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("softcap:", softcap) + print("rotary_interleaved:", rotary_interleaved) + print("num_splits:", num_splits) + + # output + out = torch.zeros_like(q) if out is None else out.zero_() + + # Basic layout info for decode path + layout = "bshd" + max_seqlen_q = q.shape[1] + max_seqlen_k = k_cache.shape[1] + cache_seqlens_tensor = ( + torch.tensor(cache_seqlens, device=q.device) + if isinstance(cache_seqlens, int) + else cache_seqlens + ) + window_left = ( + int(window_size_left.item()) + if isinstance(window_size_left, torch.Tensor) + else window_size_left + ) + window_right = ( + int(window_size_right.item()) + if isinstance(window_size_right, torch.Tensor) + else window_size_right + ) + + k_new = k + v_new = v + + # get shape + batch, _, nheads_q, _ = q.shape + + if alibi_slopes is not None: + if alibi_slopes.dim() == 1: + alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1) + assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2 + assert alibi_slopes.shape == (batch, nheads_q) + + # launch kernel + if DEBUG: + print("Using Triton implementation") + softmax_lse = attention_forward_decode_triton_impl( + q, + k_cache, + v_cache, + k_new, + v_new, + out, + softmax_scale, + causal, + window_left, + window_right, + alibi_slopes, + layout, + cache_seqlens_tensor, + cache_batch_idx, + block_table, + None, + None, + None, + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + rotary_interleaved=rotary_interleaved, + ) + + if DEBUG: + print("out:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + # --- Assertions --- + assert ( + out.shape == q.shape + ), f"[fwd_kvcache] out shape {out.shape} != q shape {q.shape}" + expected_lse_shape = (q.shape[0], q.shape[2], q.shape[1]) + assert ( + softmax_lse.shape == expected_lse_shape + ), f"[fwd_kvcache] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}" + assert ( + softmax_lse.dtype == torch.float32 + ), f"[fwd_kvcache] softmax_lse dtype {softmax_lse.dtype} != torch.float32" + return out, softmax_lse diff --git a/flash_attn/flash_attn_triton_amd/interface_v3.py b/flash_attn/flash_attn_triton_amd/interface_v3.py new file mode 100755 index 00000000000..436077a8a7c --- /dev/null +++ b/flash_attn/flash_attn_triton_amd/interface_v3.py @@ -0,0 +1,608 @@ +import torch +import os +from typing import Optional, Union, Tuple +from .fwd_prefill import attention_forward_prefill_triton_impl +from .fwd_decode import attention_forward_decode_triton_impl +from .bwd import attention_backward_triton_impl +from .utils import DEBUG, USE_EXP2, BWD_MODE, PHILOX_SEED, PHILOX_OFFSET, is_fp8 + + +def fwd( + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + k_new: Optional[torch.Tensor], + v_new: Optional[torch.Tensor], + qv: Optional[torch.Tensor], + out: Optional[torch.Tensor], + 
cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + cu_seqlens_k_new: Optional[torch.Tensor], + seqused_q: Optional[torch.Tensor], + seqused_k: Optional[torch.Tensor], + max_seqlen_q: Optional[int], + max_seqlen_k: Optional[int], + page_table: Optional[torch.Tensor], + kv_batch_idx: Optional[torch.Tensor], + leftpad_k: Optional[torch.Tensor], + rotary_cos: Optional[torch.Tensor], + rotary_sin: Optional[torch.Tensor], + seqlens_rotary: Optional[torch.Tensor], + q_descale: Optional[torch.Tensor], + k_descale: Optional[torch.Tensor], + v_descale: Optional[torch.Tensor], + softmax_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + attention_chunk: int, + softcap: float, + rotary_interleaved: bool, + scheduler_metadata=None, + num_splits: int = 1, + pack_gqa=None, + sm_margin: int = 0, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Flash Attention v3 forward pass compatible interface for AMD Triton implementation. + + This function maps v3 parameters to the existing AMD Triton implementation. + """ + + if DEBUG: + print() + print("interface_fa_v3.py::fwd inputs") + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("k_new:", k_new, k_new.shape if k_new is not None else None) + print("v_new:", v_new, v_new.shape if v_new is not None else None) + print("qv:", qv, qv.shape if qv is not None else None) + print("out:", out, out.shape if out is not None else None) + print( + "cu_seqlens_q:", + cu_seqlens_q, + cu_seqlens_q.shape if cu_seqlens_q is not None else None, + ) + print( + "cu_seqlens_k:", + cu_seqlens_k, + cu_seqlens_k.shape if cu_seqlens_k is not None else None, + ) + print( + "cu_seqlens_k_new:", + cu_seqlens_k_new, + cu_seqlens_k_new.shape if cu_seqlens_k_new is not None else None, + ) + print( + "seqused_q:", seqused_q, seqused_q.shape if seqused_q is not None else None + ) + print( + "seqused_k:", seqused_k, seqused_k.shape if seqused_k is not None else None + ) + print("max_seqlen_q:", max_seqlen_q) + print("max_seqlen_k:", max_seqlen_k) + print( + "page_table:", + page_table, + page_table.shape if page_table is not None else None, + ) + print( + "kv_batch_idx:", + kv_batch_idx, + kv_batch_idx.shape if kv_batch_idx is not None else None, + ) + print( + "leftpad_k:", leftpad_k, leftpad_k.shape if leftpad_k is not None else None + ) + print( + "rotary_cos:", + rotary_cos, + rotary_cos.shape if rotary_cos is not None else None, + ) + print( + "rotary_sin:", + rotary_sin, + rotary_sin.shape if rotary_sin is not None else None, + ) + print( + "seqlens_rotary:", + seqlens_rotary, + seqlens_rotary.shape if seqlens_rotary is not None else None, + ) + print( + "q_descale:", q_descale, q_descale.shape if q_descale is not None else None + ) + print( + "k_descale:", k_descale, k_descale.shape if k_descale is not None else None + ) + print( + "v_descale:", v_descale, v_descale.shape if v_descale is not None else None + ) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("attention_chunk:", attention_chunk) + print("softcap:", softcap) + print("rotary_interleaved:", rotary_interleaved) + print("scheduler_metadata:", scheduler_metadata) + print("num_splits:", num_splits) + print("pack_gqa:", pack_gqa) + print("sm_margin:", sm_margin) + + # Handle qv packed input + if qv is not None: + raise NotImplementedError( + "QV packed input is not yet supported in the AMD Triton backend" + ) + + # 
Handle softcap + if softcap != 0.0: + raise NotImplementedError( + f"Softcap is not yet supported in the AMD Triton backend (got softcap={softcap}, expected 0.0)" + ) + + # Handle attention_chunk + if attention_chunk != 0 and attention_chunk != 1: + raise NotImplementedError( + f"attention_chunk is not yet supported in the AMD Triton backend (got attention_chunk={attention_chunk})" + ) + + # Handle scheduler metadata + if scheduler_metadata is not None: + raise NotImplementedError( + "Scheduler metadata is not yet supported in the AMD Triton backend" + ) + + # Handle pack_gqa + if pack_gqa is not None and pack_gqa is not False: + raise NotImplementedError( + f"pack_gqa is not yet supported in the AMD Triton backend (got pack_gqa={pack_gqa})" + ) + + # Handle num_splits + if num_splits != 1: + raise NotImplementedError( + f"Split attention (num_splits > 1) is not yet supported in the AMD Triton backend (got num_splits={num_splits})" + ) + + # Handle sm_margin + if sm_margin != 0: + raise NotImplementedError( + f"sm_margin is not yet supported in the AMD Triton backend (got sm_margin={sm_margin}, expected 0)" + ) + + # Handle leftpad_k + if leftpad_k is not None: + raise NotImplementedError( + "Left padding (leftpad_k) is not yet supported in the AMD Triton backend" + ) + + # Handle cu_seqlens_k_new + if cu_seqlens_k_new is not None: + raise NotImplementedError( + "cu_seqlens_k_new is not yet supported in the AMD Triton backend" + ) + + # if seqlens_rotary is not None: + # raise NotImplementedError("seqlens_rotary is not yet supported in the AMD Triton backend") + + # establish layout / varlen & max seq lens + if cu_seqlens_q is not None: + if len(q.shape) != 3: + raise ValueError( + f"cu_seqlens_q provided but q has shape {q.shape}, expected 3D tensor for varlen" + ) + layout = "thd" + cu_seqlens_q_local = cu_seqlens_q + max_seqlens_q_local = max_seqlen_q + if cu_seqlens_k is not None: + cu_seqlens_k_local = cu_seqlens_k + max_seqlens_k_local = max_seqlen_k + else: + cu_seqlens_k_local = None + max_seqlens_k_local = k.shape[1] if len(k.shape) == 4 else max_seqlen_k + else: + layout = "bshd" + cu_seqlens_q_local = None + cu_seqlens_k_local = None + max_seqlens_q_local = q.shape[1] if max_seqlen_q is None else max_seqlen_q + max_seqlens_k_local = k.shape[1] if max_seqlen_k is None else max_seqlen_k + + # Now determine if we should use decode or prefill kernel + # Decode kernel should be used for KV cache scenarios where: + # 1. k_new/v_new are provided - incremental KV cache update (primary KV cache indicator) + # 2. kv_batch_idx is provided - KV cache batch indexing (primary KV cache indicator) + # 3. 
seqused_k without seqused_q - indicates KV cache fill levels (not varlen masking) + # Note: In varlen, both seqused_q and seqused_k are used for sequence masking + # In KV cache, only seqused_k is used to track cache fill levels + # Detect KV cache scenarios: + # - Clear KV cache indicators (k_new, v_new, kv_batch_idx) + # - OR seqused_k without seqused_q (KV cache fill tracking, not varlen masking) + use_decode = ( + k_new is not None # Have new KV to append (KV cache indicator) + or v_new is not None # Have new KV to append (KV cache indicator) + or kv_batch_idx is not None # Have KV cache batch indexing (KV cache indicator) + or ( + seqused_k is not None and seqused_q is None + ) # KV cache fill levels (not varlen) + ) + + # Check for unsupported features with decode kernel + if use_decode: + if layout == "thd": + raise NotImplementedError( + "Varlen is not yet supported with the decode kernel in the AMD Triton backend" + ) + if kv_batch_idx is not None: + raise NotImplementedError( + "kv_batch_idx is not yet supported with the decode kernel in the AMD Triton backend" + ) + + if out is None: + out_dtype = torch.float32 if is_fp8(q) else q.dtype + if layout == "bshd": + out = torch.zeros( + q.shape[0], + q.shape[1], + q.shape[2], + v.shape[-1], + dtype=out_dtype, + device=q.device, + ) + elif layout == "thd": + out = torch.zeros( + q.shape[0], q.shape[1], v.shape[-1], dtype=out_dtype, device=q.device + ) + else: + raise ValueError( + f"Unsupported layout: {layout}. Only 'bshd' and 'thd' layouts are supported." + ) + else: + out = out.zero_() + + if is_fp8(q): + if (q_descale is None) or (k_descale is None) or (v_descale is None): + import warnings + + warnings.warn( + "FP8 tensors detected but descale factors not provided. Using default scale of 1.0", + UserWarning, + ) + else: + # Enforce exact expected shapes; no reshaping or normalization. 
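+            # All three descale tensors must be 2-D with shape (batch, num_kv_heads);
+            # anything else trips the assertions below rather than being silently broadcast.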
+ if layout == "bshd": + expected_batch = q.shape[0] + expected_q_heads = q.shape[2] + expected_kv_heads = k.shape[2] + else: # thd layout + expected_batch = ( + (len(cu_seqlens_q_local) - 1) + if cu_seqlens_q_local is not None + else 1 + ) + expected_q_heads = q.shape[1] + expected_kv_heads = k.shape[1] + + assert ( + q_descale.dim() == 2 + and q_descale.shape[0] == expected_batch + and q_descale.shape[1] == expected_kv_heads + ), f"q_descale expected shape ({expected_batch}, {expected_kv_heads}) got {tuple(q_descale.shape)}" + assert ( + k_descale.dim() == 2 + and k_descale.shape[0] == expected_batch + and k_descale.shape[1] == expected_kv_heads + ), f"k_descale expected shape ({expected_batch}, {expected_kv_heads}) got {tuple(k_descale.shape)}" + assert ( + v_descale.dim() == 2 + and v_descale.shape[0] == expected_batch + and v_descale.shape[1] == expected_kv_heads + ), f"v_descale expected shape ({expected_batch}, {expected_kv_heads}) got {tuple(v_descale.shape)}" + + # Handle causal mask + causal_flag = bool(causal) + + # Handle alibi slopes + alibi_slopes = None + + # Handle dropout + dropout_p = 0.0 + return_softmax = False + philox_seed = PHILOX_SEED + philox_offset = PHILOX_OFFSET + + # Call implementation + if DEBUG: + print("Using Triton implementation") + + if use_decode: + if DEBUG: + print( + f"Using Decode Triton implementation (cache_seqlens={seqused_k is not None}, k_new={k_new is not None}, v_new={v_new is not None}, kv_batch_idx={kv_batch_idx is not None})" + ) + + softmax_lse = attention_forward_decode_triton_impl( + q, + k, + v, + k_new, + v_new, + out, + softmax_scale, + causal_flag, + window_size_left, + window_size_right, + alibi_slopes, + layout, + seqused_k, + kv_batch_idx, + page_table, + q_descale, + k_descale, + v_descale, + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + rotary_interleaved=rotary_interleaved, + seqlens_rotary=seqlens_rotary, + ) + else: + if DEBUG: + print("Using Prefill Triton implementation") + softmax_lse, _ = attention_forward_prefill_triton_impl( + q, + k, + v, + out, + softmax_scale, + alibi_slopes, + causal_flag, + window_size_left, + window_size_right, + None, + layout, + cu_seqlens_q_local, + cu_seqlens_k_local, + max_seqlens_q_local, + max_seqlens_k_local, + dropout_p, + philox_seed, + philox_offset, + return_softmax, + USE_EXP2, + q_descale, + k_descale, + v_descale, + seqused_q, + seqused_k, + rotary_cos=rotary_cos, + rotary_sin=rotary_sin, + rotary_interleaved=rotary_interleaved, + seqlens_rotary=seqlens_rotary, + ) + + if DEBUG: + print("interface_fa_v3.py::fwd outputs") + print("out:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + + # Return format compatible with v3 + # V3 returns (out, softmax_lse, *rest) where rest can be empty or contain additional outputs + return out, softmax_lse + + +def bwd( + dout: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + out: torch.Tensor, + softmax_lse: torch.Tensor, + dq: Optional[torch.Tensor], + dk: Optional[torch.Tensor], + dv: Optional[torch.Tensor], + cu_seqlens_q: Optional[torch.Tensor], + cu_seqlens_k: Optional[torch.Tensor], + seqused_q: Optional[torch.Tensor], + seqused_k: Optional[torch.Tensor], + max_seqlen_q: Optional[int], + max_seqlen_k: Optional[int], + softmax_scale: float, + causal: bool, + window_size_left: int, + window_size_right: int, + softcap: float, + deterministic: bool, + sm_margin: int = 0, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Flash Attention v3 backward pass compatible 
interface for AMD Triton implementation. + + This function maps v3 parameters to the existing AMD Triton implementation. + """ + + if DEBUG: + print() + print("interface_fa_v3.py::bwd inputs") + print("dout:", dout, dout.shape) + print("q:", q, q.shape) + print("k:", k, k.shape) + print("v:", v, v.shape) + print("out:", out, out.shape) + print("softmax_lse:", softmax_lse, softmax_lse.shape) + print("dq:", dq, dq.shape if dq is not None else None) + print("dk:", dk, dk.shape if dk is not None else None) + print("dv:", dv, dv.shape if dv is not None else None) + print( + "cu_seqlens_q:", + cu_seqlens_q, + cu_seqlens_q.shape if cu_seqlens_q is not None else None, + ) + print( + "cu_seqlens_k:", + cu_seqlens_k, + cu_seqlens_k.shape if cu_seqlens_k is not None else None, + ) + print( + "seqused_q:", seqused_q, seqused_q.shape if seqused_q is not None else None + ) + print( + "seqused_k:", seqused_k, seqused_k.shape if seqused_k is not None else None + ) + print("max_seqlen_q:", max_seqlen_q) + print("max_seqlen_k:", max_seqlen_k) + print("softmax_scale:", softmax_scale) + print("causal:", causal) + print("window_size_left:", window_size_left) + print("window_size_right:", window_size_right) + print("softcap:", softcap) + print("deterministic:", deterministic) + print("sm_margin:", sm_margin) + + # Check for unsupported features in backward pass + + # Handle softcap + if softcap != 0.0: + raise NotImplementedError( + f"Softcap is not yet supported in the AMD Triton backend backward pass (got softcap={softcap}, expected 0.0)" + ) + + # Handle sm_margin + if sm_margin != 0: + raise NotImplementedError( + f"sm_margin is not yet supported in the AMD Triton backend backward pass (got sm_margin={sm_margin}, expected 0)" + ) + + # Initialize gradient tensors if not provided + dq = torch.zeros_like(q) if dq is None else dq.zero_() + dk = torch.zeros_like(k) if dk is None else dk.zero_() + dv = torch.zeros_like(v) if dv is None else dv.zero_() + + # Determine layout based on cu_seqlens + if cu_seqlens_q is not None and cu_seqlens_k is not None: + # Variable length sequence mode + layout = "thd" + batch = len(cu_seqlens_q) - 1 + _, nheads_q, _ = q.shape + else: + # Regular batch mode + layout = "bshd" + batch, _, nheads_q, _ = q.shape + max_seqlen_q = q.shape[1] if max_seqlen_q is None else max_seqlen_q + max_seqlen_k = k.shape[1] if max_seqlen_k is None else max_seqlen_k + + # V3 backward doesn't have dropout or alibi slopes + dropout_p = 0.0 + philox_seed, philox_offset = None, None + alibi_slopes = None + + # Call implementation + if DEBUG: + print("Using Triton implementation (unified backward dispatcher)") + delta = attention_backward_triton_impl( + do=dout, + q=q, + k=k, + v=v, + o=out, + softmax_lse=softmax_lse, + dq=dq, + dk=dk, + dv=dv, + sm_scale=softmax_scale, + alibi_slopes=alibi_slopes, + causal=causal, + layout=layout, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_q, + max_seqlen_k=max_seqlen_k, + seqused_q=seqused_q, + seqused_k=seqused_k, + dropout_p=dropout_p, + philox_seed=philox_seed, + philox_offset=philox_offset, + use_exp2=USE_EXP2, + mode=BWD_MODE, + ) + + if DEBUG: + print("interface_fa_v3.py::bwd outputs") + print("dq:", dq, dq.shape) + print("dk:", dk, dk.shape) + print("dv:", dv, dv.shape) + print("delta:", delta, delta.shape if delta is not None else None) + + # V3 expects (dq, dk, dv, softmax_d, *rest) + # delta is the softmax_d in this case + return dq, dk, dv, delta + + +def fwd_combine( + out_partial: torch.Tensor, + lse_partial: 
torch.Tensor, + out: Optional[torch.Tensor] = None, + out_dtype: Optional[torch.dtype] = None, +) -> torch.Tensor: + """ + Combine partial outputs from split attention computation. + + This is used when num_splits > 1 to combine the partial results. + + Args: + out_partial: Partial output tensor from split computation + lse_partial: Partial log-sum-exp tensor + out: Optional output tensor to write to + out_dtype: Optional dtype for output + + Returns: + Combined output tensor + """ + raise NotImplementedError( + "fwd_combine is not yet implemented in the AMD Triton backend" + ) + + +def get_scheduler_metadata( + batch_size: int, + max_seqlen_q: int, + max_seqlen_k: int, + num_heads_q: int, + num_heads_kv: int, + headdim: int, + headdim_v: int, + qkv_dtype: torch.dtype, + cache_seqlens: torch.Tensor, + cu_seqlens_q: Optional[torch.Tensor] = None, + cu_seqlens_k: Optional[torch.Tensor] = None, + cu_seqlens_k_new: Optional[torch.Tensor] = None, + seqused_q: Optional[torch.Tensor] = None, + cache_leftpad: Optional[torch.Tensor] = None, + page_size: Optional[int] = None, + max_seqlen_k_new: int = 0, + causal: bool = False, + window_size_left: int = -1, + window_size_right: int = -1, + attention_chunk: int = 0, + has_softcap: bool = False, + num_splits: int = 0, + pack_gqa: Optional[bool] = None, + sm_margin: int = 0, +): + """ + Get scheduler metadata for optimized kernel selection. + + This function is used to precompute metadata for kernel scheduling in FA3. + The AMD Triton backend currently doesn't use scheduler metadata, so this + raises an error. + + Args: + Various attention parameters used for scheduling decisions + + Raises: + NotImplementedError: scheduler metadata is not yet supported in the AMD Triton backend + """ + raise NotImplementedError( + "get_scheduler_metadata is not supported in the AMD Triton backend yet."
+ ) diff --git a/flash_attn/flash_attn_triton_amd/utils.py b/flash_attn/flash_attn_triton_amd/utils.py index 44502785a35..71ed1c1c2de 100644 --- a/flash_attn/flash_attn_triton_amd/utils.py +++ b/flash_attn/flash_attn_triton_amd/utils.py @@ -7,147 +7,57 @@ import triton import triton.language as tl import numpy as np -from typing import Literal, Optional +from typing import Literal, Optional, Union, Tuple # ------------------------------- # Gloabl Variables # ------------------------------- -AUTOTUNE = os.environ.get('FLASH_ATTENTION_TRITON_AMD_AUTOTUNE', '0').lower() in ('1', 'true', 'yes') +AUTOTUNE = os.environ.get("FLASH_ATTENTION_TRITON_AMD_AUTOTUNE", "0").lower() in ( + "1", + "true", + "yes", +) if AUTOTUNE: os.environ["TRITON_PRINT_AUTOTUNING"] = "1" -DEBUG = os.environ.get('FLASH_ATTENTION_TRITON_AMD_DEBUG', '0').lower() in ('1', 'true', 'yes') -USE_REF = os.environ.get('FLASH_ATTENTION_TRITON_AMD_REF', '0').lower() in ('1', 'true', 'yes') -PERF = os.environ.get('FLASH_ATTENTION_TRITON_AMD_PERF', '0').lower() in ('1', 'true', 'yes') -USE_SINGLE_BWD_KERNEL = os.environ.get('USE_SINGLE_BWD_KERNEL', '0').lower() in ('1', 'true', 'yes') +DEBUG = os.environ.get("FLASH_ATTENTION_TRITON_AMD_DEBUG", "0").lower() in ( + "1", + "true", + "yes", +) +PERF = os.environ.get("FLASH_ATTENTION_TRITON_AMD_PERF", "0").lower() in ( + "1", + "true", + "yes", +) +USE_SINGLE_BWD_KERNEL = os.environ.get("USE_SINGLE_BWD_KERNEL", "0").lower() in ( + "1", + "true", + "yes", +) USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE" -USE_TRITON_INTERPRET = os.environ.get('TRITON_INTERPRET', '0').lower() in ('1', 'true', 'yes') -DEBUG_TRITON = os.environ.get('DEBUG_TRITON', '0').lower() in ('1', 'true', 'yes') and USE_TRITON_INTERPRET -DEBUG_TRITON_DETAIL = os.environ.get('DEBUG_TRITON_DETAIL', '0').lower() in ('1', 'true', 'yes') and USE_TRITON_INTERPRET -if USE_TRITON_ROCM: # TODO remove this +USE_TRITON_INTERPRET = os.environ.get("TRITON_INTERPRET", "0").lower() in ( + "1", + "true", + "yes", +) +DEBUG_TRITON = ( + os.environ.get("DEBUG_TRITON", "0").lower() in ("1", "true", "yes") + and USE_TRITON_INTERPRET +) +DEBUG_TRITON_DETAIL = ( + os.environ.get("DEBUG_TRITON_DETAIL", "0").lower() in ("1", "true", "yes") + and USE_TRITON_INTERPRET +) +if USE_TRITON_ROCM: # TODO remove this random.seed(42) +BWD_MODE = os.environ.get("BWD_MODE", "fused_no_atomics").lower() DROPOUT_USE_PYTORCH = False DROPOUT_DUMP = False +USE_EXP2 = True +PHILOX_SEED = 0x1BF58 +PHILOX_OFFSET = 0x1D4B49 -# ------------------------------- -# Metadata -# ------------------------------- -class MetaData(): - cu_seqlens_q: Optional[torch.Tensor] = None - cu_seqlens_k: Optional[torch.Tensor] = None - max_seqlens_q: int = 0 - max_seqlens_k: int = 0 - bias: Optional[torch.Tensor] = None - alibi_slopes: Optional[torch.Tensor] = None - causal: bool = False - num_contexts = 0 - varlen: bool = False - layout: Optional[Literal["bshd", "bhsd", "thd"]] = None - cache_seqlens: Optional[torch.Tensor] = None - cache_batch_idx = None - packing: Optional[bool] = None - return_softmax: bool = False - dropout_p: float = 0.0 - philox_seed: Optional[int] = None - philox_offset : Optional[int]= None # if dropout_p > 0.0 seed the RNG so we get reproducible results for testing. - # NOTE: scale sm_scale by log_2(e) and use 2^x in the loop as we do not have native e^x support in HW. 
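# --- Editor's note (illustrative sketch, not part of the diff): the NOTE above and the
# new USE_EXP2 flag rely on the identity e^x == 2^(x * log2(e)), so a softmax computed
# with the hardware-friendly exp2 instruction matches the usual e^x softmax. All names
# below (scores, sm_scale) are hypothetical inputs chosen only for this check.
import math
import torch

scores = torch.randn(4, 8)                       # hypothetical attention scores
sm_scale = 64 ** -0.5                            # hypothetical head_dim = 64
ref = torch.softmax(scores * sm_scale, dim=-1)   # e^x path
p = torch.exp2(scores * sm_scale * math.log2(math.e))
alt = p / p.sum(dim=-1, keepdim=True)            # exp2 path taken when USE_EXP2 is set
assert torch.allclose(ref, alt, atol=1e-6)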
- rotary_sin: Optional[torch.Tensor] = None - rotary_cos: Optional[torch.Tensor] = None - rotary_interleaved: bool = False - rotary_conjunction: bool = False - window_size_left: int = -1 - window_size_right: int = -1 - - - def __repr__(self) -> str: - return (f"MetaData(\n" - f" sm_scale={self.sm_scale},\n" - f" cu_seqlens_q={self.cu_seqlens_q},\n" - f" cu_seqlens_k={self.cu_seqlens_k},\n" - f" max_seqlens_q={self.max_seqlens_q},\n" - f" max_seqlens_k={self.max_seqlens_k},\n" - f" bias={self.bias},\n" - f" alibi_slopes={self.alibi_slopes},\n" - f" causal={self.causal},\n" - f" num_contexts={self.num_contexts},\n" - f" varlen={self.varlen},\n" - f" layout={self.layout},\n" - f" cache_seqlens={self.cache_seqlens},\n" - f" cache_batch_idx={self.cache_batch_idx},\n" - f" dropout_p={self.dropout_p},\n" - f" return_softmax={self.return_softmax}\n" - f" window_size_left={self.window_size_left},\n" - f" window_size_right={self.window_size_right},\n" - f")") - - def __init__(self, sm_scale=1.0): - self.sm_scale = sm_scale - - def set_varlen_params(self, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k): - self.varlen = True - self.layout = 'thd' - self.cu_seqlens_q = cu_seqlens_q - self.cu_seqlens_k = cu_seqlens_k - self.max_seqlens_q = max_seqlen_q - self.max_seqlens_k = max_seqlen_k - - # Without "varlen", there should still be one sequence. - assert len(cu_seqlens_q) >= 2 - assert len(cu_seqlens_q) == len(cu_seqlens_k) - - def need_bias(self, bias, batch, nheads, seqlen_q, seqlen_k): - assert bias.is_cuda - assert bias.dim() == 4 - assert bias.shape[0] == 1 - assert bias.shape[2:] == (seqlen_q, seqlen_k) - self.bias = bias - - def need_alibi(self, alibi_slopes, batch, nheads): - assert alibi_slopes.is_cuda - assert alibi_slopes.dim() == 2 - assert alibi_slopes.shape[0] == batch - assert alibi_slopes.shape[1] == nheads - self.alibi_slopes = alibi_slopes - - def need_causal(self, causal): - self.causal = causal - - def need_rotary(self, sin, cos, rotary_interleaved, rotary_conjunction=False): - self.rotary_sin = sin - self.rotary_cos = cos - self.rotary_interleaved = rotary_interleaved - self.rotary_conjunction = rotary_conjunction - - def need_dropout(self, dropout_p, return_softmax): - self.dropout_p = dropout_p - self.return_softmax = return_softmax - self.philox_seed, self.philox_offset = 0x1BF58, 0x1D4B49 - - def check_args(self, q, k, v, o): - assert q.dim() == k.dim() and q.dim() == v.dim() - - batch, nheads_q, nheads_k, head_size, _, _ = get_shapes_from_layout(q, k, self.layout, self.cu_seqlens_q, self.cu_seqlens_k, self.max_seqlens_q, self.max_seqlens_k) - if self.varlen: - assert q.dim() == 3 - assert self.cu_seqlens_q is not None - assert self.cu_seqlens_k is not None - assert len(self.cu_seqlens_q) == len(self.cu_seqlens_k) - # TODO: Remove once bias is supported with varlen - assert self.bias is None - # assert not self.return_softmax - else: - assert q.dim() == 4 - assert self.max_seqlens_q > 0 and self.max_seqlens_k > 0 - assert self.cu_seqlens_q is None and self.cu_seqlens_k is None - # assert k.shape == v.shape - assert q.shape[-1] == k.shape[-1] # and q.shape[-1] == v.shape[-1] - # TODO: Change assert if we support qkl f8 and v f16 - assert q.dtype == k.dtype and q.dtype == v.dtype - assert o.shape[:-1] == q.shape[:-1] and o.shape[-1] == v.shape[-1] - assert (nheads_q % nheads_k) == 0 - assert self.layout is not None - assert self.layout == 'thd' or not self.varlen - # ------------------------------- # Input Helper # ------------------------------- @@ -155,14 +65,17 @@ def 
random_seqlens_composition(SEQ_LEN, BATCH): # generate a random composition of N into Z positive parts. idx = torch.randperm(SEQ_LEN - 1)[: BATCH - 1] + 1 idx, _ = torch.sort(idx) - breakpoints = torch.cat([ - torch.tensor([0], dtype=torch.long), - idx, - torch.tensor([SEQ_LEN], dtype=torch.long), - ]) + breakpoints = torch.cat( + [ + torch.tensor([0], dtype=torch.long), + idx, + torch.tensor([SEQ_LEN], dtype=torch.long), + ] + ) seqlens = (breakpoints[1:] - breakpoints[:-1]).to(torch.int32) return seqlens + def generate_varlen_tensor( total_seqlen: int, num_heads: int, @@ -171,7 +84,7 @@ def generate_varlen_tensor( equal_seqlens: bool = False, device: str = "cuda", dtype: torch.dtype = torch.float16, - mode: Literal["random", "ones", "incremental", "identity"] = "random" + mode: Literal["random", "ones", "incremental", "identity"] = "random", ): if DEBUG: print("total_seqlen", total_seqlen) @@ -186,23 +99,28 @@ def generate_varlen_tensor( # get valid batch_size if batch_size is None: - valid_batch_sizes = [bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen] + valid_batch_sizes = [ + bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen + ] batch_size = random.choice(valid_batch_sizes) - + # get seqlens if equal_seqlens: seqlens = torch.full( - (batch_size,), - total_seqlen // batch_size, - dtype=torch.int32, - device=device + (batch_size,), total_seqlen // batch_size, dtype=torch.int32, device=device ) seqlens[-1] += total_seqlen % batch_size else: seqlens = random_seqlens_composition(total_seqlen, batch_size).to(device=device) # create cumulative sequence lengths - cu_seqlens = torch.cat([torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)]).to(torch.int32).to(device=device) + cu_seqlens = ( + torch.cat( + [torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)] + ) + .to(torch.int32) + .to(device=device) + ) max_seqlen = torch.max(seqlens).to(torch.int32).item() # create varlen tensor based on mode @@ -210,8 +128,8 @@ def generate_varlen_tensor( x = torch.zeros(total_seqlen, num_heads, head_size, dtype=dtype, device=device) for i in range(batch_size): start = cu_seqlens[i].item() - end = cu_seqlens[i+1].item() - length = end - start + end = cu_seqlens[i + 1].item() + length = end - start x[start:end, :, :] = ( torch.arange(length, dtype=dtype, device=device) @@ -223,14 +141,16 @@ def generate_varlen_tensor( # for each batch, create identity pattern within that batch's sequence for i in range(batch_size): start = cu_seqlens[i].item() - end = cu_seqlens[i+1].item() + end = cu_seqlens[i + 1].item() length = end - start - + # create identity pattern for positions within this batch for pos in range(min(length, head_size)): x[start + pos, :, pos] = 1.0 elif mode == "random": - x = torch.randn((total_seqlen, num_heads, head_size), dtype=dtype, device=device) + x = torch.randn( + (total_seqlen, num_heads, head_size), dtype=dtype, device=device + ) elif mode == "ones": x = torch.ones((total_seqlen, num_heads, head_size), dtype=dtype, device=device) else: @@ -238,14 +158,25 @@ def generate_varlen_tensor( if is_fp8_dtype: # cast to fp8 - x, descale_x = cast_to_fp8(x, og_fp8_dtype, "thd", cu_seqlens=cu_seqlens, max_seqlen=max_seqlen) + x, descale_x = cast_to_fp8( + x, og_fp8_dtype, "thd", cu_seqlens=cu_seqlens, max_seqlen=max_seqlen + ) x.requires_grad_() return x, cu_seqlens, max_seqlen, descale_x else: x.requires_grad_() return x, cu_seqlens, max_seqlen -def generate_bshd_tensor(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = 
torch.float16, device="cuda", mode: Literal["random", "ones", "incremental", "identity"] = "random"): + +def generate_bshd_tensor( + BATCH, + SEQ_LEN, + NUM_HEADS, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + mode: Literal["random", "ones", "incremental", "identity"] = "random", +): # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) if is_fp8_dtype: @@ -255,7 +186,12 @@ def generate_bshd_tensor(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = # gen tensor based on mode tensor_shape = (BATCH, SEQ_LEN, NUM_HEADS, D_HEAD) if mode == "incremental": - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, SEQ_LEN, 1, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, SEQ_LEN, 1, 1) + .expand(*tensor_shape) + .contiguous() + ) elif mode == "identity": x = torch.zeros(tensor_shape, dtype=dtype, device=device) # create identity pattern: position i has value 1 at dimension i @@ -267,7 +203,7 @@ def generate_bshd_tensor(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = x = torch.ones(tensor_shape, dtype=dtype, device=device) else: raise ValueError(f"Unkown mode {mode}") - + if is_fp8_dtype: # cast to fp8 x, descale_x = cast_to_fp8(x, og_fp8_dtype, "bshd") @@ -277,17 +213,31 @@ def generate_bshd_tensor(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = x.requires_grad_() return x -def generate_bhsd_tensor(BATCH, NUM_HEADS, SEQ_LEN, D_HEAD, dtype: torch.dtype = torch.float16, device="cuda", mode: Literal["random", "ones", "incremental", "identity"] = "random"): + +def generate_bhsd_tensor( + BATCH, + NUM_HEADS, + SEQ_LEN, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + mode: Literal["random", "ones", "incremental", "identity"] = "random", +): # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) if is_fp8_dtype: og_fp8_dtype = dtype dtype = torch.float32 - + # gen tensor based on mode tensor_shape = (BATCH, NUM_HEADS, SEQ_LEN, D_HEAD) if mode == "incremental": - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, 1, SEQ_LEN, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, 1, SEQ_LEN, 1) + .expand(*tensor_shape) + .contiguous() + ) elif mode == "identity": x = torch.zeros(tensor_shape, dtype=dtype, device=device) # create identity pattern: position i has value 1 at dimension i @@ -299,14 +249,23 @@ def generate_bhsd_tensor(BATCH, NUM_HEADS, SEQ_LEN, D_HEAD, dtype: torch.dtype = x = torch.ones(tensor_shape, dtype=dtype, device=device) else: raise ValueError(f"Unkown mode {mode}") - + if is_fp8_dtype: raise ValueError("fp8 not supported for bhsd yet") else: x.requires_grad_() return x -def generate_bshd_qkv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = torch.float16, device="cuda", DEBUG_INPUT=False): + +def generate_bshd_qkv_packed( + BATCH, + SEQ_LEN, + NUM_HEADS, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + DEBUG_INPUT=False, +): """Generate QKV packed tensor with shape (BATCH, SEQ_LEN, 3, NUM_HEADS, D_HEAD)""" # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) @@ -317,10 +276,15 @@ def generate_bshd_qkv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dty # gen tensor tensor_shape = (BATCH, SEQ_LEN, 3, NUM_HEADS, D_HEAD) if DEBUG_INPUT: - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, SEQ_LEN, 1, 1, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, SEQ_LEN, 1, 1, 1) + .expand(*tensor_shape) + 
.contiguous() + ) else: x = torch.randn(tensor_shape, dtype=dtype, device=device) - + if is_fp8_dtype: # cast to fp8 - need to handle the packed dimension raise NotImplementedError("FP8 not supported for QKV packing yet") @@ -329,7 +293,15 @@ def generate_bshd_qkv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dty return x -def generate_bshd_kv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtype = torch.float16, device="cuda", DEBUG_INPUT=False): +def generate_bshd_kv_packed( + BATCH, + SEQ_LEN, + NUM_HEADS, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + DEBUG_INPUT=False, +): """Generate KV packed tensor with shape (BATCH, SEQ_LEN, 2, NUM_HEADS, D_HEAD)""" # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) @@ -340,10 +312,15 @@ def generate_bshd_kv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtyp # gen tensor tensor_shape = (BATCH, SEQ_LEN, 2, NUM_HEADS, D_HEAD) if DEBUG_INPUT: - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, SEQ_LEN, 1, 1, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, SEQ_LEN, 1, 1, 1) + .expand(*tensor_shape) + .contiguous() + ) else: x = torch.randn(tensor_shape, dtype=dtype, device=device) - + if is_fp8_dtype: # cast to fp8 - need to handle the packed dimension raise NotImplementedError("FP8 not supported for KV packing yet") @@ -352,21 +329,34 @@ def generate_bshd_kv_packed(BATCH, SEQ_LEN, NUM_HEADS, D_HEAD, dtype: torch.dtyp return x -def generate_bhsd_qkv_packed(BATCH, NUM_HEADS, SEQ_LEN, D_HEAD, dtype: torch.dtype = torch.float16, device="cuda", DEBUG_INPUT=False): +def generate_bhsd_qkv_packed( + BATCH, + NUM_HEADS, + SEQ_LEN, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + DEBUG_INPUT=False, +): """Generate QKV packed tensor with shape (BATCH, 3, NUM_HEADS, SEQ_LEN, D_HEAD)""" # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) if is_fp8_dtype: og_fp8_dtype = dtype dtype = torch.float32 - + # gen tensor tensor_shape = (BATCH, 3, NUM_HEADS, SEQ_LEN, D_HEAD) if DEBUG_INPUT: - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, 1, 1, SEQ_LEN, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, 1, 1, SEQ_LEN, 1) + .expand(*tensor_shape) + .contiguous() + ) else: x = torch.randn(tensor_shape, dtype=dtype, device=device) - + if is_fp8_dtype: # cast to fp8 - need to handle the packed dimension raise NotImplementedError("FP8 not supported for QKV packing yet") @@ -375,21 +365,34 @@ def generate_bhsd_qkv_packed(BATCH, NUM_HEADS, SEQ_LEN, D_HEAD, dtype: torch.dty return x -def generate_bhsd_kv_packed(BATCH, NUM_HEADS, SEQ_LEN, D_HEAD, dtype: torch.dtype = torch.float16, device="cuda", DEBUG_INPUT=False): +def generate_bhsd_kv_packed( + BATCH, + NUM_HEADS, + SEQ_LEN, + D_HEAD, + dtype: torch.dtype = torch.float16, + device="cuda", + DEBUG_INPUT=False, +): """Generate KV packed tensor with shape (BATCH, 2, NUM_HEADS, SEQ_LEN, D_HEAD)""" # save fp8 type is_fp8_dtype = is_dtype_fp8(dtype) if is_fp8_dtype: og_fp8_dtype = dtype dtype = torch.float32 - + # gen tensor tensor_shape = (BATCH, 2, NUM_HEADS, SEQ_LEN, D_HEAD) if DEBUG_INPUT: - x = torch.arange(SEQ_LEN, dtype=dtype, device=device).view(1, 1, 1, SEQ_LEN, 1).expand(*tensor_shape).contiguous() + x = ( + torch.arange(SEQ_LEN, dtype=dtype, device=device) + .view(1, 1, 1, SEQ_LEN, 1) + .expand(*tensor_shape) + .contiguous() + ) else: x = torch.randn(tensor_shape, dtype=dtype, device=device) - + if is_fp8_dtype: # cast 
to fp8 - need to handle the packed dimension raise NotImplementedError("FP8 not supported for KV packing yet") @@ -406,7 +409,7 @@ def generate_varlen_qkv_packed( equal_seqlens: bool = False, device: str = "cuda", dtype: torch.dtype = torch.float16, - DEBUG_INPUT: bool = False + DEBUG_INPUT: bool = False, ): """Generate varlen QKV packed tensor with shape (total_seqlen, 3, num_heads, head_size)""" if DEBUG: @@ -423,31 +426,38 @@ def generate_varlen_qkv_packed( # get valid batch_size if batch_size is None: - valid_batch_sizes = [bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen] + valid_batch_sizes = [ + bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen + ] batch_size = random.choice(valid_batch_sizes) - + # get seqlens if equal_seqlens: seqlens = torch.full( - (batch_size,), - total_seqlen // batch_size, - dtype=torch.int32, - device=device + (batch_size,), total_seqlen // batch_size, dtype=torch.int32, device=device ) seqlens[-1] += total_seqlen % batch_size else: seqlens = random_seqlens_composition(total_seqlen, batch_size).to(device=device) # create cumulative sequence lengths - cu_seqlens = torch.cat([torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)]).to(torch.int32).to(device=device) + cu_seqlens = ( + torch.cat( + [torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)] + ) + .to(torch.int32) + .to(device=device) + ) max_seqlen = torch.max(seqlens).to(torch.int32).item() # create varlen qkv packed tensor if DEBUG_INPUT: - x = torch.zeros(total_seqlen, 3, num_heads, head_size, dtype=dtype, device=device) + x = torch.zeros( + total_seqlen, 3, num_heads, head_size, dtype=dtype, device=device + ) for i in range(batch_size): start = cu_seqlens[i].item() - end = cu_seqlens[i+1].item() + end = cu_seqlens[i + 1].item() length = end - start x[start:end, :, :, :] = ( @@ -456,7 +466,9 @@ def generate_varlen_qkv_packed( .expand(length, 3, num_heads, head_size) ) else: - x = torch.randn((total_seqlen, 3, num_heads, head_size), dtype=dtype, device=device) + x = torch.randn( + (total_seqlen, 3, num_heads, head_size), dtype=dtype, device=device + ) if is_fp8_dtype: # cast to fp8 - need to handle the packed dimension @@ -474,7 +486,7 @@ def generate_varlen_kv_packed( equal_seqlens: bool = False, device: str = "cuda", dtype: torch.dtype = torch.float16, - DEBUG_INPUT: bool = False + DEBUG_INPUT: bool = False, ): """Generate varlen KV packed tensor with shape (total_seqlen, 2, num_heads, head_size)""" if DEBUG: @@ -491,31 +503,38 @@ def generate_varlen_kv_packed( # get valid batch_size if batch_size is None: - valid_batch_sizes = [bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen] + valid_batch_sizes = [ + bs for bs in [1, 2, 4, 8, 16, 32, 64] if bs <= total_seqlen + ] batch_size = random.choice(valid_batch_sizes) - + # get seqlens if equal_seqlens: seqlens = torch.full( - (batch_size,), - total_seqlen // batch_size, - dtype=torch.int32, - device=device + (batch_size,), total_seqlen // batch_size, dtype=torch.int32, device=device ) seqlens[-1] += total_seqlen % batch_size else: seqlens = random_seqlens_composition(total_seqlen, batch_size).to(device=device) # create cumulative sequence lengths - cu_seqlens = torch.cat([torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)]).to(torch.int32).to(device=device) + cu_seqlens = ( + torch.cat( + [torch.tensor([0], dtype=torch.int32, device=device), seqlens.cumsum(dim=0)] + ) + .to(torch.int32) + .to(device=device) + ) max_seqlen = 
torch.max(seqlens).to(torch.int32).item() # create varlen kv packed tensor if DEBUG_INPUT: - x = torch.zeros(total_seqlen, 2, num_heads, head_size, dtype=dtype, device=device) + x = torch.zeros( + total_seqlen, 2, num_heads, head_size, dtype=dtype, device=device + ) for i in range(batch_size): start = cu_seqlens[i].item() - end = cu_seqlens[i+1].item() + end = cu_seqlens[i + 1].item() length = end - start x[start:end, :, :, :] = ( @@ -524,7 +543,9 @@ def generate_varlen_kv_packed( .expand(length, 2, num_heads, head_size) ) else: - x = torch.randn((total_seqlen, 2, num_heads, head_size), dtype=dtype, device=device) + x = torch.randn( + (total_seqlen, 2, num_heads, head_size), dtype=dtype, device=device + ) if is_fp8_dtype: # cast to fp8 - need to handle the packed dimension @@ -533,7 +554,6 @@ def generate_varlen_kv_packed( x.requires_grad_() return x, cu_seqlens, max_seqlen -# Replace the existing input_helper function in utils.py with this updated version def input_helper( BATCH: int, @@ -547,7 +567,7 @@ def input_helper( dtype: torch.dtype, layout: Literal["bshd", "bhsd", "thd"], packing: Optional[Literal["kv", "qkv"]] = None, - device: Literal["cpu", "cuda"] = "cuda" + device: Literal["cpu", "cuda"] = "cuda", ): torch.manual_seed(20) is_fp8_dtype = is_dtype_fp8(dtype) @@ -557,136 +577,284 @@ def input_helper( TOTAL_SEQLENS_Q = BATCH * N_CTX_Q TOTAL_SEQLENS_K = BATCH * N_CTX_K equal_seqlens = False - + # deal with packing if packing is None: # gen tensors if is_fp8_dtype: - q, cu_seqlens_q, max_seqlen_q, descale_q = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - k, cu_seqlens_k, max_seqlen_k, descale_k = generate_varlen_tensor(TOTAL_SEQLENS_K, HK, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - v, _, _, descale_v = generate_varlen_tensor(TOTAL_SEQLENS_K, HK, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - do, _, _, descale_do = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) + q, cu_seqlens_q, max_seqlen_q, descale_q = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + k, cu_seqlens_k, max_seqlen_k, descale_k = generate_varlen_tensor( + TOTAL_SEQLENS_K, + HK, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + v, _, _, descale_v = generate_varlen_tensor( + TOTAL_SEQLENS_K, + HK, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + do, _, _, descale_do = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) else: - q, cu_seqlens_q, max_seqlen_q = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - k, cu_seqlens_k, max_seqlen_k = generate_varlen_tensor(TOTAL_SEQLENS_K, HK, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - v, _, _ = generate_varlen_tensor(TOTAL_SEQLENS_K, HK, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - do, _, _ = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) + q, cu_seqlens_q, max_seqlen_q = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + 
HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + k, cu_seqlens_k, max_seqlen_k = generate_varlen_tensor( + TOTAL_SEQLENS_K, + HK, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + v, _, _ = generate_varlen_tensor( + TOTAL_SEQLENS_K, + HK, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + do, _, _ = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) elif packing == "kv": # gen tensors with kv packing if is_fp8_dtype: raise ValueError("FP8 not supported for KV packing yet") else: - q, cu_seqlens_q, max_seqlen_q = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - kv, cu_seqlens_k, max_seqlen_k = generate_varlen_kv_packed(TOTAL_SEQLENS_K, HK, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - do, _, _ = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) + q, cu_seqlens_q, max_seqlen_q = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + kv, cu_seqlens_k, max_seqlen_k = generate_varlen_kv_packed( + TOTAL_SEQLENS_K, + HK, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + do, _, _ = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) elif packing == "qkv": # qkv packing - requires same sequence length for q and k - assert N_CTX_Q == N_CTX_K, "For QKV packing, Q and K must have same sequence length" + assert ( + N_CTX_Q == N_CTX_K + ), "For QKV packing, Q and K must have same sequence length" assert HQ == HK, "For QKV packing, Q and K must have same number of heads" - + if is_fp8_dtype: raise ValueError("FP8 not supported for QKV packing yet") else: - qkv, cu_seqlens_q, max_seqlen_q = generate_varlen_qkv_packed(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) + qkv, cu_seqlens_q, max_seqlen_q = generate_varlen_qkv_packed( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) cu_seqlens_k = cu_seqlens_q max_seqlen_k = max_seqlen_q - do, _, _ = generate_varlen_tensor(TOTAL_SEQLENS_Q, HQ, D_HEAD, batch_size=BATCH, dtype=dtype, device=device, equal_seqlens=equal_seqlens) - - # setup metadata - sm_scale = D_HEAD**-0.5 - metadata = MetaData(sm_scale=sm_scale) - metadata.set_varlen_params(cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k) - metadata.need_causal(CAUSAL) - metadata.need_dropout(DROPOUT_P, True) - - elif layout == 'bshd' or layout == "bhsd": + do, _, _ = generate_varlen_tensor( + TOTAL_SEQLENS_Q, + HQ, + D_HEAD, + batch_size=BATCH, + dtype=dtype, + device=device, + equal_seqlens=equal_seqlens, + ) + + elif layout == "bshd" or layout == "bhsd": # deal with packing if packing is None: # gen tensors if layout == "bshd": if is_fp8_dtype: - q, descale_q = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) - k, descale_k = generate_bshd_tensor(BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device) - v, descale_v = generate_bshd_tensor(BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, 
device=device) - do, descale_do = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) + q, descale_q = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) + k, descale_k = generate_bshd_tensor( + BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device + ) + v, descale_v = generate_bshd_tensor( + BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device + ) + do, descale_do = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) else: - q = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) - k = generate_bshd_tensor(BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device) - v = generate_bshd_tensor(BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device) - do = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) + q = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) + k = generate_bshd_tensor( + BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device + ) + v = generate_bshd_tensor( + BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device + ) + do = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) elif layout == "bhsd": - q, descale_q = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) - k, descale_k = generate_bhsd_tensor(BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device) - v, descale_v = generate_bhsd_tensor(BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device) - do, descale_do = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) + q, descale_q = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) + k, descale_k = generate_bhsd_tensor( + BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device + ) + v, descale_v = generate_bhsd_tensor( + BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device + ) + do, descale_do = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) else: - q = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) - k = generate_bhsd_tensor(BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device) - v = generate_bhsd_tensor(BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device) - do = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) + q = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) + k = generate_bhsd_tensor( + BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device + ) + v = generate_bhsd_tensor( + BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device + ) + do = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) elif packing == "kv": # gen tensors with kv packing if is_fp8_dtype: raise ValueError("FP8 not supported for KV packing yet") else: if layout == "bshd": - q = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) - kv = generate_bshd_kv_packed(BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device) - do = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) + q = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) + kv = generate_bshd_kv_packed( + BATCH, N_CTX_K, HK, D_HEAD, dtype=dtype, device=device + ) + do = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) elif layout == "bhsd": - q = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) - kv = generate_bhsd_kv_packed(BATCH, HK, N_CTX_K, D_HEAD, 
dtype=dtype, device=device) - do = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) + q = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) + kv = generate_bhsd_kv_packed( + BATCH, HK, N_CTX_K, D_HEAD, dtype=dtype, device=device + ) + do = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) elif packing == "qkv": # qkv packing - requires same sequence length for q and k - assert N_CTX_Q == N_CTX_K, "For QKV packing, Q and K must have same sequence length" + assert ( + N_CTX_Q == N_CTX_K + ), "For QKV packing, Q and K must have same sequence length" assert HQ == HK, "For QKV packing, Q and K must have same number of heads" - + if is_fp8_dtype: raise ValueError("FP8 not supported for QKV packing yet") else: if layout == "bshd": - qkv = generate_bshd_qkv_packed(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) - do = generate_bshd_tensor(BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device) + qkv = generate_bshd_qkv_packed( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) + do = generate_bshd_tensor( + BATCH, N_CTX_Q, HQ, D_HEAD, dtype=dtype, device=device + ) elif layout == "bhsd": - qkv = generate_bhsd_qkv_packed(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) - do = generate_bhsd_tensor(BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device) - - # setup metadata - sm_scale = D_HEAD**-0.5 - metadata = MetaData(sm_scale=sm_scale) - metadata.max_seqlens_q = N_CTX_Q - metadata.max_seqlens_k = N_CTX_K - metadata.layout = layout - metadata.need_causal(CAUSAL) - metadata.need_dropout(DROPOUT_P, True) + qkv = generate_bhsd_qkv_packed( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) + do = generate_bhsd_tensor( + BATCH, HQ, N_CTX_Q, D_HEAD, dtype=dtype, device=device + ) + else: raise ValueError(f"Unknown layout: {layout}") # return based on packing if packing is None: if is_fp8_dtype: - return (q, descale_q), (k, descale_k), (v, descale_v), (do, descale_do), metadata + return (q, descale_q), (k, descale_k), (v, descale_v), (do, descale_do) else: - return q, k, v, do, metadata + return q, k, v, do elif packing == "kv": if is_fp8_dtype: raise ValueError("FP8 not supported kv packing yet") else: - return q, kv, do, metadata + return q, kv, do elif packing == "qkv": if is_fp8_dtype: raise ValueError("FP8 not supported qkv packing yet") else: - return qkv, do, metadata + return qkv, do else: assert False, f"Unsupported packing mode: {packing}" + # ------------------------------- # Alibi # ------------------------------- @triton.jit -def compute_alibi_block(alibi_slope, seqlen_q, seqlen_k, offs_m, offs_n, transpose=False): +def compute_alibi_block( + alibi_slope, seqlen_q, seqlen_k, offs_m, offs_n, transpose=False +): # when seqlen_k and seqlen_q are different we want the diagonal to stick to the bottom right of the attention matrix # for casual mask we want something like this where (1 is kept and 0 is masked) # seqlen_q = 2 and seqlen_k = 5 @@ -717,11 +885,17 @@ def compute_alibi_block(alibi_slope, seqlen_q, seqlen_k, offs_m, offs_n, transpo else: return alibi_block + # ------------------------------- # FP8 # ------------------------------- def is_dtype_fp8(dtype): - if dtype in {torch.float8_e4m3fnuz, torch.float8_e4m3fn, torch.float8_e5m2, torch.float8_e5m2fnuz}: + if dtype in { + torch.float8_e4m3fnuz, + torch.float8_e4m3fn, + torch.float8_e5m2, + torch.float8_e5m2fnuz, + }: if arch_supports_fp8(): return True else: @@ -729,36 +903,50 @@ def is_dtype_fp8(dtype): else: 
return False + def is_fp8(x): return is_dtype_fp8(x.dtype) + @triton.jit def compute_fp8_scaling_factors(x, fp8_max: tl.constexpr): # compute fp8 scaling and descaling factor for a block - x_amax = tl.max(tl.abs(x)) # NOTE: abs deals with negative values + x_amax = tl.max(tl.abs(x)) # NOTE: abs deals with negative values x_amax = tl.where(x_amax <= 1e-9, 1e-9, x_amax) scale_x = fp8_max / x_amax descale_x = x_amax / fp8_max return scale_x, descale_x + @triton.jit def _cast_varlen_to_fp8_kernel_2d( - X, X_fp8, Descale, - cu_seqlens, H, MAX_SEQLEN, - stride_batch, stride_seq, stride_head, stride_dim, - stride_out_batch, stride_out_seq, stride_out_head, stride_out_dim, - stride_desc_batch, stride_desc_head, - FP8_CLAMP_VAL, + X, + X_fp8, + Descale, + cu_seqlens, + H, + MAX_SEQLEN, + stride_batch, + stride_seq, + stride_head, + stride_dim, + stride_out_batch, + stride_out_seq, + stride_out_head, + stride_out_dim, + stride_desc_batch, + stride_desc_head, + FP8_CLAMP_VAL, FP8_MAX, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ACTUAL_HEAD_DIM: tl.constexpr, - IS_VARLEN: tl.constexpr - ): + IS_VARLEN: tl.constexpr, +): # Process one (batch, head) pair per kernel b_id = tl.program_id(0) h_id = tl.program_id(1) - + # Get sequence bounds for this batch if IS_VARLEN: seq_start = tl.load(cu_seqlens + b_id) @@ -766,11 +954,11 @@ def _cast_varlen_to_fp8_kernel_2d( seqlen = seq_end - seq_start else: seq_start = 0 - seqlen = MAX_SEQLEN - + seqlen = MAX_SEQLEN + # initialize max value tracker x_max_val = 0.0 - + # STEP 1: Find max absolute value across the entire sequence num_of_blocks = tl.cdiv(seqlen, BLOCK_SIZE) for blk_idx in range(0, num_of_blocks): @@ -778,7 +966,7 @@ def _cast_varlen_to_fp8_kernel_2d( # offsets offs_seq = blk_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) offs_dim = tl.arange(0, HEAD_DIM) - + # Create mask for valid elements mask_seq = offs_seq[:, None] < seqlen if ACTUAL_HEAD_DIM != HEAD_DIM: @@ -786,27 +974,33 @@ def _cast_varlen_to_fp8_kernel_2d( mask_seq = mask_seq & mask_dim # Load block - adj_x = b_id * stride_batch + h_id * stride_head + seq_start * stride_seq + offs_seq[:, None] * stride_seq + offs_dim[None, :] * stride_dim + adj_x = ( + b_id * stride_batch + + h_id * stride_head + + seq_start * stride_seq + + offs_seq[:, None] * stride_seq + + offs_dim[None, :] * stride_dim + ) x_block = tl.load(X + adj_x, mask=mask_seq, other=0.0) # print("x_block:", x_block) - + # Find max absolute value in this block block_max = tl.max(tl.abs(x_block)) # print("block_max:", block_max) - + # Update overall max x_max_val = tl.maximum(x_max_val, block_max) # print("x_max_val:", x_max_val) - + # clamp to avoid division by zero issues x_max_val = tl.maximum(x_max_val, FP8_CLAMP_VAL) - + # compute scale and descale factors for the entire sequence scale = FP8_MAX / x_max_val descale = x_max_val / FP8_MAX - + # store descale factor for this (batch, head) pair - desc_ptr = Descale + b_id * stride_desc_batch + h_id# * stride_desc_head + desc_ptr = Descale + b_id * stride_desc_batch + h_id # * stride_desc_head tl.store(desc_ptr, descale) # STEP 2: Apply scaling to the entire sequence and convert to FP8 @@ -814,31 +1008,44 @@ def _cast_varlen_to_fp8_kernel_2d( # offsets offs_seq = blk_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) offs_dim = tl.arange(0, HEAD_DIM) - + # Create mask for valid elements mask_seq = offs_seq[:, None] < seqlen if ACTUAL_HEAD_DIM != HEAD_DIM: mask_dim = offs_dim[None, :] < ACTUAL_HEAD_DIM mask_seq = mask_seq & mask_dim - + # Load block - Using the fixed addressing - addr = 
b_id * stride_batch + h_id * stride_head + seq_start * stride_seq + offs_seq[:, None] * stride_seq + offs_dim[None, :] * stride_dim + addr = ( + b_id * stride_batch + + h_id * stride_head + + seq_start * stride_seq + + offs_seq[:, None] * stride_seq + + offs_dim[None, :] * stride_dim + ) x_block = tl.load(X + addr, mask=mask_seq, other=0.0) - + # Apply scale and convert to FP8 x_fp8_block = (x_block * scale).to(X_fp8.type.element_ty) - + # Store results - addr_out = b_id * stride_out_batch + h_id * stride_out_head + seq_start * stride_out_seq + offs_seq[:, None] * stride_out_seq + offs_dim[None, :] * stride_out_dim + addr_out = ( + b_id * stride_out_batch + + h_id * stride_out_head + + seq_start * stride_out_seq + + offs_seq[:, None] * stride_out_seq + + offs_dim[None, :] * stride_out_dim + ) tl.store(X_fp8 + addr_out, x_fp8_block, mask=mask_seq) + def cast_to_fp8( x: torch.Tensor, fp8_dtype: torch.dtype, layout: Literal["bshd", "thd"], clamp_val: float = 1e-9, cu_seqlens: Optional[torch.Tensor] = None, - max_seqlen: Optional[int] = None + max_seqlen: Optional[int] = None, ) -> tuple[torch.Tensor, torch.Tensor]: if False: print() @@ -850,10 +1057,17 @@ def cast_to_fp8( print("clamp_val:", clamp_val) # check types are valid - assert x.dtype in {torch.float16, torch.float32, torch.float64, torch.bfloat16} and is_dtype_fp8(fp8_dtype), f"Cannot cast {x.dtype} to {fp8_dtype}" + assert x.dtype in { + torch.float16, + torch.float32, + torch.float64, + torch.bfloat16, + } and is_dtype_fp8(fp8_dtype), f"Cannot cast {x.dtype} to {fp8_dtype}" # extract dimensions - batch, max_seqlen_final, num_heads, head_dim = get_shape_from_layout(x, layout, cu_seqlens, max_seqlen) + batch, max_seqlen_final, num_heads, head_dim = get_shape_from_layout( + x, layout, cu_seqlens, max_seqlen + ) is_varlen = layout == "thd" fp8_max = torch.finfo(fp8_dtype).max if False: @@ -868,12 +1082,18 @@ def cast_to_fp8( # kernel params x_fp8 = torch.zeros_like(x, dtype=fp8_dtype) - descale_factors = torch.zeros((batch, num_heads), device=x.device, dtype=torch.float32) + descale_factors = torch.zeros( + (batch, num_heads), device=x.device, dtype=torch.float32 + ) BLOCK_SIZE = 128 # calculate strides - stride_batch, stride_head, stride_seq, stride_dim = get_stride_from_layout(x, layout) - stride_out_batch, stride_out_head, stride_out_seq, stride_out_dim = get_stride_from_layout(x_fp8, layout) + stride_batch, stride_head, stride_seq, stride_dim = get_stride_from_layout( + x, layout + ) + stride_out_batch, stride_out_head, stride_out_seq, stride_out_dim = ( + get_stride_from_layout(x_fp8, layout) + ) stride_desc_batch, stride_desc_head = descale_factors.stride() if False: @@ -890,23 +1110,36 @@ def cast_to_fp8( grid = (batch, num_heads) _cast_varlen_to_fp8_kernel_2d[grid]( - x, x_fp8, descale_factors, - cu_seqlens, num_heads, max_seqlen_final, - stride_batch, stride_seq, stride_head, stride_dim, - stride_out_batch, stride_out_seq, stride_out_head, stride_out_dim, - stride_desc_batch, stride_desc_head, - clamp_val, fp8_max, + x, + x_fp8, + descale_factors, + cu_seqlens, + num_heads, + max_seqlen_final, + stride_batch, + stride_seq, + stride_head, + stride_dim, + stride_out_batch, + stride_out_seq, + stride_out_head, + stride_out_dim, + stride_desc_batch, + stride_desc_head, + clamp_val, + fp8_max, BLOCK_SIZE=BLOCK_SIZE, - HEAD_DIM=padded_head_dim, + HEAD_DIM=padded_head_dim, ACTUAL_HEAD_DIM=head_dim, - IS_VARLEN=is_varlen + IS_VARLEN=is_varlen, ) - + if False: print("x_fp8:", x_fp8, x_fp8.shape) print("descale_factors:", 
descale_factors, descale_factors.shape) return x_fp8, descale_factors + # ------------------------------- # Misc # ------------------------------- @@ -916,47 +1149,74 @@ def get_shape_from_layout( cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ) -> tuple[int, int, int, int]: - if layout == 'bhsd': + if layout == "bhsd": batch, num_heads, max_seqlen_final, head_dim = x.shape - elif layout == 'bshd': + elif layout == "bshd": batch, max_seqlen_final, num_heads, head_dim = x.shape - elif layout == 'thd': + elif layout == "thd": total_seqlen, num_heads, head_dim = x.shape if cu_seqlens is None: - raise ValueError("cu_seqlens must be provided for varlen (thd) layout") + raise ValueError("cu_seqlens must be provided for varlen (thd) layout") if max_seqlen is None: raise ValueError("max_seqlen must be provided for varlen (thd) layout") - - batch, max_seqlen_final, num_heads, head_dim = len(cu_seqlens) - 1, max_seqlen, num_heads, head_dim + + batch, max_seqlen_final, num_heads, head_dim = ( + len(cu_seqlens) - 1, + max_seqlen, + num_heads, + head_dim, + ) else: assert False, "Got unsupported layout." return batch, max_seqlen_final, num_heads, head_dim -def get_shapes_from_layout(q, k, layout, cu_seqlens_q = None, cu_seqlens_k = None, max_seqlen_q=None, max_seqlen_k=None): - batch_q, seqlen_q, nheads_q, head_size_q = get_shape_from_layout(q, layout, cu_seqlens_q, max_seqlen_q) - batch_k, seqlen_k, nheads_k, head_size_k = get_shape_from_layout(k, layout, cu_seqlens_k, max_seqlen_k) - +def get_shapes_from_layout( + q, + k, + layout, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, +): + batch_q, seqlen_q, nheads_q, head_size_q = get_shape_from_layout( + q, layout, cu_seqlens_q, max_seqlen_q + ) + batch_k, seqlen_k, nheads_k, head_size_k = get_shape_from_layout( + k, layout, cu_seqlens_k, max_seqlen_k + ) + # assert assert batch_q == batch_k assert head_size_q == head_size_k return batch_q, nheads_q, nheads_k, head_size_q, seqlen_q, seqlen_k -def get_stride_from_layout(x: torch.Tensor, layout:Literal["bshd", "bhsd", "thd"]): - if layout == 'thd': - strides = (0, x.stride(1), x.stride(0), x.stride(2)) - elif layout == 'bhsd': + +def get_stride_from_layout(x: torch.Tensor, layout: Literal["bshd", "bhsd", "thd"]): + if layout == "thd": + strides = (0, x.stride(1), x.stride(0), x.stride(2)) + elif layout == "bhsd": strides = (x.stride(0), x.stride(1), x.stride(2), x.stride(3)) - elif layout == 'bshd': + elif layout == "bshd": strides = (x.stride(0), x.stride(2), x.stride(1), x.stride(3)) else: - assert False, 'Got unsupported layout.' + assert False, "Got unsupported layout." 
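# --- Editor's note (illustrative sketch, not part of the diff): what the layout helpers
# above produce. get_stride_from_layout normalizes every layout to the order
# (batch, head, seq, dim); for the packed "thd" layout the batch stride is 0 because
# sequences are addressed through cu_seqlens instead of a batch dimension.
import torch

x_bshd = torch.empty(2, 128, 8, 64)   # (batch, seq, heads, head_dim) == "bshd"
# contiguous strides are (65536, 512, 64, 1); reordered to (batch, head, seq, dim):
assert (x_bshd.stride(0), x_bshd.stride(2), x_bshd.stride(1), x_bshd.stride(3)) == (65536, 64, 512, 1)

x_thd = torch.empty(256, 8, 64)       # (total_seq, heads, head_dim), varlen packing
# (batch, head, seq, dim) strides -> (0, 64, 512, 1)
assert (0, x_thd.stride(1), x_thd.stride(0), x_thd.stride(2)) == (0, 64, 512, 1)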
return strides -def get_shape_and_strides_from_layout(x: torch.Tensor, layout: Literal["bshd", "bhsd", "thd"], cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None): - return get_shape_from_layout(x, layout, cu_seqlens, max_seqlen), get_stride_from_layout(x, layout) + +def get_shape_and_strides_from_layout( + x: torch.Tensor, + layout: Literal["bshd", "bhsd", "thd"], + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, +): + return get_shape_from_layout( + x, layout, cu_seqlens, max_seqlen + ), get_stride_from_layout(x, layout) + def get_strides_from_layout(q, k, v, o, layout): q_strides = get_stride_from_layout(q, layout) @@ -965,6 +1225,7 @@ def get_strides_from_layout(q, k, v, o, layout): o_strides = get_stride_from_layout(o, layout) return q_strides, k_strides, v_strides, o_strides + def get_padded_headsize(size): # Get closest power of 2 over or equal to 32. padded_d_model = 1 << (size - 1).bit_length() @@ -973,19 +1234,28 @@ def get_padded_headsize(size): padded_d_model = max(padded_d_model, 16) return padded_d_model + def compute_alibi_tensor_ref(alibi_slopes, seqlen_q, seqlen_k): - q_idx = torch.arange(seqlen_q, dtype=torch.int32, device="cuda").unsqueeze(-1) # (N_CTX_Q, 1) - k_idx = torch.arange(seqlen_k, dtype=torch.int32, device="cuda").unsqueeze(0) # (1, N_CTX_K) + q_idx = torch.arange(seqlen_q, dtype=torch.int32, device="cuda").unsqueeze( + -1 + ) # (N_CTX_Q, 1) + k_idx = torch.arange(seqlen_k, dtype=torch.int32, device="cuda").unsqueeze( + 0 + ) # (1, N_CTX_K) relative_pos = torch.abs(q_idx + seqlen_k - seqlen_q - k_idx) # (N_CTX_Q, N_CTX_K) - return -1 * alibi_slopes.unsqueeze(-1).unsqueeze(-1) * relative_pos # (Z, H, N_CTX_Q, N_CTX_K) + return ( + -1 * alibi_slopes.unsqueeze(-1).unsqueeze(-1) * relative_pos + ) # (Z, H, N_CTX_Q, N_CTX_K) + def round_multiple(x, m): return (x + m - 1) // m * m + def save_tensor_to_csv(tensor, filename, decimal_places=2): """ save a 2d tensor to csv file - + args: tensor: torch tensor of shape [rows, cols] filename: output csv filename @@ -994,46 +1264,63 @@ def save_tensor_to_csv(tensor, filename, decimal_places=2): # ensure tensor is 2d if tensor.ndim != 2: raise ValueError(f"tensor must be 2d, got shape {tensor.shape}") - + # ensure filename ends with .csv - if not filename.endswith('.csv'): - filename = filename + '.csv' - + if not filename.endswith(".csv"): + filename = filename + ".csv" + # save to csv using numpy - np.savetxt(filename, - tensor.detach().cpu().numpy(), - delimiter=',', - fmt=f'%.{decimal_places}f') + np.savetxt( + filename, + tensor.detach().cpu().numpy(), + delimiter=",", + fmt=f"%.{decimal_places}f", + ) + # ------------------------------- # Dropouts # ------------------------------- def create_dropout_mask(dropout_p, shape, seed): device = "cuda" - rand_vals = torch.rand(shape, generator=torch.Generator(device=device).manual_seed(seed), device=device, dtype=torch.float32) + rand_vals = torch.rand( + shape, + generator=torch.Generator(device=device).manual_seed(seed), + device=device, + dtype=torch.float32, + ) return rand_vals > dropout_p -def create_dropout_mask_varlen(dropout_p, batch, nheads_q, cu_seqlens_q, cu_seqlens_k, philox_seed): + +def create_dropout_mask_varlen( + dropout_p, batch, nheads_q, cu_seqlens_q, cu_seqlens_k, philox_seed +): device = "cuda" - qlens = (cu_seqlens_q[1:] - cu_seqlens_q[:-1]) - klens = (cu_seqlens_k[1:] - cu_seqlens_k[:-1]) + qlens = cu_seqlens_q[1:] - cu_seqlens_q[:-1] + klens = cu_seqlens_k[1:] - cu_seqlens_k[:-1] max_qlen = 
qlens.max() max_klen = klens.max() dropout_mask = torch.zeros((batch, nheads_q, max_qlen, max_klen), device=device) for b in range(batch): qlen = qlens[b] klen = klens[b] - rand_vals = torch.rand((nheads_q, qlen, klen), generator=torch.Generator(device=device).manual_seed(philox_seed), device=device, dtype=torch.float32) + rand_vals = torch.rand( + (nheads_q, qlen, klen), + generator=torch.Generator(device=device).manual_seed(philox_seed), + device=device, + dtype=torch.float32, + ) submask = rand_vals > dropout_p dropout_mask[b, :, :qlen, :klen] = submask return dropout_mask -def write_dropout_mask(x, tensor_name = "tensor"): + +def write_dropout_mask(x, tensor_name="tensor"): batch, head, seqlen_m, seqlen_n = x.shape x = x.tolist() - with open(f'{tensor_name}.csv', 'w') as f: + with open(f"{tensor_name}.csv", "w") as f: writer = csv.writer(f) for b in range(batch): for h in range(head): @@ -1041,22 +1328,22 @@ def write_dropout_mask(x, tensor_name = "tensor"): if True: BLOCK_M = 64 BLOCK_N = 64 - + # Calculate number of blocks in each dimension m_blocks = math.ceil(seqlen_m / BLOCK_M) n_blocks = math.ceil(seqlen_n / BLOCK_N) - + # Process each block for m_block in range(m_blocks): # Calculate row range for current block row_start = m_block * BLOCK_M row_end = min(row_start + BLOCK_M, seqlen_m) - + for n_block in range(n_blocks): # Calculate column range for current block col_start = n_block * BLOCK_N col_end = min(col_start + BLOCK_N, seqlen_n) - + # Extract and write the current block for row_idx in range(row_start, row_end): row_data = dropout_mask[row_idx][col_start:col_end] @@ -1064,6 +1351,379 @@ def write_dropout_mask(x, tensor_name = "tensor"): else: writer.writerows(dropout_mask) + +# ------------------------------- +# Rotary +# ------------------------------- +@triton.jit +def _rotary_kernel( + OUT, + X, + COS, + SIN, + CU_SEQLENS, + SEQLEN_OFFSETS, + seqlen, + nheads, + seqlen_ro, + stride_out_batch, + stride_out_seqlen, + stride_out_nheads, + stride_out_headdim, + stride_x_batch, + stride_x_seqlen, + stride_x_nheads, + stride_x_headdim, + ROTARY_DIM: tl.constexpr, + IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr, + IS_VARLEN: tl.constexpr, + INTERLEAVED: tl.constexpr, + CONJUGATE: tl.constexpr, + BLOCK_H: tl.constexpr, + BLOCK_M: tl.constexpr, +): + BLOCK_K: tl.constexpr = triton.next_power_of_2(ROTARY_DIM) + ROTARY_DIM_HALF = ROTARY_DIM // 2 + pid_head = tl.program_id(axis=0) + pid_m = tl.program_id(axis=1) + pid_batch = tl.program_id(axis=2) + + if not IS_VARLEN: + X = X + pid_batch * stride_x_batch + OUT = OUT + pid_batch * stride_out_batch + else: + start_idx = tl.load(CU_SEQLENS + pid_batch) + seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx + X = X + start_idx * stride_x_seqlen + OUT = OUT + start_idx * stride_out_seqlen + + if pid_m * BLOCK_M >= seqlen: + return + + rh = pid_head * BLOCK_H + tl.arange(0, BLOCK_H) + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + if not IS_SEQLEN_OFFSETS_TENSOR: + rm_cs = rm + SEQLEN_OFFSETS + else: + rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch) + + rk_half = tl.arange(0, BLOCK_K // 2) + COS = COS + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :]) + SIN = SIN + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :]) + mask_cs = (rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < ROTARY_DIM_HALF) + cos = tl.load(COS, mask=mask_cs, other=1.0).to(tl.float32) + sin = tl.load(SIN, mask=mask_cs, other=0.0).to(tl.float32) + if CONJUGATE: + sin = -sin + + if not INTERLEAVED: + X = X + ( + rh[:, None, None] * stride_x_nheads + + rm[None, :, 
None] * stride_x_seqlen + + rk_half[None, None, :] * stride_x_headdim + ) + OUT = OUT + ( + rh[:, None, None] * stride_out_nheads + + rm[None, :, None] * stride_out_seqlen + + rk_half[None, None, :] * stride_out_headdim + ) + mask = ( + (rh[:, None, None] < nheads) + & (rm[None, :, None] < seqlen) + & (rk_half[None, None, :] < ROTARY_DIM_HALF) + ) + x0 = tl.load(X, mask=mask, other=0.0).to(tl.float32) + x1 = tl.load(X + ROTARY_DIM_HALF * stride_x_headdim, mask=mask, other=0.0).to( + tl.float32 + ) + o0 = x0 * cos - x1 * sin + o1 = x0 * sin + x1 * cos + tl.store(OUT, o0, mask=mask) + tl.store(OUT + ROTARY_DIM_HALF * stride_out_headdim, o1, mask=mask) + else: + rk = tl.arange(0, BLOCK_K) + X = X + ( + rh[:, None, None] * stride_x_nheads + + rm[None, :, None] * stride_x_seqlen + + rk[None, None, :] * stride_x_headdim + ) + OUT = OUT + ( + rh[:, None, None] * stride_out_nheads + + rm[None, :, None] * stride_out_seqlen + + rk[None, None, :] * stride_out_headdim + ) + mask = ( + (rh[:, None, None] < nheads) + & (rm[None, :, None] < seqlen) + & (rk[None, None, :] < ROTARY_DIM) + ) + x = tl.load(X, mask=mask, other=0.0).to(tl.float32) + x0, x1 = tl.split(tl.reshape(x, [BLOCK_H, BLOCK_M, BLOCK_K // 2, 2])) + o0 = x0 * cos - x1 * sin + o1 = x0 * sin + x1 * cos + o = tl.reshape(tl.join(o0, o1), [BLOCK_H, BLOCK_M, BLOCK_K]) + tl.store(OUT, o, mask=mask) + + +def _apply_rotary_kernel( + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + seqlen_offsets: Union[int, torch.Tensor] = 0, + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, + interleaved: bool = False, + inplace: bool = False, + conjugate: bool = False, +) -> torch.Tensor: + is_varlen = cu_seqlens is not None + if not is_varlen: + batch, seqlen, nheads, headdim = x.shape + else: + assert ( + max_seqlen is not None + ), "If cu_seqlens is passed, max_seqlen must also be provided" + total_seqlen, nheads, headdim = x.shape + batch_p_1 = cu_seqlens.shape[0] + batch = batch_p_1 - 1 + seqlen = max_seqlen + seqlen_ro, rotary_dim_half = cos.shape + assert sin.shape == cos.shape + rotary_dim = 2 * rotary_dim_half + assert rotary_dim <= headdim + assert headdim <= 256 + assert seqlen_ro >= seqlen + + cos, sin = cos.contiguous(), sin.contiguous() + if isinstance(seqlen_offsets, torch.Tensor): + assert seqlen_offsets.shape == (batch,) + assert seqlen_offsets.dtype in (torch.int32, torch.int64) + seqlen_offsets = seqlen_offsets.contiguous() + else: + assert seqlen_offsets + seqlen <= seqlen_ro + + out = torch.empty_like(x) if not inplace else x + if rotary_dim < headdim and not inplace: + out[..., rotary_dim:].copy_(x[..., rotary_dim:]) + + # Block heuristics + BLOCK_M = 8 if rotary_dim <= 128 else 4 + grid = ( + triton.cdiv(nheads, 2), + triton.cdiv(seqlen, BLOCK_M), + batch, + ) + + # NOTE: We assume CUDA device indexing compatibility in upstream; adapt for ROCm by using device context. + # For ROCm, torch.cuda.device works if HIP_VISIBLE_DEVICES mapping is set. 
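# --- Editor's note (illustrative sketch, not part of the diff): a plain-PyTorch reference
# of the non-interleaved (half-split) rotation that _rotary_kernel above computes,
# ignoring sequence offsets and varlen handling for brevity. Shapes follow the kernel's
# convention: x covers only the rotary dims, cos/sin are (seqlen, rotary_dim // 2).
import torch

def rotary_ref(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # x: (seqlen, nheads, rotary_dim); split into the two halves rotated against each other
    half = x.shape[-1] // 2
    x1, x2 = x[..., :half], x[..., half:]
    cos = cos[:, None, :]                        # broadcast over the heads dimension
    sin = sin[:, None, :]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)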
+ with torch.cuda.device(x.device.index): # Works for ROCm as alias + torch.library.wrap_triton(_rotary_kernel)[grid]( + out, + x, + cos, + sin, + cu_seqlens, + seqlen_offsets, + seqlen, + nheads, + seqlen_ro, + out.stride(0) if not is_varlen else 0, + out.stride(-3), + out.stride(-2), + out.stride(-1), + x.stride(0) if not is_varlen else 0, + x.stride(-3), + x.stride(-2), + x.stride(-1), + rotary_dim, + isinstance(seqlen_offsets, torch.Tensor), + is_varlen, + interleaved, + conjugate, + BLOCK_M=BLOCK_M, + BLOCK_H=2, + ) + return out + + +class _ApplyRotary(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + interleaved: bool, + inplace: bool, + seqlen_offsets: Union[int, torch.Tensor], + cu_seqlens: Optional[torch.Tensor], + max_seqlen: Optional[int], + ): + out = _apply_rotary_kernel( + x, + cos, + sin, + seqlen_offsets=seqlen_offsets, + cu_seqlens=cu_seqlens, + max_seqlen=max_seqlen, + interleaved=interleaved, + inplace=inplace, + conjugate=False, + ) + if isinstance(seqlen_offsets, int): + ctx.save_for_backward(cos, sin, cu_seqlens) + ctx.seqlen_offsets = seqlen_offsets + else: + ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets) + ctx.seqlen_offsets = None + ctx.interleaved = interleaved + ctx.inplace = inplace + ctx.max_seqlen = max_seqlen + return out if not inplace else x + + @staticmethod + def backward(ctx, do: torch.Tensor): + seqlen_offsets = ctx.seqlen_offsets + if seqlen_offsets is None: + cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors + else: + cos, sin, cu_seqlens = ctx.saved_tensors + dx = _apply_rotary_kernel( + do, + cos, + sin, + seqlen_offsets=seqlen_offsets, + cu_seqlens=cu_seqlens, + max_seqlen=ctx.max_seqlen, + interleaved=ctx.interleaved, + inplace=ctx.inplace, + conjugate=True, + ) + return dx, None, None, None, None, None, None, None + + +def apply_rotary_emb( + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + interleaved: bool = False, + inplace: bool = False, + seqlen_offsets: Union[int, torch.Tensor] = 0, + cu_seqlens: Optional[torch.Tensor] = None, + max_seqlen: Optional[int] = None, +) -> torch.Tensor: + """Public API: apply rotary embeddings to tensor x. + + Args: + x: (B, S, H, D) if `cu_seqlens` is None else (total_S, H, D). + cos, sin: (S_rotary, rotary_dim/2) + interleaved: GPT-J style if True. + inplace: modify x in place (saves memory if rotary_dim == D). + seqlen_offsets: int or (B,) tensor of starting offsets per sequence (KV cache decode). + cu_seqlens: (B+1,) tensor enabling varlen mode. + max_seqlen: required when `cu_seqlens` is provided. 
+ """ + # FP8 path: upcast to bfloat16 (preferred) or float16 for rotary math to avoid excessive error + original_dtype = x.dtype + is_fp8_input = original_dtype == getattr(torch, "float8_e4m3fn", None) + if is_fp8_input: + # Choose bf16 if available in cos.dtype path; otherwise fallback to float16 + target_dtype = ( + torch.bfloat16 + if cos.dtype == torch.bfloat16 or torch.cuda.is_bf16_supported() + else torch.float16 + ) + # Upcast x, cos, sin for computation (without modifying originals in-place) + x_up = x.to(target_dtype) + cos_up = cos.to(target_dtype) if cos.dtype != target_dtype else cos + sin_up = sin.to(target_dtype) if sin.dtype != target_dtype else sin + out_up = _ApplyRotary.apply( + x_up, + cos_up, + sin_up, + interleaved, + False, + seqlen_offsets, + cu_seqlens, + max_seqlen, + ) + # Cast result back to original fp8 dtype + if inplace: + x.copy_(out_up.to(original_dtype)) + return x + return out_up.to(original_dtype) + else: + return _ApplyRotary.apply( + x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen + ) + + +def apply_rotary( + q: torch.Tensor, + k_new: Optional[torch.Tensor], + cos: torch.Tensor, + sin: torch.Tensor, + *, + causal: bool, + local: bool, + interleaved: bool = False, + seqlen_offsets: Union[int, torch.Tensor] = 0, +) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """High-level rotary application used by AMD prefill & decode paths. + + Policy (matches test reference & legacy semantics): + - If causal OR local attention ⇒ apply rotary directly on (B, S, H, D). + - Else (non-causal global) ⇒ flatten heads into sequence: (B, 1, S*H, D), + apply rotary once, then unflatten back. + - k_new (incremental KV slice) is always rotated directly when provided. + + Args: + q: (B, S, H, D) + k_new: Optional (B, S_k, H_k, D) + cos, sin: rotary caches (S_rotary, rotary_dim/2) + causal: causal attention flag + local: sliding-window / local attention flag (pre-computed outside) + interleaved: GPT-J style rotary layout + seqlen_offsets: int or (B,) tensor of per-sequence start offsets + Returns: + (q_rot, k_new_rot) + """ + assert q.ndim == 4, f"Expected q shape (B,S,H,D), got {q.shape}" + B, S, H, D = q.shape + use_flatten = (not causal) and (not local) + + if use_flatten: + # Flatten (S,H) -> (S*H) with an added singleton dim to preserve expected 4D shape. 
+        q_flat = q.reshape(B, S * H, D).unsqueeze(1)  # (B, 1, S*H, D)
+        q_flat = apply_rotary_emb(
+            q_flat,
+            cos,
+            sin,
+            interleaved=interleaved,
+            seqlen_offsets=seqlen_offsets,
+        )
+        # Restore shape back to (B, S, H, D)
+        q = q_flat.view(B, 1, S * H, D).reshape(B, S, H, D)
+    else:
+        q = apply_rotary_emb(
+            q,
+            cos,
+            sin,
+            interleaved=interleaved,
+            seqlen_offsets=seqlen_offsets,
+        )
+
+    if k_new is not None:
+        k_new = apply_rotary_emb(
+            k_new,
+            cos,
+            sin,
+            interleaved=interleaved,
+            seqlen_offsets=seqlen_offsets,
+        )
+    return q, k_new
+
+
 
 # -------------------------------
 # Runtime info
 # -------------------------------
@@ -1071,18 +1731,36 @@ def write_dropout_mask(x, tensor_name = "tensor"):
 def is_hip():
     return triton.runtime.driver.active.get_current_target().backend == "hip"
 
+
 @functools.cache
 def get_arch():
     return triton.runtime.driver.active.get_current_target().arch
 
+
 @functools.cache
 def is_cdna():
-    return is_hip() and get_arch() in ('gfx908', 'gfx90a', 'gfx940', 'gfx941', 'gfx942', 'gfx950')
+    return is_hip() and get_arch() in (
+        "gfx908",
+        "gfx90a",
+        "gfx940",
+        "gfx941",
+        "gfx942",
+        "gfx950",
+    )
+
 
 @functools.cache
 def is_rdna():
-    return is_hip() and get_arch() in ("gfx1030", "gfx1100", "gfx1101", "gfx1102", "gfx1200", "gfx1201")
+    return is_hip() and get_arch() in (
+        "gfx1030",
+        "gfx1100",
+        "gfx1101",
+        "gfx1102",
+        "gfx1200",
+        "gfx1201",
+    )
+
 
 @functools.cache
 def arch_supports_fp8():
-    return is_hip() and get_arch() in ('gfx942')
+    return is_hip() and get_arch() in ("gfx942",)
diff --git a/hopper/flash_attn_interface.py b/hopper/flash_attn_interface.py
index 644e86e4b13..b7ede7dc442 100755
--- a/hopper/flash_attn_interface.py
+++ b/hopper/flash_attn_interface.py
@@ -11,7 +11,7 @@
 if USE_TRITON_ROCM:
     import sys
     sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-    from flash_attn.flash_attn_triton_amd import interface_fa_v3 as flash_attn_3_gpu
+    from flash_attn.flash_attn_triton_amd import flash_attn_3 as flash_attn_3_gpu
 else:
     # isort: off
     # We need to import the CUDA kernels after importing torch
diff --git a/hopper/test_flash_attn_triton_amd.py b/hopper/test_flash_attn_triton_amd.py
index 738ec1d8c13..73e54dce066 100755
--- a/hopper/test_flash_attn_triton_amd.py
+++ b/hopper/test_flash_attn_triton_amd.py
@@ -27,7 +27,7 @@
 DISABLE_BACKWARD = os.getenv("FLASH_ATTENTION_DISABLE_BACKWARD", "FALSE") == "TRUE"
 DISABLE_SPLIT = os.getenv("FLASH_ATTENTION_DISABLE_SPLIT", "TRUE") == "TRUE"
 DISABLE_PAGEDKV = os.getenv("FLASH_ATTENTION_DISABLE_PAGEDKV", "FALSE") == "TRUE"
-DISABLE_APPENDKV = os.getenv("FLASH_ATTENTION_DISABLE_APPENDKV", "TRUE") == "TRUE"
+DISABLE_APPENDKV = os.getenv("FLASH_ATTENTION_DISABLE_APPENDKV", "FALSE") == "TRUE"
 DISABLE_LOCAL = os.getenv("FLASH_ATTENTION_DISABLE_LOCAL", "TRUE") == "TRUE"
 DISABLE_SOFTCAP = os.getenv("FLASH_ATTENTION_DISABLE_SOFTCAP", "TRUE") == "TRUE"
 DISABLE_PACKGQA = os.getenv("FLASH_ATTENTION_DISABLE_PACKGQA", "TRUE") == "TRUE"
@@ -579,9 +579,8 @@ def _gen_unused_masks(padding_mask, add_unused, max_seq_len, bs, device):
 # @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
 @pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
 # @pytest.mark.parametrize("mha_type", ["mha"])
-# @pytest.mark.parametrize("new_kv", [False] + ([True] if not DISABLE_APPENDKV else []))
+@pytest.mark.parametrize("new_kv", [False] + ([True] if not DISABLE_APPENDKV else []))
 # @pytest.mark.parametrize("new_kv", [True])
-@pytest.mark.parametrize("new_kv", [False, True])
 @pytest.mark.parametrize("causal,local", [(False, False), (True, False)] + ([(False, True)] if not DISABLE_LOCAL else []))
 # @pytest.mark.parametrize("causal,local", [(False, False), (True, False)])
 # @pytest.mark.parametrize("causal,local", [(False, False)])
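
Usage sketch (not part of the patch) for the apply_rotary_emb helper added above, covering both the fixed-length (B, S, H, D) layout and the varlen (total_S, H, D) layout. The import path flash_attn.flash_attn_triton_amd.utils and the build_rope_cache helper are assumptions for illustration; the diff does not pin down the target module, and the cache construction is just the standard RoPE recipe.

import torch
import torch.nn.functional as F

# Assumed import path -- adjust to wherever this patch places apply_rotary_emb.
from flash_attn.flash_attn_triton_amd.utils import apply_rotary_emb


def build_rope_cache(seqlen_ro, rotary_dim, base=10000.0, device="cuda"):
    # cos/sin caches in the (seqlen_ro, rotary_dim // 2) layout the kernel expects.
    inv_freq = 1.0 / (base ** (torch.arange(0, rotary_dim, 2, device=device).float() / rotary_dim))
    t = torch.arange(seqlen_ro, device=device).float()
    freqs = torch.outer(t, inv_freq)
    return freqs.cos(), freqs.sin()


batch, seqlen, nheads, headdim = 2, 128, 8, 64
cos, sin = build_rope_cache(seqlen_ro=seqlen, rotary_dim=headdim)

# Fixed-length layout: (batch, seqlen, nheads, headdim).
q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
q_rot = apply_rotary_emb(q, cos, sin, interleaved=False, seqlen_offsets=0)

# Varlen layout: (total_seqlen, nheads, headdim) plus cu_seqlens of shape (batch + 1,).
seqlens = torch.tensor([100, 128], device="cuda", dtype=torch.int32)
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
q_var = torch.randn(int(seqlens.sum()), nheads, headdim, device="cuda", dtype=torch.float16)
q_var_rot = apply_rotary_emb(q_var, cos, sin, cu_seqlens=cu_seqlens, max_seqlen=128)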
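
A second sketch for the higher-level apply_rotary wrapper used by the AMD prefill/decode paths, continuing the assumptions above (the import path and build_rope_cache are hypothetical). It shows a single-token decode step with per-sequence cache offsets; with causal=True the wrapper rotates q directly, while causal=False and local=False would take the flatten-heads path described in its docstring.

# Continues the sketch above (same assumed import path and build_rope_cache helper).
import torch

from flash_attn.flash_attn_triton_amd.utils import apply_rotary  # hypothetical path

nheads, headdim, cache_len = 8, 64, 4096
cos, sin = build_rope_cache(seqlen_ro=cache_len, rotary_dim=headdim)

# Decode step: one new query token and one new KV slice per sequence, positioned
# at each sequence's current cache length.
q = torch.randn(2, 1, nheads, headdim, device="cuda", dtype=torch.float16)
k_new = torch.randn(2, 1, nheads, headdim, device="cuda", dtype=torch.float16)
cache_seqlens = torch.tensor([17, 33], device="cuda", dtype=torch.int32)

# causal=True keeps the direct (B, S, H, D) path; causal=False with local=False
# would instead flatten heads into the sequence dimension before rotating q.
q_rot, k_new_rot = apply_rotary(
    q,
    k_new,
    cos,
    sin,
    causal=True,
    local=False,
    interleaved=False,
    seqlen_offsets=cache_seqlens,
)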