Dataset columns (type and value range):
uuid          string   length 36–36
file_name     string   length 5–50
repo_name     string   110 distinct values
file_path     string   length 7–112
commit_hash   string   110 distinct values
starcount     int64    0–0
input         string   length 39–33.8k
category      dict
licenses      list     length 1–2
github_url    string   length 94–193
c5940182-0af6-4902-b164-e9d9776bf1ee
_quantize.py
IBM/qattn
qattn/nn/functional/_quantize.py
07ceda0aceb9afd299d622325944c0c0471827fe
0
@triton.jit def clamp(x: tl.tensor, min_val, max_val) -> tl.tensor: """Clamps all elements in `x` into range [min, max]. Args: x (tl.tensor): the input tensor. min_val (Number): lower bound of the range. max_val (Number): upper bound of the range. Returns: tl.tensor: the out...
{ "Data Type": [], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/IBM/qattn/blob/07ceda0aceb9afd299d622325944c0c0471827fe/qattn/nn/functional/_quantize.py
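The `clamp` snippet above is cut off before its body. A minimal sketch of how such a clamp is typically written in Triton (an illustration, not necessarily the qattn implementation):

```python
import triton
import triton.language as tl


@triton.jit
def clamp(x: tl.tensor, min_val, max_val) -> tl.tensor:
    """Clamps all elements in `x` into range [min_val, max_val]."""
    # Equivalent to min(max(x, min_val), max_val), applied elementwise.
    return tl.minimum(tl.maximum(x, min_val), max_val)
```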
cb753c39-8d4d-492d-af54-91bdf9d9f3a8
triton_fused_attention.py
pytorch-labs/tritonbench
tritonbench/kernels/triton_fused_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(list(filter(keep, configsTmaWS)), key=['N_CTX']) @triton.jit def _attn_fwd_tma_ws(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py
9391ce5d-adac-414a-92ce-2ca0faecd484
copy_strided.py
triton-lang/triton
python/examples/copy_strided.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def kernel(X, stride_xm, Z, stride_zn, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): off_m = tl.arange(0, BLOCK_M) off_n = tl.arange(0, BLOCK_N) Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * 1 Zs = Z + off_m[:, None] * 1 + off_n[None, :] * stride_zn tl.store(Zs, tl.load(Xs...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access", "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/examples/copy_strided.py
04bd883d-3bc8-46e4-9259-5f5271e53a83
y_7.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_7.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def seventh_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): coord_stride = 3 block_id = tl.program_id(0) coord_striding = tl.arange(0, block_si...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_7.py
ad543ac2-d2ac-447e-8a53-c871539d272e
y_2.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_2.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def second_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_2.py
28f5b112-be8c-4f0b-8096-f9ab7e3bb85c
mhmoe.py
dtadpole/triton-playground
mhmoe.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.jit def _mlp_wide_kernel_bwd_dx(dx, pid_h, pid_b, x_ptr, w1_ptr, w2_ptr, o_ptr, dx_ptr, dw1_ptr, dw2_ptr, do_ptr, H, B, D: tl.constexpr, E, stride_xb, stride_xd, stride_w1d, stride_w1e, stride_w2e, stride_w2d, stride_ob, stride_od, stride_dxb, stride_dxd, stride_dw1d, stride_dw1e, stride_dw2e, s...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Activation Functions", "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High ...
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py
9a05112d-23d7-43f7-92f8-e3e2e3cd9ee0
flash_attention.py
falkaer/multi-scale-music
seq/flash_attention.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def _bwd_kernel(Q, K, V, S, sm_scale, DO, DQ, DK, DV, M, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vn, stride_vk, stride_doz, stride_doh, stride_dom, stride_dok, stride_dqz, stride_dqh, stride_dqm, stride_dqk, stri...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bo...
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py
4db0e91c-60b7-4181-91a9-99d1f7995e1e
flash_triton.py
MayDomine/Burst-Attention
burst_attn/flash_triton.py
b088c554072935074ea9c643de5ee363be5ab1f6
0
@triton.jit def _bwd_store_dk_dv(dk_ptrs, dv_ptrs, dk, dv, offs_n, offs_d, seqlen_k, headdim, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr): if EVEN_N & EVEN_M: if EVEN_HEADDIM: tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) else: t...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "Apache" ]
https://github.com/MayDomine/Burst-Attention/blob/b088c554072935074ea9c643de5ee363be5ab1f6/burst_attn/flash_triton.py
743113b7-b1b8-41f3-ae0c-36089061283a
matmul.py
sustcsonglin/flash-linear-attention
fla/ops/utils/matmul.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'HAS_INPUT': lambda args: args['input'] is not None, 'HAS_ALPHA': lambda args: args['alpha'] is not None, 'HAS_BETA': lambda args: args['beta'] is not None}) @triton.autotune(configs=[triton.Config({'BM': 128, 'BK': 64, 'BN': 256, 'G': 4}, num_stages=3, num_warps=8), triton.Config({'BM':...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/utils/matmul.py
41d7f9ea-8351-46ef-aa36-57ca4cdd7d75
06-fused-attention.py
triton-lang/triton
python/tutorials/06-fused-attention.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def _attn_bwd_dq(dq, q, K, V, do, m, D, stride_tok, stride_d, H, N_CTX, BLOCK_M2: tl.constexpr, BLOCK_N2: tl.constexpr, HEAD_DIM: tl.constexpr, start_m, start_n, num_steps, MASK: tl.constexpr): offs_m = start_m + tl.arange(0, BLOCK_M2) offs_n = start_n + tl.arange(0, BLOCK_N2) offs_k = t...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access", "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/06-fused-attention.py
f12f4856-2ba8-4fdf-accd-606056062839
sparse_linear.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/sparse_linear.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.autotune(configs=autotune_configs, key=['row_dense_dim', 'row_sparse_dim', 'col_dim']) @triton.jit def input_row_sparse_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr, expert_ends_ptr, expert_pad_begins_ptr, row_dense_dim: tl.constexpr, row_sparse_dim: tl.constexpr, col_dim: tl.constexpr, inner_dim: tl. ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py
45c898a1-7e30-4f2d-bdf7-18b7ae8654ae
shape.py
2niuhe/triton_utils
src/triton_utils/shape.py
6184906ac3b86dac3ccbfac128ec393ccecde5df
0
@triton.jit def load_2d(ptr, sz0: tl.constexpr, sz1: tl.constexpr, n0, n1, max0, max1, stride0=None, stride1=1): """Chunk 2d matrix (defined by ptr) into 2d grid, where each chunk has size (sz0,sz1). Load the (n0,n1)th chunk. Ie, load [n0*sz0,...,(n0+1)*sz0-1] x [n1*sz1,...,(n1+1)*sz1-1]. """ stride...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [] }
[ "Apache" ]
https://github.com/2niuhe/triton_utils/blob/6184906ac3b86dac3ccbfac128ec393ccecde5df/src/triton_utils/shape.py
5558131b-8910-430e-bfcb-0a8e3e1e1a06
pointwise.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/pointwise.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def triton_add_kernel(input_ptr, other_ptr, out_ptr, numel: tl.constexpr, block_size: tl.constexpr): block_start = tl.program_id(axis=0).to(tl.int64) * block_size offsets = block_start + tl.arange(0, block_size) mask = offsets < numel input_ = tl.load(input_ptr + offsets, mask=mask) ...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/pointwise.py
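The add kernel above uses the standard one-program-per-block elementwise pattern: each program computes a contiguous block of offsets, masks the tail, loads both operands, and stores the sum. A self-contained sketch of the same pattern with a host-side launcher (the wrapper name `triton_add` and the block size are illustrative, not taken from Fast-LLM):

```python
import torch
import triton
import triton.language as tl


@triton.jit
def add_kernel(input_ptr, other_ptr, out_ptr, numel, block_size: tl.constexpr):
    # Each program handles one contiguous block of `block_size` elements.
    block_start = tl.program_id(axis=0).to(tl.int64) * block_size
    offsets = block_start + tl.arange(0, block_size)
    mask = offsets < numel  # tail mask keeps out-of-range lanes from touching memory
    a = tl.load(input_ptr + offsets, mask=mask)
    b = tl.load(other_ptr + offsets, mask=mask)
    tl.store(out_ptr + offsets, a + b, mask=mask)


def triton_add(x: torch.Tensor, y: torch.Tensor, block_size: int = 1024) -> torch.Tensor:
    out = torch.empty_like(x)
    numel = out.numel()
    grid = (triton.cdiv(numel, block_size),)  # one program per block
    add_kernel[grid](x, y, out, numel, block_size=block_size)
    return out
```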
3ba95bb3-c419-4af2-b7ce-b7e0d81c14fd
group_norm.py
chengzeyi/stable-fast
src/sfast/triton/ops/group_norm.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@eval( """triton.heuristics({ 'BLOCK_SIZE': lambda kwargs: triton.next_power_of_2(kwargs['cluster_num']), })""" ) @eval( """triton.heuristics({ 'num_warps': lambda kwargs: max(1, min(16, kwargs['BLOCK_SIZE'] // 128)), })""" ) @triton.jit def group_norm_4d_channels_last_forward_collect_st...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/group_norm.py
5e7d7488-136e-4938-aca1-8982d4c280bf
paged_attn.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/paged_attn.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _inner_paged_attn_unroll_2_kernel(q, k_cache, v_cache, stride_km, block_base_ptrs, base_offs_kv, alibi_slope, block_offs, seq_len, qkv, qk_max, exp_sum, BLOCK_SIZE: tl.constexpr, LO: tl.constexpr, HI: tl.constexpr): for block_idx in range(LO, HI, 2): offs_kv_0 = tl.load(block_ba...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/paged_attn.py
3db55c15-f4b1-4305-b51b-6e73a82136ea
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_FINAL_STATE_GRADIENT': lambda args: args['dht'] is not None, 'USE_INITIAL_STATE': lambda args: args['dh0'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4]], key=['BT', ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Th...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/chunk.py
1e01284c-e8e1-42a2-b16b-7529e9d7459b
cross_entropy_loss.py
tdrussell/qlora-pipe
kernels/cross_entropy_loss.py
6fb7c8eeae52a0e36c41f00628985f29d8330684
0
@triton.heuristics({'DO_LOGIT_SCALING': lambda args: args['DO_LOGIT_SCALING']}) @triton.jit def _cross_entropy_forward(logits_ptr, logits_row_stride, loss_ptr, logsumexp_ptr, labels_ptr, VOCAB_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr, DO_LOGIT_SCALING: tl.constexpr, LOGIT_SCALE: tl.constexpr): """ ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax", "Cross Entropy" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/tdrussell/qlora-pipe/blob/6fb7c8eeae52a0e36c41f00628985f29d8330684/kernels/cross_entropy_loss.py
da158db8-4576-47cc-be40-fffdcc99f725
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/rwkv6/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BS': 16}, num_warps=2), triton. Config({'BS': 16}, num_warps=4), triton.Config({'BS': 16}, num_warps=8), triton.Config({'BS': 32}, num_warps=2), triton.Config({'BS': 32}, num_warps=4), tri...
{ "Data Type": [ "fp32" ], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/rwkv6/chunk.py
8312c2c2-b06b-4313-beab-8941ed39ad2b
triton_jagged_tensor_ops.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged_jagged_elementwise_arithmetic_ops(x_ptr, y_ptr, M: tl.constexpr, N: tl.constexpr, stride_row: tl.constexpr, stride_col: tl.constexpr, output, thread_block_row_size: tl.constexpr, thread_block_col_size: tl.constexpr, ops_func: tl.constexpr) -> None: pid = tl.program_id(0) num_g...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
e91acd67-c457-4e0b-98c0-f49971d6f6da
simulate.py
Aalanli/AMDGPUExperiments
simulate.py
2a6fd9e1e81d1916e3d87db4dda930e2fa417527
0
@triton.jit def simulate_kernel(output, n_steps, seed, p, start, block_size: tl.constexpr): n_program = tl.num_programs(axis=0) pid = tl.program_id(axis=0) block_start = pid * block_size offsets = block_start + tl.arange(0, block_size) state = tl.full([block_size], start, dtype=tl.uint32) for _ ...
{ "Data Type": [], "Functionality": [ "Elementwise Operations", "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/Aalanli/AMDGPUExperiments/blob/2a6fd9e1e81d1916e3d87db4dda930e2fa417527/simulate.py
caf594ae-0007-42f3-a11f-df8a6e031d08
triton_fused_local_attn2.py
LouChao98/vqtree
ops/triton_fused_local_attn2.py
27a53274df7a804bce27dffcce5f5be73f64b6f3
0
@triton.jit def _attn_fwd_inner(acc, l_i, m_i, q, sm_scale, K_block_ptr, V_block_ptr, start_m, offs_m, offs_n, SEQLEN_K: tl.constexpr, WINDOW_SIZE: tl. constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, EVEN_MN: tl. constexpr, STAGE: tl.constexpr): if STAGE == 1: lo = start_m * BLOCK_M - W...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput", ...
[ "Apache" ]
https://github.com/LouChao98/vqtree/blob/27a53274df7a804bce27dffcce5f5be73f64b6f3/ops/triton_fused_local_attn2.py
85d4ae25-63a1-4dfe-83dc-04295de1183a
copy.py
chengzeyi/stable-fast
src/sfast/triton/ops/copy.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@eval( """triton.heuristics({ 'BLOCK_M': lambda kwargs: min(64, triton.next_power_of_2(kwargs['size_inp_0'])), 'BLOCK_N': lambda kwargs: min(64, triton.next_power_of_2(kwargs['size_inp_1'])), 'BATCH_STRIDE_INP_IS_1': lambda kwargs: kwargs['batch_stride_inp'] == 1, 'STRIDE_INP_0_IS_1': lambda kwargs:...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/copy.py
0ba1eec5-4ae0-4399-a078-20d80cf2ab46
gemm_postop_gelu_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.jit def gelu(x): """ GeLU_ activation - Gaussian error linear unit .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf """ return 0.5 * x * (1 + tanh(kAlpha * (x + 0.044715 * x * x * x)))
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
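In the snippet above, `tanh` and `kAlpha` are defined elsewhere in the benchmark file; `kAlpha` is the usual sqrt(2/pi) constant of the tanh GeLU approximation (the same literal appears in the stable-fast `gelu` later in this listing). A self-contained sketch, assuming `tl.math.tanh` is available in the installed Triton version:

```python
import triton
import triton.language as tl

kAlpha = 0.7978845608028654  # sqrt(2 / pi)


@triton.jit
def gelu(x):
    # Tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1 + tl.math.tanh(kAlpha * (x + 0.044715 * x * x * x)))
```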
1e8d66ac-783c-4e08-bacd-8c8629067011
softmax_online_v2_spec_rev.py
iclementine/optimize_softmax
softmax_online_v2_spec_rev.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel_online_v2(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr): pid_m = tl.program_id(0) m = tl.full((TILE_N,), value=-float('inf'), dtype=output_ptr.dtype.element_ty) z = tl.full((TILE_N,), value=0, dtype=output_ptr.dtype.element_ty) prev_multiple = prev_multi...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Memory-B...
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_online_v2_spec_rev.py
a454fb7a-be17-4207-aa7f-74a5ac3b9ee8
attn_qk_int8_per_block_hd128_causal.py
rodjjo/editorium
editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py
7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694
0
@triton.jit def _attn_fwd_inner(acc, l_i, m_i, q, q_scale, K_ptrs, K_scale_ptr, V_ptrs, start_m, BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl. constexpr, STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl. constexpr, N_CTX: tl.constexpr): if STAGE == 1: lo, hi = 0, start_m * BLO...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput", ...
[ "Apache" ]
https://github.com/rodjjo/editorium/blob/7b92e2c92a144bf23bbe6fe88e3d513ffcf7d694/editorium/app/server/pipelines/cogvideo/sageattention/attn_qk_int8_per_block_hd128_causal.py
4149e45f-a98a-46f3-9ba5-1d39130b86ec
sparse_linear.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/sparse_linear.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.autotune(configs=autotune_configs, key=['col_sparse_dim', 'inner_dim', 'sparse_dim']) @triton.jit def output_sparse_matmul_kernel(lhs_ptr, rhs_ptr, out_ptr, expert_ends_ptr, row_dim: tl.constexpr, col_sparse_dim: tl.constexpr, inner_dim: tl. constexpr, sparse_dim: tl.constexpr, padded_sparse_dim: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access", "Tiled" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput", "Memory-Bound", "Compute Bound" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_linear.py
c4bde555-085f-4158-9b7e-4d8f039195cf
mlstm_matmul.py
LukasBluebaum/xLSTM-Triton-CUDA-Implementation
mlstm_matmul.py
6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b
0
@triton.jit def mlstm_matmul_kernel_df(dF, F, NH: tl.constexpr, S: tl.constexpr): bh_id = tl.program_id(0) batch_id = bh_id // NH head_id = bh_id % NH batch_offset_f = batch_id * NH * S + head_id * S offset_f = tl.arange(0, S) df = tl.load(dF + batch_offset_f + offset_f, offset_f < S) df = t...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Recurrent Neural Networks", "Elementwise Operations", "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute...
[ "MIT" ]
https://github.com/LukasBluebaum/xLSTM-Triton-CUDA-Implementation/blob/6fb49b89cc74e7dadd0f3d56db05684bb4e86f4b/mlstm_matmul.py
5b75cd56-ee47-4e6f-a821-a5604495112c
test_triton_varargs.py
facebookresearch/xformers
tests/test_triton_varargs.py
a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc
0
@triton.jit def sumN(output_ptr, scaling_ptr, *inputs, BLOCK_SIZE: tl.constexpr): offset = tl.arange(0, BLOCK_SIZE) output = tl.zeros([BLOCK_SIZE], tl.float32) scaling: 'VAR_ARGS_ARRAY' for i in range(len(scaling)): scaling[i] = tl.load(scaling_ptr + i) for i in range(2): for j in ra...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD" ]
https://github.com/facebookresearch/xformers/blob/a2f37f8c5f4e3ae0d3459a92e42cd1aeb45b03bc/tests/test_triton_varargs.py
f62e413e-399f-406a-a6fb-44c3e664e00d
dw.py
Forkxz/TritonDeepLearningKernel
kernel/dropconnect/dw.py
add54b6318e8fa5fdbf8c7b47659de9fceaa5691
0
@triton.jit def dropconnect_dx_kernel(dy_ptr, x_ptr, dw_ptr, seed, M, K, N, stride_dym, stride_dyn, stride_xm, stride_xk, stride_dm, stride_dk, stride_dn, stride_dwk, stride_dwn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl. constexpr, BLOCK_SIZE_K: tl.constexpr, ALLOWTF32: tl.constexpr): """ dY_m ...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/Forkxz/TritonDeepLearningKernel/blob/add54b6318e8fa5fdbf8c7b47659de9fceaa5691/kernel/dropconnect/dw.py
0ef41aca-fbb5-44bb-ae73-f97ff2bac77e
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4)], key=['BT', 'BK']) @triton.jit def fused_chunk_delta_rule_fwd_kernel(q, k, v, v_new, d, o, initial_state, final_state, s_k_h, s_k_t, s_k_d, s_v_h, s_v_t, s_v_d, B, H, T, scale, BT: tl.co...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms", "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced", "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/fused_chunk.py
07031280-262d-4433-ae3a-7a2222f9099b
gemm_postop_gelu_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=3...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Matrix Multiplication", "Activation Functions" ], "Memory Access Pattern": [ "Blocked Access", "Tiled" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput", "Comp...
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
1fa87111-7ad0-4833-a0b9-b14824dfba0a
dequant_kernel.py
drisspg/transformer_nuggets
transformer_nuggets/quant/dequant_kernel.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit def dequantize(inputs, nf4_lut): """Dequantizes the nf4 data to bfloat16""" return tl.load(nf4_lut + inputs)
{ "Data Type": [ "bf16" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Memory-Bound" ] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/quant/dequant_kernel.py
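The `dequantize` helper above is a pure gather: each 4-bit code indexes a small lookup table of bfloat16 values. A standalone sketch of that lookup-table pattern wrapped in a full kernel and launcher (names like `lut_dequant_kernel` are illustrative, not from transformer_nuggets):

```python
import torch
import triton
import triton.language as tl


@triton.jit
def lut_dequant_kernel(codes_ptr, lut_ptr, out_ptr, numel, BLOCK_SIZE: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel
    # Integer codes (e.g. 0..15 for nf4) select entries of the lookup table.
    codes = tl.load(codes_ptr + offsets, mask=mask, other=0).to(tl.int32)
    vals = tl.load(lut_ptr + codes)  # gather from the table
    tl.store(out_ptr + offsets, vals, mask=mask)


def lut_dequant(codes: torch.Tensor, lut: torch.Tensor, block_size: int = 1024) -> torch.Tensor:
    out = torch.empty(codes.shape, dtype=lut.dtype, device=codes.device)
    grid = (triton.cdiv(codes.numel(), block_size),)
    lut_dequant_kernel[grid](codes, lut, out, codes.numel(), BLOCK_SIZE=block_size)
    return out
```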
c184d15a-84bd-4dc9-90d1-69aa80226eab
gemm_postop_addmatrix_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=2, num_warps=32), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 4, 'grf_mode': 'large'}, num_stages=3...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_addmatrix_benchmark.py
e1abcb7f-d624-4f0f-adf5-1a9471e35d14
fp8_gemm.py
pytorch/FBGEMM
fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.autotune(configs=MATMUL_CONFIGS, key=['m_key', 'n_key', 'k_key']) @triton.jit def _kernel_matmul_fp8_row(A_ptr, B_ptr, C_ptr, M, N, K, m_key, n_key, k_key, A_scale, B_scale, Bias, stride_am, stride_ak, stride_bn, stride_bk, stride_cm, stride_cn, dot_out_dtype: tl.constexpr, allow_tf32: tl.constexpr,...
{ "Data Type": [ "int8" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/experimental/gemm/triton_gemm/fp8_gemm.py
2923d2d3-cffb-4408-8fa9-e9794bb2be61
05-layer-norm.py
triton-lang/triton
python/tutorials/05-layer-norm.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def _layer_norm_bwd_dwdb(DW, DB, FINAL_DW, FINAL_DB, M, N, BLOCK_SIZE_M: tl .constexpr, BLOCK_SIZE_N: tl.constexpr): pid = tl.program_id(0) cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) db = tl.zeros((BLOCK_SIZE_M, BL...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/05-layer-norm.py
d99050b5-9c96-4233-a447-a34ac46ac1f1
_semi_structured_conversions.py
huyz2023/2by4-pretrain
sparse/_semi_structured_conversions.py
9e330125dea71e5a3dee235f4efb8869f9e4cdd0
0
@triton.jit def _sparse_semi_structured_to_dense_kernel(sparse_ptr, meta_reordered_ptr, dense_ptr, m, k, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) group, interweave = 32, 4 dest_row = row_idx // 32 * 32 + row_idx % 8 * 4 + row_idx % group // 8 if dest_row % 2 == 0: dest_row_ = (r...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Transposed Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Memory-Bound" ] }
[ "BSD" ]
https://github.com/huyz2023/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/_semi_structured_conversions.py
0fff7131-f1c9-4588-b8ed-1ddc2f4a032b
mas_triton.py
uthree/tts_impl
src/tts_impl/functional/monotonic_align/mas_triton.py
a9d9a66b26a0de4694e502dedfdff7be26d99ddd
0
@triton.jit def _maximum_path(path, value, t_x, t_y, B, T, S, max_neg_val, BLOCK_SIZE_X: tl.constexpr): batch = tl.program_id(axis=0) path += batch * T * S value += batch * T * S x_length = tl.load(t_x + batch) y_length = tl.load(t_y + batch) offs_prev = tl.arange(0, BLOCK_SIZE_X) init =...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/uthree/tts_impl/blob/a9d9a66b26a0de4694e502dedfdff7be26d99ddd/src/tts_impl/functional/monotonic_align/mas_triton.py
031b0c3d-1c3a-4448-87a8-dbf9c6afc3b9
cross_entropy_loss.py
tdrussell/qlora-pipe
kernels/cross_entropy_loss.py
6fb7c8eeae52a0e36c41f00628985f29d8330684
0
@triton.heuristics({'DO_LOGIT_SCALING': lambda args: args['DO_LOGIT_SCALING']}) @triton.jit def _cross_entropy_backward(logits_ptr, logits_row_stride, dloss_ptr, dloss_row_stride, logsumexp_ptr, labels_ptr, VOCAB_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr, DO_LOGIT_SCALING: tl.constexpr, LOGIT_SCALE: tl.c...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/tdrussell/qlora-pipe/blob/6fb7c8eeae52a0e36c41f00628985f29d8330684/kernels/cross_entropy_loss.py
0269e925-e1dc-4528-851c-455458868afd
sb_varlen_fwd.py
shawntan/stickbreaking-attention
stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
8dd32ad5e58f0ee0232fd4782dc53d354ff8d283
0
@triton.jit def load_kv(K_blk_ptrs, V_blk_ptrs, N_mask, NO_N_MASK, D_mask, NO_D_MASK: tl.constexpr): if NO_D_MASK: if NO_N_MASK: k = tl.load(K_blk_ptrs) v = tl.load(V_blk_ptrs) else: k = tl.load(K_blk_ptrs, mask=N_mask[:, None]) v = tl.load(V_blk_p...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "Apache" ]
https://github.com/shawntan/stickbreaking-attention/blob/8dd32ad5e58f0ee0232fd4782dc53d354ff8d283/stickbreaking_attention/sb_varlen/sb_varlen_fwd.py
d237c845-8362-47df-b7e1-eaff14494c4f
mhmoe.py
dtadpole/triton-playground
mhmoe.py
2d317976722d63080133b1bf88b1f0cdec98f831
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 32}, num_stages=3, num_warps=4), triton.Config({'BLOCK_SIZE_B': 64, 'BLOCK_SIZE_E': 32}, num_stages=2, num_warps=4), triton.Config({ 'BLOCK_SIZE_B': 32, 'BLOCK_SIZE_E': 64}, num_stages=2, num_warps=4), triton.Config({'BLOCK_SIZ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/dtadpole/triton-playground/blob/2d317976722d63080133b1bf88b1f0cdec98f831/mhmoe.py
a20e0fd8-9354-44cf-b64c-1476d6ce796a
fused_norm_gate.py
sustcsonglin/flash-linear-attention
fla/modules/fused_norm_gate.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'RECOMPUTE_OUTPUT': lambda args: args['Y'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['N', ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/modules/fused_norm_gate.py
d5f138ed-d6b1-4147-92bd-4d9d2bf1a5a2
parallel.py
sustcsonglin/flash-linear-attention
fla/ops/simple_gla/parallel.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'NV': lambda args: triton.cdiv(args['V'], args['BV']), 'OUTPUT_ATTENTIONS': lambda args: args['attn'] is not None}) @triton.jit def parallel_simple_gla_fwd_kernel(q, k, v, g, o, attn, s_k_h, s_k_t, s_v_h, s_v_t, scale, B: tl.constexpr, H: tl.constexpr, T: tl.constexpr, K: tl. constexpr, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/simple_gla/parallel.py
24e6a887-0de6-4d46-817d-187228e035a0
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gsa/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.jit def chunk_gsa_fwd_k_kernel_intra(v, g, o, A, offsets, indices, T: tl. constexpr, HQ: tl.constexpr, H: tl.constexpr, V: tl.constexpr, BT: tl. constexpr, BC: tl.constexpr, BV: tl.constexpr, NC: tl.constexpr, NG: tl .cons...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gsa/chunk.py
7a21ee06-6228-4111-8cca-8a85dc7cb97b
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def jagged2_to_padded_dense_kernel(x_ptr, lengths_ptr, offsets_ptr, output_dense_ptr, stride_b, stride_m, stride_n, max_length, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): pid_batch = tl.program_id(2) pid_m = tl.program_id(0) pid_n = tl.program_id(1) begin = tl.load(offsets_ptr +...
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [ "Strided Access", "Blocked Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
1bcc5bf5-0201-4106-9714-19fc02c62ac9
p_loss_kernels.py
BobMcDear/attorch
attorch/p_loss_kernels.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.autotune(configs=element_wise_kernel_configs(), key=['size']) @triton.jit def p_loss_backward_kernel(output_grad_pointer, input_pointer, target_pointer, input_grad_pointer, target_grad_pointer, size, p_loss: tl.constexpr, reduction: tl.constexpr, BLOCK_SIZE: tl.constexpr): """ Calculates the inp...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/p_loss_kernels.py
1e4b6bf4-c537-4599-9578-cf786aca1b31
single.py
shawntan/scattermoe
scattermoe/kernels/single.py
63b76a2f5f28c052fb4cd7c34479a54158354052
0
@triton.jit def _single2scatter(X_ptr, stride_xm, stride_xk, W_ptr, stride_we, stride_wk, stride_wn, Y_ptr, stride_ym, stride_yn, expert_idxs_ptr, FAN_OUT: tl.constexpr, K: tl.constexpr, N: tl.constexpr, E: tl. constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, ACC_TYPE: tl. constexpr): pid0 =...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "Apache" ]
https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/single.py
4a649b03-1ebe-4b88-b47e-1980960298a0
W4A16_MatMul.py
MDK8888/GPTFast
GPTFast/Kernels/Triton/GPTQ/Matmul/W4A16_MatMul.py
926b7553cfbaf1ec2a702a4bfb477132ce98c2e1
0
@triton.autotune(configs=[triton.Config({'BLOCK_M': 16, 'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=2, num_warps=2), triton.Config({'BLOCK_M': 16, 'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=3, num_warps=4), triton. Config({'BLOCK_M': 32, 'BLOCK_N': 32, 'BLOCK_K': 64}, num_stages=2, num_warps=2), triton.Co...
{ "Data Type": [ "int8" ], "Functionality": [ "Matrix Multiplication", "Quantization" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/MDK8888/GPTFast/blob/926b7553cfbaf1ec2a702a4bfb477132ce98c2e1/GPTFast/Kernels/Triton/GPTQ/Matmul/W4A16_MatMul.py
ecca4ed7-6a0c-4e5e-845f-085135f8040b
ops.py
shawntan/scattermoe
scattermoe/kernels/ops.py
63b76a2f5f28c052fb4cd7c34479a54158354052
0
@triton.autotune(configs=_config_XtY(), key=['M', 'N', 'K']) @triton.heuristics({'NO_K_MASK': lambda args: args['K'] % args['BLOCK_K'] == 0, 'NO_N_MASK': lambda args: args['N'] % args['BLOCK_N'] == 0}) @triton.jit def _groupXtY(DY_ptr, stride_dym, stride_dyk, X_ptr, stride_xm, stride_xn, DW_ptr, stride_dwe, str...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "Apache" ]
https://github.com/shawntan/scattermoe/blob/63b76a2f5f28c052fb4cd7c34479a54158354052/scattermoe/kernels/ops.py
87fdafb5-a70c-4967-b40f-bb70db039d6c
flash_attention.py
falkaer/multi-scale-music
seq/flash_attention.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit def apply_dropout(x, offsets, p, seed, mask_val=float('-inf')): rand = tl.rand(seed, offsets) scale = 1 / (1 - p) return tl.where(rand > p, x * scale, mask_val)
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py
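`apply_dropout` above is a complete device helper: `tl.rand(seed, offsets)` draws a per-element uniform, elements with `rand > p` are kept and rescaled by 1/(1-p) (inverted dropout), and dropped positions receive `mask_val` (negative infinity by default, which suits attention scores). A hedged sketch of calling it from a plain elementwise kernel (the wrapper kernel is my own, not from the repo):

```python
import triton
import triton.language as tl


@triton.jit
def apply_dropout(x, offsets, p, seed, mask_val=float('-inf')):
    rand = tl.rand(seed, offsets)  # per-element uniform in [0, 1)
    scale = 1 / (1 - p)
    return tl.where(rand > p, x * scale, mask_val)


@triton.jit
def dropout_kernel(x_ptr, out_ptr, numel, p, seed, BLOCK_SIZE: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel
    x = tl.load(x_ptr + offsets, mask=mask)
    # For activations (rather than attention scores), drop to 0.0 instead of -inf.
    y = apply_dropout(x, offsets, p, seed, 0.0)
    tl.store(out_ptr + offsets, y, mask=mask)
```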
0f85b722-d956-44c9-8c49-e98decba5b86
y_3.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_3.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def third_order_bwd(coord_ptr: tl.tensor, coord_grad_ptr: tl.tensor, sph_grad_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl. constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): block_id = tl.program_id(0) coord_stride = 3 coord_s...
{ "Data Type": [], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_3.py
08bb518c-fbda-4604-9322-dfac40c7b421
apply_token_bitmask_inplace_triton.py
mlc-ai/xgrammar
python/xgrammar/kernels/apply_token_bitmask_inplace_triton.py
49655f4e5992a0c00183c9bd43d78b49c4e668ab
0
@triton.jit def apply_token_bitmask_inplace_kernel(logits_ptr, bitmask_ptr, indices_ptr, num_rows, vocab_size, bitmask_size, NUM_SMS: tl.constexpr, BLOCK_SIZE: tl.constexpr): pid = tl.program_id(0) num_blocks = tl.cdiv(vocab_size, BLOCK_SIZE) for work_id in tl.range(pid, num_rows * num_blocks, NUM_S...
{ "Data Type": [], "Functionality": [ "Softmax", "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "Apache" ]
https://github.com/mlc-ai/xgrammar/blob/49655f4e5992a0c00183c9bd43d78b49c4e668ab/python/xgrammar/kernels/apply_token_bitmask_inplace_triton.py
e7510983-2f6d-4849-8c80-611f47f2a9cc
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gla/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps) for BK in [32, 64] for BV in [64, 128] for num_warps in [2, 4, 8]], key=['BT']) @triton.jit def chunk_gla_bwd_kernel_inter(q, k, v, h, g, do, dh, dq,...
{ "Data Type": [], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gla/chunk.py
735840ce-064d-4ebd-af98-c2aba99b4e2a
copy.py
chengzeyi/stable-fast
src/sfast/triton/ops/copy.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@eval( """triton.heuristics({ 'BLOCK_M': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_0'])), 'BLOCK_N': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_1'])), 'BLOCK_K': lambda kwargs: min(32, triton.next_power_of_2(kwargs['size_inp_2'])), 'BATCH_STRIDE_INP_IS_1': la...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/copy.py
7856aa29-242e-4335-8d5c-122814a3f128
attn_torch_function.py
ROCm/aotriton
tritonsrc/attn_torch_function.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.autotune(configs=TRITON_CONFIG_LIST_FWD, key=['max_seqlen_q', 'max_seqlen_k', 'CAUSAL']) @triton.jit def tuned_attn_fwd(Q, K, V, B, sm_scale, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_bz, stride_...
{ "Data Type": [], "Functionality": [ "Attention Mechanisms", "Softmax" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/attn_torch_function.py
cda10c52-c03b-4cd3-bcd2-bf5e77672cfe
triton_rms_norm.py
vladmandic/dcae
dcae/nn/triton_rms_norm.py
5223970c7e6c6acfe282e18be7e3821b61511673
0
@triton.jit def _rms_norm_2d_bwd_dx_fused(DX, DY, DW, DB, X, W, B, Rrms, M, C, N, num_blocks, eps, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE: tl.constexpr, BLOCK_SIZE_C: tl.constexpr): m_n = tl.program_id(0) m, n = m_n // num_blocks, m_n % num_blocks X += m * C * N DY += m * C * N DX += m * C *...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/vladmandic/dcae/blob/5223970c7e6c6acfe282e18be7e3821b61511673/dcae/nn/triton_rms_norm.py
f37fbd18-5828-4317-9496-ead47311db58
partition_k.py
pytorch-labs/tritonbench
tritonbench/operators/gemm/partition_k.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=2), triton.Config({ 'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64}, num_stages =5, num_warps=2), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Blocked Access" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/gemm/partition_k.py
c8196583-7032-4955-b22f-e6dfe1c1f392
RzLinearBackward.py
apd10/RzLinear
python/rz_linear/impl/RzLinearBackward.py
eb56657b2de0a97f398f88af421b0fbcbc5469c9
0
@triton.jit def rz_linear_backward_weight_grad_kernel_notune(a_ptr, b_ptr, c_ptr, init_factor, M, N, K, H, stride_am, stride_ak, stride_bm, stride_bn, R7: int, R6: int, R5: int, R4: int, R3: int, R2: int, R1: int, R0: int, allow_tf32: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl. constexpr...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Matrix Multiplication" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/apd10/RzLinear/blob/eb56657b2de0a97f398f88af421b0fbcbc5469c9/python/rz_linear/impl/RzLinearBackward.py
4ee02cfd-0063-451e-9188-dd563dfe40c5
fused_chunk.py
sustcsonglin/flash-linear-attention
fla/ops/retention/fused_chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.jit def fused_chunk_retention_fwd_kernel(q, k, v, o, h0, ht, scale, B: tl. constexpr, H: tl.constexpr, T: tl.constexpr, K: tl.constexpr, V: tl. constexpr, BT: tl.constexpr, BK: tl.constexpr, BV: tl.constexpr, USE_INITIAL_STATE: tl.constexpr, STORE_FINAL_STATE: tl.constexpr, CHECK: tl.constexpr):...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/retention/fused_chunk.py
dbd509cd-7029-42a7-9a9f-50d56a9e82f4
normalization.py
ai-compiler-study/triton-kernels
triton_kernels/kernels/normalization.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit def _rms_norm_bwd(dY, dX, dW, X, W, Rstd, stride, N, BLOCK_SIZE: tl.constexpr): row = tl.program_id(0) X += row * stride dY += row * stride dX += row * stride dW += row * stride cols = tl.arange(0, BLOCK_SIZE) mask = cols < N dy = tl.load(dY + cols, mask=mask, other=0.0) ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/kernels/normalization.py
10c2ce56-9173-429f-9c4d-35c04a889c0f
triton_ops.py
imoneoi/bf16_fused_adam
bf16_fused_adam/triton_ops.py
66375343b528a00a483646a58a8a851a90834f9e
0
@triton.jit def bit_split_kernel(x_ptr, output_hi_ptr, output_lo_ptr, n_elements, BLOCK_SIZE: tl.constexpr): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements x = tl.load(x_ptr + offsets, mask=mask).to(tl.uint3...
{ "Data Type": [ "bf16" ], "Functionality": [ "Quantization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/imoneoi/bf16_fused_adam/blob/66375343b528a00a483646a58a8a851a90834f9e/bf16_fused_adam/triton_ops.py
f5633053-7e55-4be5-a160-add1d765c266
test_autodiff.py
srush/triton-autodiff
tests/test_autodiff.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit def ub1(X, Y): r = tl.arange(0, 16) r2 = tl.arange(0, 32) x = tl.load(X + 16 * r2[:, None] + r) y = triton_unbroadcast(x, tl.arange(0, 16).shape) tl.store(Y + r, y)
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/tests/test_autodiff.py
0a8208c8-85ba-4efb-b976-49ac81225dd3
chunk_h.py
sustcsonglin/flash-linear-attention
fla/ops/common/chunk_h.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_INITIAL_STATE': lambda args: args['h0'] is not None, 'STORE_FINAL_STATE': lambda args: args['ht'] is not None, 'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({'BK': BK, 'BV': BV}, num_warps= num_warps, num_stages=num_stages) for BK ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/common/chunk_h.py
bd50fa8d-7a93-45f1-be5a-1848956128a7
matmul.py
jax-ml/jax-triton
examples/matmul.py
859cc392bec876d132bd0790ea6c00b6c246dd2b
0
@triton.jit def relu(x): return tl.where(x >= 0, x, 0)
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "Apache" ]
https://github.com/jax-ml/jax-triton/blob/859cc392bec876d132bd0790ea6c00b6c246dd2b/examples/matmul.py
24bff4b0-3174-46f7-90f5-71dbb3e12644
softmax_naive.py
iclementine/optimize_softmax
softmax_naive.py
6ddeee3481dd5e63f4a30b946c417e97bc4494bf
0
@triton.jit def softmax_kernel(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr): pid_m = tl.program_id(0) n_offsets = tl.arange(0, TILE_N) offset = pid_m * N + n_offsets input_ptrs = input_ptr + offset mask = n_offsets < N inp = tl.load(input_ptrs, mask=mask, other=-float('inf')).to(output_ptr...
{ "Data Type": [ "fp32" ], "Functionality": [ "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/iclementine/optimize_softmax/blob/6ddeee3481dd5e63f4a30b946c417e97bc4494bf/softmax_naive.py
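The naive softmax above uses one program per row and requires the tile to cover the whole row (TILE_N >= N, rounded up to a power of two). A self-contained sketch of that row-per-program pattern, kernel plus launcher (my own minimal variant, with the usual max-subtraction for numerical stability):

```python
import torch
import triton
import triton.language as tl


@triton.jit
def softmax_kernel(output_ptr, input_ptr, M, N, TILE_N: tl.constexpr):
    # One program per row; TILE_N must be a power of two >= N.
    pid_m = tl.program_id(0)
    n_offsets = tl.arange(0, TILE_N)
    offset = pid_m * N + n_offsets
    mask = n_offsets < N
    inp = tl.load(input_ptr + offset, mask=mask, other=-float('inf'))
    m = tl.max(inp, 0)             # row max for numerical stability
    e = tl.exp(inp - m)
    z = tl.sum(e, 0)
    tl.store(output_ptr + offset, e / z, mask=mask)


def softmax(x: torch.Tensor) -> torch.Tensor:
    M, N = x.shape
    out = torch.empty_like(x)
    grid = (M,)                    # one program per row
    softmax_kernel[grid](out, x, M, N, TILE_N=triton.next_power_of_2(N))
    return out
```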
8a498a79-9878-41b0-9e3c-3a83716b2886
swiglu.py
shauray8/continuity
continuity/diffusion_engine/models/triton_kernels/swiglu.py
a52ad077e4ed3162576c7417f302e792ccdf5eca
0
@triton.jit def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE: tl.constexpr): block_idx = tl.program_id(0) offsets = block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements e_row = tl.load(e + offsets, mask=mask, other=0).to(tl.float32) g_row = tl.load(g + offsets, mask=mask, othe...
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Backpropagation", "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/shauray8/continuity/blob/a52ad077e4ed3162576c7417f302e792ccdf5eca/continuity/diffusion_engine/models/triton_kernels/swiglu.py
891bbd59-6bf6-4383-a905-e03f4293b257
ops.py
srush/triton-autodiff
triton_autodiff/ops.py
f9d1a04d048e3252bfd222646db7175ad60a3c7c
0
@triton.jit def add_grad(left, right): right = triton_unbroadcast(right, left.shape) return left + right
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/srush/triton-autodiff/blob/f9d1a04d048e3252bfd222646db7175ad60a3c7c/triton_autodiff/ops.py
f4342a00-1b63-4e9d-90eb-9cd669123177
activation.py
chengzeyi/stable-fast
src/sfast/triton/ops/activation.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@triton.jit def gelu(x): return 0.5 * x * (1.0 + tl.tanh(0.7978845608028654 * (x + 0.044715 * x * x * x)))
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [], "Performance Objective": [ "High Throughput" ] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py
1f58dd3e-6916-457c-a58e-f4b968d69485
activation.py
chengzeyi/stable-fast
src/sfast/triton/ops/activation.py
3a6f35c7045f8f6812515957ca62ef37260ff080
0
@triton.jit def identity(x): return x
{ "Data Type": [], "Functionality": [], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [] }
[ "MIT" ]
https://github.com/chengzeyi/stable-fast/blob/3a6f35c7045f8f6812515957ca62ef37260ff080/src/sfast/triton/ops/activation.py
810d1a75-8358-49ce-890d-2b567c12cecf
chunk.py
sustcsonglin/flash-linear-attention
fla/ops/gated_delta_rule/chunk.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=4)], key=['BT', 'BK', 'BV']) @triton.jit def chunk_gated_delta_rule_fwd_kernel_o(q, k, v, h, g, o, offsets, indices, scale, T: tl.constexpr, H: tl.constexpr, K: tl.constexpr, V: tl...
{ "Data Type": [ "fp32", "fp16" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ]...
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/gated_delta_rule/chunk.py
f634d81f-eff7-4c83-ac4b-f2cf21f8ff28
rope.py
ardywibowo/triton-mode
kernels/rope.py
5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1
0
@triton.jit def triton_rope(q_buffer, q_buffer_stride, k_buffer, k_buffer_stride, cos_values, cos_values_stride, sin_values, sin_values_stride, seq_length, batch_size: tl.constexpr, num_q_heads: tl.constexpr, num_k_heads: tl.constexpr, head_dim: tl.constexpr, padded_num_q_heads: tl.constexpr, padded_num...
{ "Data Type": [ "fp16", "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Compute Bound" ]...
[ "MIT" ]
https://github.com/ardywibowo/triton-mode/blob/5cd773ec95e25e23c6b75e312c7a9a1c6eb650b1/kernels/rope.py
9636248c-a664-44e8-a77b-6cdc6b314b7e
silu.py
ai-compiler-study/triton-kernels
triton_kernels/ops/silu.py
2308e5e9d965059fe2d19b4d535debac4970b69e
0
@triton.jit def triton_silu(x_ptr, b_ptr, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x = tl.load(x_ptr + x0, mask=xmask).to(tl.float32) output = (x * tl.sigmoid(x)).to(tl.float32) tl.store(...
{ "Data Type": [ "fp32" ], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [ "Coalesced" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/ai-compiler-study/triton-kernels/blob/2308e5e9d965059fe2d19b4d535debac4970b69e/triton_kernels/ops/silu.py
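Note: the visible part of triton_silu processes one XBLOCK-sized chunk per program, so the host-side launch is a 1D grid over the flattened tensor. A minimal sketch follows; the wrapper name silu_fwd is hypothetical, and it assumes b_ptr is the output buffer (the tl.store call is truncated in the record above).

import torch
import triton

def silu_fwd(x, XBLOCK=1024):
    out = torch.empty_like(x)          # assumes b_ptr in the kernel is the output
    n = x.numel()
    grid = (triton.cdiv(n, XBLOCK),)
    triton_silu[grid](x, out, n, XBLOCK=XBLOCK)
    return out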
736db1ea-c332-43df-a318-da529e0545d7
rms_norm.py
dame-cell/Triformer
triformer/rms_norm.py
0712537d576166b93fa09aa9509b2661b9ed8a68
0
@triton.jit def rmsnorm_forward(Y, Y_row_stride, X, X_row_stride, W, r, n_cols, eps, BLOCK_SIZE: tl.constexpr): row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols Y_ptr = Y + row_idx * Y_row_stride X_ptr = X + row_idx * X_row_stride r_ptr = r + row_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/dame-cell/Triformer/blob/0712537d576166b93fa09aa9509b2661b9ed8a68/triformer/rms_norm.py
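Note: rmsnorm_forward launches one program per row (row_idx = tl.program_id(0)) and masks columns against n_cols, so BLOCK_SIZE only needs to be a power of two covering one row. A minimal host-side sketch; the wrapper name and the assumption that r holds one per-row scalar (e.g. the reciprocal RMS saved for the backward pass) are mine, not Triformer's.

import torch
import triton

def rmsnorm(X, W, eps=1e-6):
    n_rows, n_cols = X.shape
    Y = torch.empty_like(X)
    r = torch.empty(n_rows, dtype=torch.float32, device=X.device)  # per-row statistic (assumption)
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    rmsnorm_forward[(n_rows,)](Y, Y.stride(0), X, X.stride(0), W, r,
                               n_cols, eps, BLOCK_SIZE=BLOCK_SIZE)
    return Y, r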
395d36cc-ba19-422f-b9e3-4d3f977cec4b
block_sparse_attention_lut.py
sparklesea/sparse-quant
sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py
e3d8b6ecab208c31b744913ed8c3caaa43605f86
0
@triton.jit def _sparse_attention_decode_fwd_kernel(Q, K, V, sm_scale, Out, L, M, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, stride_luth, NNZ: tl.constexpr, B...
{ "Data Type": [ "fp16", "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound", "High Throughput" ]...
[ "Apache", "BSD" ]
https://github.com/sparklesea/sparse-quant/blob/e3d8b6ecab208c31b744913ed8c3caaa43605f86/sparse-attention/muxi/playground/kernels/block_sparse_attention_lut.py
89a686ef-90d0-45ff-bfe3-19e9d5250c8f
wy_fast.py
sustcsonglin/flash-linear-attention
fla/ops/delta_rule/wy_fast.py
5968de9a22c096326b19859cfe05dac36155c31d
0
@triton.heuristics({'USE_OFFSETS': lambda args: args['offsets'] is not None}) @triton.autotune(configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8, 16]], key=['BK']) @triton.jit def fwd_prepare_wy_repr_kernel_chunk64(k, beta, A, offsets, indices, T: tl. constexpr, H: tl.constexpr, K: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access", "Transposed Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/sustcsonglin/flash-linear-attention/blob/5968de9a22c096326b19859cfe05dac36155c31d/fla/ops/delta_rule/wy_fast.py
6ed4de31-dcc6-4dc9-a02d-409ffb40b3d9
math.py
BobMcDear/attorch
attorch/math.py
da06cb6236bb47195e33fe3986ed21c675ed94cc
0
@triton.jit def update_ema(prev_ema, new_val, momentum): """ Updates exponential moving average. Args: prev_ema: Previous exponential moving average. new_val: Value used to update the exponential moving average. momentum: Momentum. Returns: Updated running statistic. ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Single Instance" ] }
[ "MIT" ]
https://github.com/BobMcDear/attorch/blob/da06cb6236bb47195e33fe3986ed21c675ed94cc/attorch/math.py
f6af2c0f-1b6e-4f7e-a6be-7ccf22bd5782
y_3.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/direct/y_3.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def third_order_fwd(coord_ptr: tl.tensor, output_ptr: tl.tensor, block_size: tl.constexpr, coord_numel: tl.constexpr, output_numel: tl.constexpr, col_offset: tl.constexpr, output_stride: tl.constexpr): coord_stride = 3 block_id = tl.program_id(0) coord_striding = tl.arange(0, block_size)...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/direct/y_3.py
318e31bb-32df-41cc-97f5-6c170ad3dd15
triton_kernels.py
IntelLabs/EquiTriton
src/equitriton/sph_harm/triton_kernels.py
1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c
0
@triton.jit def _triton_third_order_bwd(x_ptr: tl.tensor, y_ptr: tl.tensor, z_ptr: tl. tensor, g_x_ptr: tl.tensor, g_y_ptr: tl.tensor, g_z_ptr: tl.tensor, g_1_0_ptr: tl.tensor, g_1_1_ptr: tl.tensor, g_1_2_ptr: tl.tensor, g_2_0_ptr: tl.tensor, g_2_1_ptr: tl.tensor, g_2_2_ptr: tl.tensor, g_2_3_ptr: tl.ten...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "Apache" ]
https://github.com/IntelLabs/EquiTriton/blob/1cbf04f69b512a5c1d8ff4880dbf6e17fe089d4c/src/equitriton/sph_harm/triton_kernels.py
6927aecc-280a-412c-8568-2e3046c2cd1c
triton_jagged_tensor_ops.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def triton_jagged_to_dense_optimization_2d(input_jagged_values_ptr, input_jagged_offset_ptr, input_jagged_row_stride, output_dense_ptr, output_dense_row_stride, output_dense_matrix_stride, thread_block_row_size: tl.constexpr, thread_block_col_size: tl. constexpr, padded_value, operation_func...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/triton/jagged/triton_jagged_tensor_ops.py
d73689df-125b-497e-b758-19f7ab9950f4
06-fused-attention.py
2lambda123/triton
python/tutorials/06-fused-attention.py
09e27725b89043a07f49c440db6a9aedcfba8432
0
@triton.jit def _bwd_preprocess(Out, DO, Delta, BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr ): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_n = tl.arange(0, D_HEAD) o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) do = tl.load(DO + off_m[:, None] * D_HEAD ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/2lambda123/triton/blob/09e27725b89043a07f49c440db6a9aedcfba8432/python/tutorials/06-fused-attention.py
8f081f68-2198-4cce-a6f5-0e54650bd725
triton_sll.py
pytorch/FBGEMM
fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
fe980ab54a6e28818d81c8694b6564e7f804418b
0
@triton.jit def padded_dense_to_jagged2_kernel(x_ptr, lengths_ptr, offsets_ptr, output_jagged_ptr, stride_b, stride_m, stride_n, max_length, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): pid_batch = tl.program_id(2) pid_m = tl.program_id(0) pid_n = tl.program_id(1) begin = tl.load(offsets_ptr +...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "BSD", "MIT" ]
https://github.com/pytorch/FBGEMM/blob/fe980ab54a6e28818d81c8694b6564e7f804418b/fbgemm_gpu/fbgemm_gpu/sll/triton_sll.py
6d81ba3a-93a7-42c4-af16-ccbd76925f55
05-layer-norm.py
triton-lang/triton
python/tutorials/05-layer-norm.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.jit def _layer_norm_bwd_dx_fused(DX, DY, DW, DB, X, W, Mean, Rstd, Lock, stride, N, GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr): row = tl.program_id(0) cols = tl.arange(0, BLOCK_SIZE_N) mask = cols < N X += row * stride DY += row * stride DX += row * stride lock_id = ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Normalization", "Backpropagation" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/05-layer-norm.py
5d85d5a2-0693-4087-ad1a-4430132d743a
gemm_splitk_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_splitk_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.autotune(configs=[triton.Config({'BLOCK_M': 256, 'BLOCK_N': 256, 'BLOCK_K': 32, 'GROUP_M': 4, 'SPLIT_K': 4, 'grf_mode': 'large'}, num_stages=4, num_warps=32)], key=['M', 'N', 'K']) @triton.jit def _kernel(A, B, C, M: tl.constexpr, N: tl.constexpr, K: tl.constexpr, stride_am: tl.constexpr, stride_ak:...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled", "Coalesced" ], "Parallelization Strategy": [ "Persistent Kernels" ], "Performance Objective": [ "High Throughput", "Memory-Bound" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_splitk_benchmark.py
ff71555a-07ff-4b43-bde0-81003adfcfab
flash_attention.py
falkaer/multi-scale-music
seq/flash_attention.py
a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d
0
@triton.jit
def causal_mask(offs_m, offs_n, M, N, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr):
    shift = N - M
    mask = shift + offs_m[:, None] >= offs_n[None, :]
    if not EVEN_M & EVEN_N:
        mask = mask & make_bounds(offs_m, offs_n, M, N, EVEN_M, EVEN_N)
    return tl.where(mask, 0, float('-inf'))
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Latency Sensitive" ] }
[ "MIT" ]
https://github.com/falkaer/multi-scale-music/blob/a7794ddfb3bbd95b70acf3fe72a08d8a1d47564d/seq/flash_attention.py
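Note: causal_mask returns an additive bias (0 where a query may attend, -inf where it may not), which makes it easy to sanity-check by writing one tile out to memory. The kernel and wrapper below are hypothetical test helpers, assuming causal_mask from the record above is in scope and that the tile covers the whole score matrix (EVEN_M and EVEN_N both true, so make_bounds is never reached).

import torch
import triton
import triton.language as tl

@triton.jit
def dump_causal_bias(out_ptr, M, N, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
    offs_m = tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    # EVEN_M=True, EVEN_N=True passed positionally
    bias = causal_mask(offs_m, offs_n, M, N, True, True)
    tl.store(out_ptr + offs_m[:, None] * BLOCK_N + offs_n[None, :], bias)

def show_causal_bias(M=8, N=8):
    out = torch.empty((M, N), dtype=torch.float32, device='cuda')
    dump_causal_bias[(1,)](out, M, N, BLOCK_M=M, BLOCK_N=N)
    return out  # row i sees keys 0..(N - M + i); later keys get -inf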
658e3aea-d371-4a6d-8906-df02e8f90b5c
blocksparse_attention_kernel.py
Charlie-XIAO/sparse-vllm
vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py
d228909a30b0c245c35417fb7d2acdf9a3690042
0
@triton.jit def _fwd_kernel_inner(acc, l_i, m_i, q, Q, k_block_col_idx, layout_col_ptr, layout_col_stride_h, layout_col_stride_m, k_ptrs, v_ptrs, off_h, offs_m, offs_n, offs_d, stride_kt, stride_vt, sm_scale, k_seqlen, past_len, LAST_K_BLOCK: tl.constexpr, BLOCK_M_LOADING: tl.constexpr, BLOCK_N: tl. con...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Latency Sensitive" ] }
[ "Apache" ]
https://github.com/Charlie-XIAO/sparse-vllm/blob/d228909a30b0c245c35417fb7d2acdf9a3690042/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py
ca7c4674-22d1-465d-8d7e-f908478548b8
sparse_copy.py
ServiceNow/Fast-LLM
fast_llm/functional/triton/sparse_copy.py
8b46289079da67cba99628448a6b6083dac083cf
0
@triton.jit def sparse_map_kernel(top_experts_ptr, expert_ends_ptr, expert_pad_begins_ptr, sparse_rows_ptr, num_sparse_rows: tl.constexpr, num_experts: tl.constexpr, pad_to_multiple: tl.constexpr, block_size: tl.constexpr, block_size_expert: tl.constexpr, dtype: tl.constexpr): """ Since the methods ...
{ "Data Type": [], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound" ] }
[ "Apache" ]
https://github.com/ServiceNow/Fast-LLM/blob/8b46289079da67cba99628448a6b6083dac083cf/fast_llm/functional/triton/sparse_copy.py
8a2be484-de09-45f5-adcd-8c5f11a156e6
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/jagged_sum/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r, 'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in itertools.product(BLOCK_SIZES, BLOCK_SIZES, NUM_WARPS, NUM_STAGES)], key=['M']) @triton.jit def triton_jagged_sum_kernel_simple_fused_buffer_then_sum(input_ptr_values, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_sum/kernels.py
942e12b2-81c0-40e4-966c-b945f0b6e59f
parallel_scan.py
chengkai-liu/RecBLR
parallel_scan.py
66e520c26e28c05a5425ba2e81c9169b7e0176e2
0
@triton.jit def forward_scan(gates, tokens, outputs, SEQUENCE_LENGTH: tl.constexpr): sequence_id = tl.num_programs(axis=1) * tl.program_id(axis=0 ) + tl.program_id(axis=1) strides = tl.arange(0, SEQUENCE_LENGTH) + sequence_id * SEQUENCE_LENGTH tokens_ = tl.load(tokens + strides) gates_ = tl.load...
{ "Data Type": [], "Functionality": [ "Recurrent Neural Networks" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "Low Latency" ] }
[ "MIT" ]
https://github.com/chengkai-liu/RecBLR/blob/66e520c26e28c05a5425ba2e81c9169b7e0176e2/parallel_scan.py
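Note: the body of forward_scan is truncated, but the loads suggest it evaluates the first-order recurrence h_t = gate_t * h_{t-1} + token_t over one sequence per program. Whether the original uses tl.associative_scan or a sequential loop is not visible here; as a hedged sketch only, the standard associative combine operator for that recurrence would look like:

@triton.jit
def first_order_op(gate_l, token_l, gate_r, token_r):
    # composing two steps of h <- gate * h + token:
    #   h2 = g_r * (g_l * h0 + t_l) + t_r = (g_l * g_r) * h0 + (g_r * t_l + t_r)
    return gate_r * gate_l, gate_r * token_l + token_r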
7d8a010b-7cc8-4d82-8e78-3e637e19e741
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/jagged_mean/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_RAGGED': b_r, 'BLOCK_SIZE_M': b_m}, num_warps=w, num_stages=s) for b_r, b_m, w, s in itertools.product(BLOCK_SIZES_RAGGED, BLOCK_SIZES_M, NUM_WARPS, NUM_STAGES)], key=['M']) @triton.jit def triton_jagged_mean_kernel_simple_fused_sum_then_buffer(input_ptr_...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/jagged_mean/kernels.py
fbeec137-387e-4b5f-a767-2a50d629e0ce
bwd_inner_dk_dv.py
ROCm/aotriton
tritonsrc/bwd_inner_dk_dv.py
016f733e8ff746450e066f78bed68709ccd93e60
0
@triton.jit def bwd_inner_dk_dv(dk, dv, qk_scale, bias_scale, q_ptrs, q_stride, kt, vt, B_block_ptr, do_ptrs, do_stride, l_ptrs, D_ptrs, seqlen_q, seqlen_k, head_dim, start_k, lo, hi, overflow_size, dropout_p, dropout_scale, philox_seed, batch_philox_offset, max_seqlen_k, BLOCK_M: tl.constexpr, BLOCK_DM...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "Compute Bound" ] }
[ "MIT" ]
https://github.com/ROCm/aotriton/blob/016f733e8ff746450e066f78bed68709ccd93e60/tritonsrc/bwd_inner_dk_dv.py
b20ebc56-6a89-4549-b7b7-605c742067fe
flash_attention.py
drisspg/transformer_nuggets
transformer_nuggets/flash/flash_attention.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit
def masked_row(rows):
    """rows is BLOCK_M slice of the QK score

    Returns:
        BLOCK_M vector of boolean values indicating whether this
        Query x Key position is fully masked
    """
    return rows == float('-inf')
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/flash_attention.py
3536e65e-9891-4926-b200-b5960ac48099
triton_attention.py
pytorch-labs/tritonbench
tritonbench/operators/template_attention/triton_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_DMODEL': 64}, num_stages=3, num_warps=4)], key=['num_queries']) @triton.jit def triton_tem_fused_no_exp2(arg_Q, arg_K, arg_V, out_ptr0, num_queries: tl .constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_DMODEL: tl.co...
{ "Data Type": [ "fp16" ], "Functionality": [ "Attention Mechanisms", "Matrix Multiplication" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Thread-Block Mappings" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/template_attention/triton_attention.py
2b4627a5-61b5-4d4b-aacb-1b0565738910
masks.py
drisspg/transformer_nuggets
transformer_nuggets/flash/masks.py
a4c66bbeebaa479ad8b6ed82d7efbafa41b17260
0
@triton.jit
def alibi_attention_triton(score, batch, head, seq_len_q, seq_len_kv, num_heads):
    alibi_scale = tl.math.exp2(-((head + 1) * 8.0 / num_heads))
    bias = seq_len_kv - seq_len_q
    score = score + alibi_scale * bias
    return score
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/drisspg/transformer_nuggets/blob/a4c66bbeebaa479ad8b6ed82d7efbafa41b17260/transformer_nuggets/flash/masks.py
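Note: the slope term in alibi_attention_triton follows the usual ALiBi geometric schedule, 2^(-8 * (head + 1) / num_heads). A small plain-Python check of that closed form (mirroring the formula rather than calling the Triton helper):

def alibi_slope(head, num_heads):
    # same closed form as tl.math.exp2(-((head + 1) * 8.0 / num_heads))
    return 2.0 ** (-((head + 1) * 8.0 / num_heads))

slopes = [alibi_slope(h, 8) for h in range(8)]
# -> 2**-1, 2**-2, ..., 2**-8: each later head decays the positional bias faster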
6b120d1b-0950-4333-ad5e-5a0ff072d2b4
triton_fused_attention.py
pytorch-labs/tritonbench
tritonbench/kernels/triton_fused_attention.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(list(filter(keep, configsTma)), key=['N_CTX']) @triton.jit def _attn_fwd_tma(Q, K, V, sm_scale, M, Out, desc_q, desc_k, desc_v, desc_o, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, s...
{ "Data Type": [ "fp32" ], "Functionality": [ "Attention Mechanisms" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Cooperative Groups" ], "Performance Objective": [ "High Throughput" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/kernels/triton_fused_attention.py
ffd4e13b-2976-4dd9-bf5e-79d064300709
08-grouped-gemm.py
triton-lang/triton
python/tutorials/08-grouped-gemm.py
a2b398e0bb1b120f31cf386d6ae3261c3ab84207
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'NUM_SM': 84}), triton.Config( {'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'NUM_SM': 128}), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'NUM_SM': 84}),...
{ "Data Type": [ "fp16" ], "Functionality": [ "Matrix Multiplication" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "High Throughput", "Compute Bound" ] }
[ "MIT" ]
https://github.com/triton-lang/triton/blob/a2b398e0bb1b120f31cf386d6ae3261c3ab84207/python/tutorials/08-grouped-gemm.py
ea89f9e0-302f-4fd2-a737-0aff546ce52f
kernels.py
pytorch-labs/tritonbench
tritonbench/operators/sum/kernels.py
3a5dccb159834968567a2e45e561dc1aeaa8f8a8
0
@triton.autotune(configs=[triton.Config({'BLOCK_SIZE_N': b_n, 'BLOCK_SIZE_K': b_k}, num_warps=w) for b_n, b_k, w in itertools.product ([(4 ** n) for n in range(6)], [(4 ** n) for n in range(4)], [2, 4, 8]) ], key=['N']) @triton.jit def triton_sum_kernel_2D_result_dim_1_sum_then_buffer(input_ptr, output_ptr,...
{ "Data Type": [ "fp32" ], "Functionality": [ "Elementwise Operations" ], "Memory Access Pattern": [ "Tiled" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Memory-Bound", "Batch-Oriented" ] }
[ "BSD" ]
https://github.com/pytorch-labs/tritonbench/blob/3a5dccb159834968567a2e45e561dc1aeaa8f8a8/tritonbench/operators/sum/kernels.py
0d8babf1-950f-4d42-b5a5-310a8413e616
fused_moe_fp16.py
AlibabaPAI/FLASHNN
flashnn/triton_kernels/fused_moe_fp16.py
528a9301587f5fb135b25d973a87ba0a40a703a7
0
@triton.jit def _fused_moe_kernel(A, B, C, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, N, K, EM, num_valid_tokens, stride_am, stride_ak, stride_be, stride_bn, stride_bk, stride_cm, stride_cn, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl...
{ "Data Type": [ "fp32" ], "Functionality": [ "Matrix Multiplication", "Top-K Selection" ], "Memory Access Pattern": [ "Blocked Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput", "Batch-Oriented...
[ "Apache" ]
https://github.com/AlibabaPAI/FLASHNN/blob/528a9301587f5fb135b25d973a87ba0a40a703a7/flashnn/triton_kernels/fused_moe_fp16.py
90a00adc-8b7b-4112-b132-f300bfc18a2a
rwkv_vanilla.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/rwkv_vanilla.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit def wkv_triton_vanilla_backward_kernel(w_ptr, w_s_c, u_ptr, u_s_c, k_ptr, k_s_b, k_s_t, k_s_c, v_ptr, v_s_b, v_s_t, v_s_c, state_ptr, state_s_b, state_s_ab, state_s_t, state_s_c, gwkv_ptr, gwkv_s_b, gwkv_s_t, gwkv_s_c, gstate_out_ptr, gstate_out_s_b, gstate_out_s_ab, gstate_out_s_c, gw_ptr, ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation" ], "Memory Access Pattern": [], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Batch-Oriented" ] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/rwkv_vanilla.py
52b81982-1106-4fa0-9262-3ccc64858017
gemm_postop_gelu_benchmark.py
intel/intel-xpu-backend-for-triton
benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2
0
@triton.jit
def tanh(x):
    return 2 * tl.sigmoid(2 * x) - 1
{ "Data Type": [], "Functionality": [ "Activation Functions", "Elementwise Operations" ], "Memory Access Pattern": [], "Parallelization Strategy": [], "Performance Objective": [ "Low Latency", "Single Instance" ] }
[ "MIT" ]
https://github.com/intel/intel-xpu-backend-for-triton/blob/6ee08cd29ec3cd8b8eb3f92b9c93977fc6f6e5c2/benchmarks/triton_kernels_benchmark/gemm_postop_gelu_benchmark.py
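Note: this tanh helper relies on the identity tanh(x) = 2 * sigmoid(2x) - 1, presumably so the GEMM GELU post-op can reuse tl.sigmoid. A quick PyTorch check of the identity (not part of the benchmark):

import torch

x = torch.linspace(-4.0, 4.0, steps=9)
approx = 2 * torch.sigmoid(2 * x) - 1  # same identity the Triton helper uses
assert torch.allclose(approx, torch.tanh(x), atol=1e-6)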
a830e503-24c8-412e-a532-a0dc3f91630f
k_softmax_dropout.py
kimiasa/Experiments
src/ops/triton/k_softmax_dropout.py
c4e73bfefd8290695ec52b6386b6b81838ca94a1
0
@triton.autotune(configs=[triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32)], key=['K']) @triton.heuristics({'DEPTH': lambda nargs: get_depth(nargs['K'])}) @triton.h...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Softmax" ], "Memory Access Pattern": [ "Strided Access" ], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "High Throughput" ] }
[ "Apache" ]
https://github.com/kimiasa/Experiments/blob/c4e73bfefd8290695ec52b6386b6b81838ca94a1/src/ops/triton/k_softmax_dropout.py
bef108ba-facf-4f6f-b328-e31add9c646e
real_rnn_tie_input_gate.py
berlino/seq_icl
src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py
9b9223d15348b5a415fb453ed988ed5f7ab9fbdc
0
@triton.jit def bwd_sequential_scan(grad_output, v, f, h, B, L, C, BLOCK_M: tl.constexpr): offset_b = tl.program_id(0) if offset_b >= B: return offset_n = tl.program_id(1) ptr = tl.arange(0, BLOCK_M) + offset_b * L * C + (L - 1 ) * C + offset_n * BLOCK_M grad_h = tl.zeros([BLOCK_M], ...
{ "Data Type": [ "fp32" ], "Functionality": [ "Backpropagation", "Recurrent Neural Networks" ], "Memory Access Pattern": [], "Parallelization Strategy": [ "Grid-Stride Loops" ], "Performance Objective": [ "Compute Bound", "Batch-Oriented" ] }
[ "Apache" ]
https://github.com/berlino/seq_icl/blob/9b9223d15348b5a415fb453ed988ed5f7ab9fbdc/src/models/sequence/rnn/scan_triton/real_rnn_tie_input_gate.py