-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsoftmax.py
More file actions
43 lines (36 loc) · 1.26 KB
/
softmax.py
File metadata and controls
43 lines (36 loc) · 1.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import torch
import triton
import triton.language as tl
@triton.jit
def softmax_core(x_ptr, y_ptr, N, BLOCK: tl.constexpr):
    """
    Numerically stable softmax over a flat length-N buffer, computed by a
    single Triton program (the launcher in this file uses grid=(1,)).

    Pass 1 streams the input in BLOCK-sized chunks while maintaining a
    running maximum and a running sum of exponentials — the "online
    softmax" rescaling trick — so nothing greater than 0 is ever
    exponentiated. Pass 2 re-reads the input and writes
    exp(x - max) / exp_sum to the output.

    Args:
        x_ptr: pointer to the input values (read in both passes).
        y_ptr: pointer to the output buffer, same length as the input.
        N: number of elements to process.
        BLOCK: compile-time chunk width (tl.arange requires a power of two).
    """
    # Running statistics: the max seen so far, and the sum of
    # exp(x - max_val) over all elements seen so far.
    max_val = tl.full((), -float("inf"), dtype=tl.float32)
    exp_sum = tl.zeros((), dtype=tl.float32)
    offset = 0
    # --- Pass 1: accumulate the global max and the rescaled exp-sum. ---
    while offset < N:
        idx = offset + tl.arange(0, BLOCK)
        mask = idx < N
        # Masked lanes load -inf so they contribute exp(-inf) = 0 below.
        chunk = tl.load(x_ptr + idx, mask=mask, other=-float("inf")).to(tl.float32)
        chunk_max = tl.max(chunk, axis=0)
        new_max = tl.maximum(max_val, chunk_max)
        # Rescale the old partial sum from the old max to the new max,
        # then fold in this chunk. On the first iteration the rescale
        # factor is exp(-inf) = 0, which is harmless since exp_sum is 0.
        exp_sum = exp_sum * tl.exp(max_val - new_max) + tl.sum(tl.exp(chunk - new_max), axis=0)
        max_val = new_max
        offset += BLOCK
    offset = 0
    # --- Pass 2: re-read the input, normalize, and store the result. ---
    while offset < N:
        idx = offset + tl.arange(0, BLOCK)
        mask = idx < N
        x = tl.load(x_ptr + idx, mask=mask, other=-float("inf")).to(tl.float32)
        y = tl.exp(x - max_val) / exp_sum
        tl.store(y_ptr + idx, y, mask=mask)
        offset += BLOCK
def softmax_forward(x: torch.Tensor, y: torch.Tensor, n: int) -> None:
    """
    Launch the Triton softmax kernel over the first ``n`` elements of ``x``,
    writing the normalized probabilities into ``y`` in place.

    The kernel performs raw linear pointer arithmetic, so both tensors must
    be contiguous CUDA tensors and hold at least ``n`` elements; invalid
    input is rejected here rather than producing silently wrong results or
    an illegal memory access on the device.

    Args:
        x: contiguous CUDA tensor holding the input logits.
        y: contiguous CUDA tensor that receives the softmax output.
        n: number of elements to process (0 <= n <= numel of both tensors).

    Raises:
        ValueError: if the tensors are not CUDA-resident/contiguous, or if
            ``n`` is out of range for either tensor.
    """
    if not (x.is_cuda and y.is_cuda):
        raise ValueError("softmax_forward requires CUDA tensors")
    if not (x.is_contiguous() and y.is_contiguous()):
        raise ValueError("softmax_forward requires contiguous tensors")
    if n < 0 or n > x.numel() or n > y.numel():
        raise ValueError("n is out of range for the given tensors")
    # BLOCK must be a power of two because the kernel uses tl.arange.
    BLOCK = 1024
    # The kernel streams over the whole vector itself, so one program
    # instance is sufficient.
    grid = (1,)
    # Pass the constexpr by keyword for clarity.
    softmax_core[grid](x, y, n, BLOCK=BLOCK)