-
Notifications
You must be signed in to change notification settings - Fork 27
Expand file tree
/
Copy pathtask.yml
More file actions
54 lines (44 loc) · 1.68 KB
/
task.yml
File metadata and controls
54 lines (44 loc) · 1.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
# name: matmul-py

# Files staged for evaluation; "@SUBMISSION@" is substituted by the harness
# with the user's submitted code (quoted: '@' is a reserved YAML indicator).
files:
  - name: submission.py
    source: "@SUBMISSION@"
  - name: task.py
    source: task.py
  - name: utils.py
    source: ../utils.py
  - name: reference.py
    source: reference.py
  - name: eval.py
    source: ../eval.py

# Reference implementations used as performance baselines.
milestones:
  - name: pytorch
    source: submission.py
    description: PyTorch reference implementation as a performance baseline for matmul
  # NOTE(review): triton_ref.py is not listed under `files:` above — presumably
  # the harness stages milestone sources separately; confirm against the runner.
  - name: triton
    source: triton_ref.py
    description: Triton reference implementation as a performance baseline for matmul
    exclude_gpus: ["T4"]

lang: py

# Task statement shown to participants.
description: |
  Implement a custom matmul function that matches the reference implementation.
  The function should handle a tuple of input tensors and apply matmul.
  You may assume that all matrix dimensions are multiples of 16.

config:
  main: eval.py

templates:
  # doesn't need to be an "empty" template file, we can also use a reference implementation here
  Python: submission.py

# Correctness checks: matrix dimensions {m, n, k} plus an RNG seed.
tests:
  - {m: 64, n: 64, k: 64, seed: 53124}
  - {m: 128, n: 128, k: 128, seed: 3321}
  - {m: 256, n: 256, k: 256, seed: 1200}
  - {m: 32, n: 512, k: 32, seed: 32523}
  - {m: 64, n: 1024, k: 64, seed: 4327}

# Timed runs: larger sizes than the correctness tests.
benchmarks:
  - {m: 128, n: 128, k: 128, seed: 43214}
  - {m: 256, n: 256, k: 256, seed: 423011}
  - {m: 512, n: 512, k: 512, seed: 123456}
  - {m: 1024, n: 1024, k: 1024, seed: 1029}
  - {m: 2048, n: 2048, k: 2048, seed: 75342}
  - {m: 1024, n: 1536, k: 1024, seed: 321}
  - {m: 2048, n: 3072, k: 2048, seed: 32412}
  - {m: 4096, n: 5120, k: 4096, seed: 123456}