/*
* Copyright (c) 2024 by FlashInfer team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FLASHINFER_TOPK_CUH_
#define FLASHINFER_TOPK_CUH_
#include <cuda.h>
#include <cstdlib>
#include <cuda/std/limits>
#include <numeric>
#include "utils.cuh"
#include "vec_dtypes.cuh"
namespace flashinfer {
namespace sampling {
// ============================================================================
// RadixTopK Type Traits - supports float, half, and bfloat16
// OrderedType: uint32_t for float, uint16_t for half/bf16
// NUM_ROUNDS is computed as: sizeof(OrderedType) * 8 / RADIX_BITS
// ============================================================================
template <typename DType>
struct RadixTopKTraits;
// Specialization for float (32-bit)
template <>
struct RadixTopKTraits<float> {
using OrderedType = uint32_t;
// Compute number of rounds based on radix bits (not hardcoded)
template <uint32_t RADIX_BITS>
static __host__ __device__ constexpr uint32_t num_rounds() {
return sizeof(OrderedType) * 8 / RADIX_BITS;
}
__device__ __forceinline__ static OrderedType ToOrdered(float val) {
uint32_t bits = __float_as_uint(val);
// For descending order: flip all bits if negative, else flip sign bit
return (bits & 0x80000000) ? ~bits : (bits ^ 0x80000000);
}
__device__ __forceinline__ static float FromOrdered(OrderedType ordered) {
uint32_t bits = (ordered & 0x80000000) ? (ordered ^ 0x80000000) : ~ordered;
return __uint_as_float(bits);
}
__device__ __forceinline__ static float NegInf() {
return -cuda::std::numeric_limits<float>::infinity();
}
};
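// Illustrative worked example for the float mapping above (not part of the API):
//   ToOrdered(-1.0f): bits = 0xBF800000, sign set   -> ~bits             = 0x407FFFFF
//   ToOrdered( 0.0f): bits = 0x00000000, sign clear -> bits ^ 0x80000000 = 0x80000000
//   ToOrdered( 1.0f): bits = 0x3F800000, sign clear -> bits ^ 0x80000000 = 0xBF800000
// The mapping is monotonic (a > b as float iff ToOrdered(a) > ToOrdered(b) as
// uint32_t), so radix select over the unsigned bytes finds the k-th largest float.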
// Specialization for half (16-bit)
template <>
struct RadixTopKTraits<half> {
using OrderedType = uint16_t;
template <uint32_t RADIX_BITS>
static __host__ __device__ constexpr uint32_t num_rounds() {
return sizeof(OrderedType) * 8 / RADIX_BITS;
}
__device__ __forceinline__ static OrderedType ToOrdered(half val) {
uint16_t bits = __half_as_ushort(val);
return (bits & 0x8000) ? static_cast<uint16_t>(~bits) : static_cast<uint16_t>(bits ^ 0x8000);
}
__device__ __forceinline__ static half FromOrdered(OrderedType ordered) {
uint16_t bits = (ordered & 0x8000) ? static_cast<uint16_t>(ordered ^ 0x8000)
: static_cast<uint16_t>(~ordered);
return __ushort_as_half(bits);
}
__device__ __forceinline__ static half NegInf() {
return __ushort_as_half(static_cast<uint16_t>(0xFC00)); // -inf in fp16
}
};
// Specialization for nv_bfloat16 (16-bit)
template <>
struct RadixTopKTraits<nv_bfloat16> {
using OrderedType = uint16_t;
template <uint32_t RADIX_BITS>
static __host__ __device__ constexpr uint32_t num_rounds() {
return sizeof(OrderedType) * 8 / RADIX_BITS;
}
__device__ __forceinline__ static OrderedType ToOrdered(nv_bfloat16 val) {
uint16_t bits = __bfloat16_as_ushort(val);
return (bits & 0x8000) ? static_cast<uint16_t>(~bits) : static_cast<uint16_t>(bits ^ 0x8000);
}
__device__ __forceinline__ static nv_bfloat16 FromOrdered(OrderedType ordered) {
uint16_t bits = (ordered & 0x8000) ? static_cast<uint16_t>(ordered ^ 0x8000)
: static_cast<uint16_t>(~ordered);
return __ushort_as_bfloat16(bits);
}
__device__ __forceinline__ static nv_bfloat16 NegInf() {
return __ushort_as_bfloat16(static_cast<uint16_t>(0xFF80)); // -inf in bf16
}
};
// ==================== Multi-CTA Top-K Implementation ====================
// Acquire/Release primitives for inter-CTA synchronization
__device__ __forceinline__ int ld_acquire(int* ptr) {
int state = 0;
#if (__CUDA_ARCH__ >= 700)
// SM70 and newer use memory consistency qualifiers
// Acquire pattern using acquire modifier
asm volatile("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
#else
asm volatile("ld.cg.global.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
#endif
return state;
}
__device__ __forceinline__ void red_release(int* ptr, int val) {
#if (__CUDA_ARCH__ >= 700)
// SM70 and newer use memory consistency qualifiers
// Release pattern using acq_rel fence + relaxed modifier
// (The fence also releases data that was weakly-written by other threads prior to the last
// syncthreads)
asm volatile("fence.acq_rel.gpu;\n");
asm volatile("red.relaxed.gpu.global.add.s32 [%0], %1;\n" : : "l"(ptr), "r"(val));
#else
__threadfence();
atomicAdd(ptr, val);
#endif
}
__device__ __forceinline__ void st_release(int* ptr, int val) {
#if (__CUDA_ARCH__ >= 700)
// SM70 and newer use memory consistency qualifiers
// Release pattern: fence + release store
asm volatile("fence.acq_rel.gpu;\n");
asm volatile("st.release.gpu.global.b32 [%0], %1;\n" : : "l"(ptr), "r"(val));
#else
__threadfence();
atomicExch(ptr, val);
#endif
}
// Wait until the value at ptr reaches target_val using acquire semantics
// Only thread 0 spins, then all threads synchronize
__device__ __forceinline__ void wait_ge(int* ptr, int target_val, int thread_idx) {
if (thread_idx == 0) {
#pragma unroll 1
while (ld_acquire(ptr) < target_val) {
}
}
__syncthreads();
}
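// Taken together, these primitives form the arrive-and-wait barrier used by the
// kernels below (sketch of the recurring pattern):
//   if (tx == 0) red_release(&state->arrival_counter, 1);                       // arrive
//   wait_ge(&state->arrival_counter, (barrier_phase + 1) * ctas_per_group, tx); // wait
//   barrier_phase++;
// Each of the ctas_per_group CTAs adds 1 per phase, so the counter reaching
// (barrier_phase + 1) * ctas_per_group proves every CTA has arrived at this phase.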
// ==================== Multi-CTA Radix Top-K Mask Logits ====================
// Global state for multi-CTA radix reduction (one per group)
struct RadixRowState {
uint32_t histogram[3][256]; // Triple-buffered histograms for 1-barrier-per-round
uint32_t remaining_k; // Remaining k after current round
uint32_t prefix; // Accumulated prefix (high bits of k-th element)
int arrival_counter; // For inter-CTA synchronization
int output_counter; // For collecting top-k indices (RadixTopK)
float sum_topk; // For RenormProb: sum of top-k elements
};
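// Why three histogram buffers: in global round r, CTAs accumulate into
// histogram[r % 3] while the leading CTA pre-clears histogram[(r + 1) % 3] before
// the round-r barrier; a straggler CTA may still be reading histogram[(r - 1) % 3]
// from the previous round. Three buffers keep these three roles on distinct arrays,
// which is what makes a single barrier per round sufficient.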
// ==================== Common Device Functions for Radix Top-K ====================
/*!
* \brief Compute suffix sum in shared memory using parallel reduction.
*
* After this function, suffix_sum[i] contains the count of elements >= bucket i.
* This is computed by summing all histogram values from bucket i to 255.
*
* \param suffix_sum Shared memory array of size RADIX (256)
* \param tx Thread index within the block
*/
template <uint32_t BLOCK_THREADS>
__device__ __forceinline__ void RadixSuffixSum(uint32_t* suffix_sum, uint32_t tx) {
constexpr uint32_t RADIX = 256;
// Parallel suffix sum: compute count of elements >= each bucket
for (uint32_t stride = 1; stride < RADIX; stride *= 2) {
uint32_t val = 0;
if (tx < RADIX) {
val = suffix_sum[tx];
if (tx + stride < RADIX) {
val += suffix_sum[tx + stride];
}
}
__syncthreads();
if (tx < RADIX) {
suffix_sum[tx] = val;
}
__syncthreads();
}
}
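// Worked example with RADIX = 4 for intuition (the code uses RADIX = 256):
//   histogram        [2, 0, 3, 1]   counts per bucket
//   after stride 1   [2, 3, 4, 1]   suffix_sum[i] += suffix_sum[i + 1]
//   after stride 2   [6, 4, 4, 1]   suffix_sum[i] += suffix_sum[i + 2]
// giving suffix_sum[i] = number of elements in buckets >= i, as required.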
/*!
* \brief Find the threshold bucket that contains the k-th largest element.
*
* The threshold bucket satisfies: count_ge >= k && count_gt < k
* where count_ge = suffix_sum[bucket] and count_gt = suffix_sum[bucket+1].
*
* \param suffix_sum Shared memory array containing suffix sums
* \param remaining_k Number of top-k elements still to find
* \param found_bucket Output: the found threshold bucket
* \param found_remaining_k Output: remaining_k minus count of elements > threshold
* \param tx Thread index within the block
*/
__device__ __forceinline__ void RadixFindThresholdBucket(uint32_t* suffix_sum, uint32_t remaining_k,
uint32_t* found_bucket,
uint32_t* found_remaining_k, uint32_t tx) {
constexpr uint32_t RADIX = 256;
// Initialize (only thread 0)
if (tx == 0) {
*found_bucket = 0;
*found_remaining_k = remaining_k;
}
__syncthreads();
// All threads in RADIX range check their bucket
if (tx < RADIX) {
uint32_t count_ge = suffix_sum[tx];
uint32_t count_gt = (tx + 1 < RADIX) ? suffix_sum[tx + 1] : 0;
if (count_ge >= remaining_k && count_gt < remaining_k) {
*found_bucket = tx;
*found_remaining_k = remaining_k - count_gt;
}
}
__syncthreads();
}
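// Continuing the RADIX = 4 example above: with suffix_sum = [6, 4, 4, 1] and
// remaining_k = 2, bucket 2 is the threshold (count_ge = 4 >= 2, count_gt = 1 < 2)
// and found_remaining_k = 2 - 1 = 1: one more element must be taken from bucket 2
// itself in the next round. Exactly one bucket satisfies the condition because the
// suffix sum is non-increasing.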
/*!
* \brief Build local histogram for one round of radix select.
*
* Counts elements in shared_ordered that match the current prefix and bins them
* by their byte at the current shift position.
*
* \tparam OrderedType The ordered integer type (uint16_t or uint32_t)
* \param shared_ordered Shared memory containing ordered values
* \param actual_chunk_size Number of elements in this CTA's chunk
* \param local_histogram Output shared memory histogram
* \param prefix Current prefix (high bits determined so far)
* \param shift Bit shift for extracting current byte
* \param round Current round (0 to NUM_ROUNDS-1)
* \param tx Thread index
*/
template <uint32_t BLOCK_THREADS, typename OrderedType>
__device__ __forceinline__ void RadixBuildLocalHistogram(const OrderedType* shared_ordered,
uint32_t actual_chunk_size,
uint32_t* local_histogram, uint32_t prefix,
uint32_t shift, uint32_t round,
uint32_t tx) {
constexpr uint32_t ORDERED_BITS = sizeof(OrderedType) * 8;
constexpr uint32_t RADIX_BITS = 8;
for (uint32_t i = tx; i < actual_chunk_size; i += BLOCK_THREADS) {
OrderedType ordered = shared_ordered[i];
// Check if this element matches the prefix (high bits determined so far)
OrderedType mask =
(round == 0)
? OrderedType(0)
: static_cast<OrderedType>(~OrderedType(0) << (ORDERED_BITS - round * RADIX_BITS));
if ((ordered & mask) == static_cast<OrderedType>(prefix)) {
uint32_t bucket = (ordered >> shift) & 0xFF;
atomicAdd(&local_histogram[bucket], 1);
}
}
}
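// Example (uint32_t, round 1): mask = ~0u << 24 = 0xFF000000, so only elements whose
// top byte equals the prefix chosen in round 0 are counted, binned by their second
// byte via (ordered >> 16) & 0xFF. Each round thereby narrows the candidate set to
// elements matching all previously selected bytes.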
/*!
* \brief Perform one round of radix select with optional multi-CTA synchronization.
*
* This is the core radix select logic used by all TopK kernels.
* It builds histogram, aggregates across CTAs (if multi-CTA), computes suffix sum,
* and finds the threshold bucket.
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam SINGLE_CTA True if single-CTA mode (no inter-CTA sync needed)
* \tparam OrderedType The ordered integer type
*
* \param shared_ordered Shared memory containing ordered values
* \param actual_chunk_size Number of elements in this CTA's chunk
* \param local_histogram Shared memory for local histogram (size RADIX)
* \param suffix_sum Shared memory for suffix sum computation (size RADIX)
* \param state Pointer to RadixRowState for multi-CTA sync (nullptr if SINGLE_CTA)
* \param prefix Current prefix value
* \param remaining_k Current remaining k value
* \param round Current round (0 to NUM_ROUNDS-1)
* \param barrier_phase Reference to barrier phase counter
* \param ctas_per_group Number of CTAs per group
* \param tx Thread index
* \param out_new_prefix Output: updated prefix after this round
* \param out_new_remaining_k Output: updated remaining_k after this round
*/
template <uint32_t BLOCK_THREADS, bool SINGLE_CTA, typename OrderedType>
__device__ __forceinline__ void RadixSelectOneRound(
const OrderedType* shared_ordered, uint32_t actual_chunk_size, uint32_t* local_histogram,
uint32_t* suffix_sum, uint32_t* shared_scalars, RadixRowState* state, uint32_t prefix,
uint32_t remaining_k, uint32_t round, uint32_t iter, int& barrier_phase,
uint32_t ctas_per_group, uint32_t cta_in_group, uint32_t tx, uint32_t* out_new_prefix,
uint32_t* out_new_remaining_k) {
constexpr uint32_t RADIX = 256;
constexpr uint32_t ORDERED_BITS = sizeof(OrderedType) * 8;
constexpr uint32_t RADIX_BITS = 8;
constexpr uint32_t NUM_ROUNDS = ORDERED_BITS / RADIX_BITS;
uint32_t shift = ORDERED_BITS - (round + 1) * RADIX_BITS;
uint32_t global_round = iter * NUM_ROUNDS + round;
// For multi-CTA: pointers to global histograms (triple buffer)
uint32_t* current_hist = nullptr;
uint32_t* next_hist = nullptr;
if constexpr (!SINGLE_CTA) {
current_hist = state->histogram[global_round % 3];
next_hist = state->histogram[(global_round + 1) % 3];
}
// Clear local histogram only
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
local_histogram[i] = 0;
}
__syncthreads();
// Build local histogram from shared memory
RadixBuildLocalHistogram<BLOCK_THREADS, OrderedType>(shared_ordered, actual_chunk_size,
local_histogram, prefix, shift, round, tx);
__syncthreads();
// For multi-CTA: write -> (leading CTA clears next) -> barrier -> read
// For single-CTA: local_histogram is already the complete histogram
if constexpr (!SINGLE_CTA) {
// Accumulate local histogram to global
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
if (local_histogram[i] > 0) {
atomicAdd(&current_hist[i], local_histogram[i]);
}
}
// Only leading CTA clears next round's histogram BEFORE barrier
if (cta_in_group == 0) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
next_hist[i] = 0;
}
}
// Barrier: wait for all CTAs to finish atomicAdd and clearing
if (tx == 0) {
red_release(&state->arrival_counter, 1);
}
int target = (barrier_phase + 1) * ctas_per_group;
wait_ge(&state->arrival_counter, target, tx);
barrier_phase++;
__syncthreads();
// Read current histogram (after barrier, all atomicAdds are complete)
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
suffix_sum[i] = current_hist[i];
}
} else {
// Single-CTA: copy local histogram directly to suffix_sum
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
suffix_sum[i] = local_histogram[i];
}
}
__syncthreads();
// Compute suffix sum
RadixSuffixSum<BLOCK_THREADS>(suffix_sum, tx);
// Find threshold bucket using shared_scalars for found_bucket and found_remaining_k
// shared_scalars[0] = found_bucket, shared_scalars[1] = found_remaining_k
RadixFindThresholdBucket(suffix_sum, remaining_k, &shared_scalars[0], &shared_scalars[1], tx);
// Output new prefix and remaining_k
*out_new_prefix = prefix | (shared_scalars[0] << shift);
*out_new_remaining_k = shared_scalars[1];
}
/*!
* \brief Load data from global memory to shared memory and convert to ordered representation.
*
* This is the common Stage 1 for all TopK kernels. It loads data using vectorized
* memory access and converts to ordered representation for radix select.
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam VEC_SIZE Vector size for memory access
* \tparam DType Data type (float, half, nv_bfloat16)
* \tparam Traits Type traits for DType
*
* \param input Pointer to input data row start (already offset by row)
* \param shared_ordered Shared memory for ordered values
* \param chunk_start Start index within the row for this CTA's chunk
* \param actual_chunk_size Number of elements in this CTA's chunk
* \param tx Thread index
*/
template <uint32_t BLOCK_THREADS, uint32_t VEC_SIZE, typename DType, typename Traits>
__device__ __forceinline__ void LoadToSharedOrdered(const DType* input,
typename Traits::OrderedType* shared_ordered,
uint32_t chunk_start,
uint32_t actual_chunk_size, uint32_t tx) {
using OrderedType = typename Traits::OrderedType;
vec_t<DType, VEC_SIZE> input_vec;
const uint32_t aligned_size = (actual_chunk_size / VEC_SIZE) * VEC_SIZE;
#pragma unroll 2
for (uint32_t i = tx * VEC_SIZE; i < aligned_size; i += BLOCK_THREADS * VEC_SIZE) {
input_vec.cast_load(input + chunk_start + i);
#pragma unroll
for (uint32_t j = 0; j < VEC_SIZE; ++j) {
shared_ordered[i + j] = Traits::ToOrdered(input_vec[j]);
}
}
// Handle tail
for (uint32_t i = aligned_size + tx; i < actual_chunk_size; i += BLOCK_THREADS) {
shared_ordered[i] = Traits::ToOrdered(input[chunk_start + i]);
}
__syncthreads();
}
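// Access pattern: with VEC_SIZE = 4, thread tx loads elements
// [chunk_start + 4 * tx, chunk_start + 4 * tx + 4) in one vectorized transaction and
// strides by 4 * BLOCK_THREADS; the scalar tail loop covers the final
// actual_chunk_size % VEC_SIZE elements that do not fill a whole vector.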
/*!
* \brief Find the k-th largest element using radix select from pre-loaded shared memory.
*
* This function assumes data has already been loaded into shared_ordered.
* It performs the complete radix select algorithm (initial barrier + NUM_ROUNDS)
* and returns the ordered pivot value.
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam SINGLE_CTA True if single-CTA mode
* \tparam OrderedType The ordered integer type
*
* \param shared_ordered Shared memory containing ordered values (pre-loaded)
* \param actual_chunk_size Number of elements in this CTA's chunk
* \param k Number of top elements to select
* \param local_histogram Shared memory for local histogram (size RADIX)
* \param suffix_sum Shared memory for suffix sum (size RADIX)
* \param shared_scalars Shared memory for scalars [prefix_cache, remaining_k_cache, found_bucket,
* found_remaining_k, output_counter]
* \param state RadixRowState pointer for multi-CTA sync (nullptr if SINGLE_CTA)
* \param barrier_phase Reference to barrier phase counter
* \param ctas_per_group Number of CTAs per group
* \param cta_in_group CTA index within group
* \param tx Thread index
* \param iter Current iteration (for triple-buffer indexing)
* \return The pivot value in ordered representation
*/
template <uint32_t BLOCK_THREADS, bool SINGLE_CTA, typename OrderedType>
__device__ __forceinline__ OrderedType RadixSelectFromSharedMemory(
const OrderedType* shared_ordered, uint32_t actual_chunk_size, uint32_t k,
uint32_t* local_histogram, uint32_t* suffix_sum, uint32_t* shared_scalars, RadixRowState* state,
int& barrier_phase, uint32_t ctas_per_group, uint32_t cta_in_group, uint32_t tx, uint32_t iter,
uint32_t& out_local_gt_count) {
constexpr uint32_t RADIX = 256;
constexpr uint32_t RADIX_BITS = 8;
constexpr uint32_t ORDERED_BITS = sizeof(OrderedType) * 8;
constexpr uint32_t NUM_ROUNDS = ORDERED_BITS / RADIX_BITS;
// Aliases for scalar shared variables
#define prefix_cache shared_scalars[0]
#define remaining_k_cache shared_scalars[1]
#define found_bucket shared_scalars[2]
#define found_remaining_k shared_scalars[3]
#define shared_output_counter shared_scalars[4]
// Initialize local caches
if (tx == 0) {
prefix_cache = 0;
remaining_k_cache = k;
if constexpr (SINGLE_CTA) {
shared_output_counter = 0;
}
}
__syncthreads();
// Initial barrier (skip for single CTA)
if constexpr (!SINGLE_CTA) {
if (tx == 0) {
red_release(&state->arrival_counter, 1);
}
int target = (barrier_phase + 1) * ctas_per_group;
wait_ge(&state->arrival_counter, target, tx);
barrier_phase++;
__syncthreads();
// CTA 0 clears output counter AFTER barrier
if (cta_in_group == 0 && tx == 0) {
st_release(&state->output_counter, 0);
}
}
// NUM_ROUNDS of radix select
for (uint32_t round = 0; round < NUM_ROUNDS; ++round) {
uint32_t global_round = iter * NUM_ROUNDS + round;
uint32_t shift = ORDERED_BITS - (round + 1) * RADIX_BITS;
uint32_t prefix = prefix_cache;
uint32_t remaining_k = remaining_k_cache;
// For multi-CTA: pointers to global histograms (triple buffer)
uint32_t* current_hist = nullptr;
uint32_t* next_hist = nullptr;
if constexpr (!SINGLE_CTA) {
current_hist = state->histogram[global_round % 3];
next_hist = state->histogram[(global_round + 1) % 3];
}
// Clear local histogram
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
local_histogram[i] = 0;
}
__syncthreads();
// Build local histogram
#pragma unroll 2
for (uint32_t i = tx; i < actual_chunk_size; i += BLOCK_THREADS) {
OrderedType ordered = shared_ordered[i];
OrderedType mask =
(round == 0)
? OrderedType(0)
: static_cast<OrderedType>(~OrderedType(0) << (ORDERED_BITS - round * RADIX_BITS));
if ((ordered & mask) == static_cast<OrderedType>(prefix)) {
uint32_t bucket = (ordered >> shift) & 0xFF;
atomicAdd(&local_histogram[bucket], 1);
}
}
__syncthreads();
// Multi-CTA: accumulate to global, barrier, read back
if constexpr (!SINGLE_CTA) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
if (local_histogram[i] > 0) {
atomicAdd(&current_hist[i], local_histogram[i]);
}
}
if (cta_in_group == 0) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
next_hist[i] = 0;
}
}
if (tx == 0) {
red_release(&state->arrival_counter, 1);
}
int target = (barrier_phase + 1) * ctas_per_group;
wait_ge(&state->arrival_counter, target, tx);
barrier_phase++;
__syncthreads();
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
suffix_sum[i] = current_hist[i];
}
} else {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
suffix_sum[i] = local_histogram[i];
}
}
__syncthreads();
// Compute suffix sum
RadixSuffixSum<BLOCK_THREADS>(suffix_sum, tx);
// Find threshold bucket
if (tx == 0) {
found_bucket = 0;
found_remaining_k = remaining_k;
}
__syncthreads();
if (tx < RADIX) {
uint32_t count_ge = suffix_sum[tx];
uint32_t count_gt = (tx + 1 < RADIX) ? suffix_sum[tx + 1] : 0;
if (count_ge >= remaining_k && count_gt < remaining_k) {
found_bucket = tx;
found_remaining_k = remaining_k - count_gt;
}
}
__syncthreads();
// Update caches
if (tx == 0) {
prefix_cache = prefix | (found_bucket << shift);
remaining_k_cache = found_remaining_k;
}
__syncthreads();
}
OrderedType ordered_pivot = static_cast<OrderedType>(prefix_cache);
// Count > pivot elements by scanning shared_ordered
// This is needed because suffix_sum only tracks elements matching the current prefix,
// not all elements > pivot (which includes elements with higher-order bits > pivot)
if (tx == 0) {
suffix_sum[0] = 0;
}
__syncthreads();
uint32_t my_gt_count = 0;
#pragma unroll 2
for (uint32_t i = tx; i < actual_chunk_size; i += BLOCK_THREADS) {
if (shared_ordered[i] > ordered_pivot) {
my_gt_count++;
}
}
// Warp-level reduction
for (int offset = 16; offset > 0; offset /= 2) {
my_gt_count += __shfl_down_sync(0xffffffff, my_gt_count, offset);
}
// First lane of each warp adds its warp's total to the shared counter
int lane = tx % 32;
if (lane == 0 && my_gt_count > 0) {
atomicAdd(&suffix_sum[0], my_gt_count);
}
__syncthreads();
out_local_gt_count = suffix_sum[0];
#undef prefix_cache
#undef remaining_k_cache
#undef found_bucket
#undef found_remaining_k
#undef shared_output_counter
return ordered_pivot;
}
/*!
* \brief Find the k-th largest element pivot using radix select.
*
* This is the main entry point for the radix select algorithm.
* It performs NUM_ROUNDS of radix select to find the exact pivot value.
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam VEC_SIZE Vector size for memory access
* \tparam SINGLE_CTA True if single-CTA mode
* \tparam DType Data type (float, half, nv_bfloat16)
*
* \param input Input data pointer (for this row)
* \param shared_ordered Shared memory for ordered values
* \param local_histogram Shared memory for local histogram
* \param suffix_sum Shared memory for suffix sum
* \param shared_scalars Shared memory for temporary scalar values (size >= 5)
* \param state RadixRowState pointer (nullptr if SINGLE_CTA)
* \param chunk_start Start index in vocab for this CTA
* \param actual_chunk_size Number of elements in this chunk
* \param k Number of top elements to select
* \param barrier_phase Reference to barrier phase counter
* \param ctas_per_group Number of CTAs per group
* \param cta_in_group CTA index within group
* \param tx Thread index
* \param iter Current iteration (for triple-buffer indexing)
* \return The pivot value (k-th largest element)
*/
template <uint32_t BLOCK_THREADS, uint32_t VEC_SIZE, bool SINGLE_CTA, typename DType>
__device__ __forceinline__ DType RadixSelectFindPivot(
const DType* input, typename RadixTopKTraits<DType>::OrderedType* shared_ordered,
uint32_t* local_histogram, uint32_t* suffix_sum, uint32_t* shared_scalars, RadixRowState* state,
uint32_t chunk_start, uint32_t actual_chunk_size, uint32_t k, int& barrier_phase,
uint32_t ctas_per_group, uint32_t cta_in_group, uint32_t tx, uint32_t iter = 0) {
using Traits = RadixTopKTraits<DType>;
using OrderedType = typename Traits::OrderedType;
// Stage 1: Load and convert to ordered representation
LoadToSharedOrdered<BLOCK_THREADS, VEC_SIZE, DType, Traits>(input, shared_ordered, chunk_start,
actual_chunk_size, tx);
// Stage 2: Radix select to find pivot
uint32_t local_gt_count = 0; // Not used in this function
OrderedType ordered_pivot = RadixSelectFromSharedMemory<BLOCK_THREADS, SINGLE_CTA, OrderedType>(
shared_ordered, actual_chunk_size, k, local_histogram, suffix_sum, shared_scalars, state,
barrier_phase, ctas_per_group, cta_in_group, tx, iter, local_gt_count);
// Convert ordered representation back to DType pivot
return Traits::FromOrdered(ordered_pivot);
}
/*!
* \brief Collect top-k indices based on pivot value with custom output transform (Single Pass).
*
* This optimized version uses a single pass to write all elements:
* - > pivot: use shared memory atomic for local offset within CTA's allocation
* - == pivot: use global memory atomic, check if pos < k before writing
*
* The local_gt_count is computed during the last round of radix select, so we know
* exactly how many > pivot elements each CTA has. This allows batched global atomic
* (one per CTA) for > pivot elements.
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam SINGLE_CTA True if single-CTA mode
* \tparam OrderedType The ordered integer type
* \tparam OutputFunc Functor type: void(uint32_t original_idx, OrderedType ordered_val, int
* output_pos)
*
* \param shared_ordered Shared memory containing ordered values
* \param actual_chunk_size Number of elements in this CTA's chunk
* \param chunk_start Start index in input for this chunk
* \param k Number of top elements to select
* \param ordered_pivot The pivot value in ordered representation
* \param local_gt_count Number of > pivot elements in this CTA (from radix select)
* \param local_histogram Shared memory for counters
* \param shared_output_counter Pointer to shared output counter (SINGLE_CTA mode)
* \param state RadixRowState pointer for multi-CTA sync (nullptr if SINGLE_CTA)
* \param barrier_phase Reference to barrier phase counter (advanced by the multi-CTA barrier)
* \param ctas_per_group Number of CTAs per group
* \param tx Thread index
* \param output_func Functor called as output_func(original_idx, ordered_val, output_pos) for each
* element
*/
template <uint32_t BLOCK_THREADS, bool SINGLE_CTA, typename OrderedType, typename OutputFunc>
__device__ __forceinline__ void RadixCollectIndices(
const OrderedType* shared_ordered, uint32_t actual_chunk_size, uint32_t chunk_start, uint32_t k,
OrderedType ordered_pivot, uint32_t local_gt_count, uint32_t* local_histogram,
uint32_t* shared_output_counter, RadixRowState* state, int& barrier_phase,
uint32_t ctas_per_group, uint32_t tx, OutputFunc output_func) {
// Use local_histogram for counters:
// [0]: local_offset_gt (local offset for > pivot elements within CTA's allocation)
// [1]: global_base_gt (global base position for > pivot)
#define local_offset_gt local_histogram[0]
#define global_base_gt local_histogram[1]
// Get global base position for this CTA's > pivot elements (one atomic per CTA)
if (tx == 0) {
local_offset_gt = 0;
if (local_gt_count > 0) {
if constexpr (SINGLE_CTA) {
global_base_gt = atomicAdd(shared_output_counter, local_gt_count);
} else {
global_base_gt = atomicAdd(&state->output_counter, local_gt_count);
}
}
}
__syncthreads();
// Pass 1: Write elements > pivot
// These are guaranteed to be in top-k, use local offset within CTA's allocation
#pragma unroll 2
for (uint32_t i = tx; i < actual_chunk_size; i += BLOCK_THREADS) {
OrderedType ordered_val = shared_ordered[i];
if (ordered_val > ordered_pivot) {
uint32_t local_pos = atomicAdd(&local_offset_gt, 1);
int pos = global_base_gt + local_pos;
output_func(chunk_start + i, ordered_val, pos);
}
}
// Barrier to ensure all > pivot elements are collected first (only for multi-CTA)
// This is critical: without this barrier, CTAs may write == pivot elements while
// other CTAs are still writing > pivot elements, causing incorrect positions.
if constexpr (!SINGLE_CTA) {
if (tx == 0) {
red_release(&state->arrival_counter, 1);
}
int target = (barrier_phase + 1) * ctas_per_group;
wait_ge(&state->arrival_counter, target, tx);
barrier_phase++;
}
__syncthreads();
// Pass 2: Write elements == pivot
// Use global atomic directly since we need cross-CTA coordination to respect
// the k limit (some == pivot elements may be truncated).
#pragma unroll 2
for (uint32_t i = tx; i < actual_chunk_size; i += BLOCK_THREADS) {
OrderedType ordered_val = shared_ordered[i];
if (ordered_val == ordered_pivot) {
int pos;
if constexpr (SINGLE_CTA) {
pos = atomicAdd(shared_output_counter, 1);
} else {
pos = atomicAdd(&state->output_counter, 1);
}
if (pos < static_cast<int>(k)) {
output_func(chunk_start + i, ordered_pivot, pos);
}
}
}
#undef local_offset_gt
#undef global_base_gt
}
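// Illustrative trace: k = 4, pivot = 5, CTA A holds {9, 7} (> pivot) and CTA B holds
// {8, 5, 5}. Pass 1: A reserves base 0 for its two elements and B reserves base 2
// for one, so {9, 7, 8} land in positions 0..2 (order within a reservation follows
// atomic arrival). Pass 2: the two 5s contend for the remaining slot; the first
// atomicAdd returns pos = 3 < k and is written, the second returns pos = 4 >= k and
// is dropped.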
// ==================== Unified Radix Top-K Kernel with Epilogue Modes ====================
/*!
* \brief Epilogue mode for unified RadixTopK kernel.
*/
enum class RadixTopKMode {
Basic, ///< Returns (indices, values) pairs
PageTableTransform, ///< Gathers indices through page table
RaggedTransform, ///< Adds offset to indices
};
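// Epilogue per mode, for a selected element with original index i at output slot pos
// (a summary of the mode descriptions above, not an exact transcription of each lambda):
//   Basic:              output_indices[pos] = i; output_values[pos] = value
//   PageTableTransform: output_indices[pos] = src_page_table[i]
//   RaggedTransform:    output_indices[pos] = i + row_offset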
/*!
* \brief Unified Multi-CTA Radix Top-K kernel with mode-specific epilogues.
*
* This kernel unifies three top-k variants:
* - Basic: Returns top-k indices and values
* - PageTableTransform: Gathers top-k indices through a page table
* - RaggedTransform: Adds per-row offset to top-k indices
*
* \tparam BLOCK_THREADS Number of threads per block
* \tparam VEC_SIZE Vector size for memory access
* \tparam SINGLE_CTA True if single-CTA mode
* \tparam MODE Epilogue mode (Basic, PageTableTransform, or RaggedTransform)
* \tparam DType Data type (float, half, nv_bfloat16)
* \tparam IdType Index type
*/
template <uint32_t BLOCK_THREADS, uint32_t VEC_SIZE, bool SINGLE_CTA, RadixTopKMode MODE,
typename DType, typename IdType>
__global__ void __launch_bounds__(BLOCK_THREADS) RadixTopKKernel_Unified(
DType* input, // [num_rows, stride]
IdType* output_indices, // [num_rows, top_k] - indices or page table entries
DType* output_values, // [num_rows, top_k] - only used in Basic mode, nullptr otherwise
const IdType*
aux_data, // Mode-specific: top_k_arr (Basic), src_page_table (PageTable), offsets (Ragged)
IdType* lengths, // [num_rows] per-row lengths, nullptr for Basic (uses stride)
const IdType* row_to_batch, // [num_rows] batch mapping for PageTable, nullptr otherwise
int64_t aux_stride, // src_page_table stride for PageTable mode, 0 otherwise
uint32_t top_k_val, uint32_t stride, uint32_t num_rows, RadixRowState* row_states,
uint32_t chunk_size, uint32_t ctas_per_group) {
using Traits = RadixTopKTraits<DType>;
using OrderedType = typename Traits::OrderedType;
constexpr uint32_t RADIX = 256;
const uint32_t global_cta_id = blockIdx.x;
const uint32_t group_id = global_cta_id / ctas_per_group;
const uint32_t cta_in_group = global_cta_id % ctas_per_group;
const uint32_t tx = threadIdx.x;
extern __shared__ uint8_t smem[];
constexpr size_t num_scalars = SINGLE_CTA ? 5 : 4;
constexpr size_t fixed_smem_size = sizeof(uint32_t) * (RADIX + RADIX + num_scalars);
uint32_t* local_histogram = reinterpret_cast<uint32_t*>(smem);
uint32_t* suffix_sum = local_histogram + RADIX;
uint32_t* shared_scalars = suffix_sum + RADIX;
size_t ordered_offset = ((fixed_smem_size + 15) / 16) * 16;
OrderedType* shared_ordered = reinterpret_cast<OrderedType*>(smem + ordered_offset);
#define shared_output_counter shared_scalars[4]
RadixRowState* state = nullptr;
if constexpr (!SINGLE_CTA) {
state = &row_states[group_id];
}
uint32_t num_groups = gridDim.x / ctas_per_group;
uint32_t total_iterations = (num_rows + num_groups - 1) / num_groups;
int barrier_phase = 0;
for (uint32_t iter = 0; iter < total_iterations; iter++) {
uint32_t row_idx = group_id + iter * num_groups;
if (row_idx >= num_rows) break;
// Mode-specific: get row length and k value
uint32_t length, k;
if constexpr (MODE == RadixTopKMode::Basic) {
length = stride; // Fixed length for all rows
k = (aux_data != nullptr) ? aux_data[row_idx] : top_k_val; // aux_data = top_k_arr
} else {
length = lengths[row_idx]; // Per-row length
k = top_k_val; // Fixed k
}
// Mode-specific: output pointers and auxiliary data
IdType* row_output = output_indices + row_idx * top_k_val;
// Handle trivial cases
if constexpr (MODE == RadixTopKMode::Basic) {
if (k >= length) {
// k >= vocab_size: return all indices
const uint32_t chunk_start = cta_in_group * chunk_size;
const uint32_t chunk_end = min(chunk_start + chunk_size, length);
for (uint32_t i = tx; i < chunk_end - chunk_start; i += BLOCK_THREADS) {
if (chunk_start + i < k) {
row_output[chunk_start + i] = static_cast<IdType>(chunk_start + i);
output_values[row_idx * top_k_val + chunk_start + i] =
input[row_idx * stride + chunk_start + i];
}
}
// Clear histogram for the next iteration (in case the next row has k < length)
if constexpr (!SINGLE_CTA) {
constexpr uint32_t NUM_ROUNDS = sizeof(OrderedType) * 8 / 8;
uint32_t next_first_hist_idx = ((iter + 1) * NUM_ROUNDS) % 3;
if (cta_in_group == 0) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
state->histogram[next_first_hist_idx][i] = 0;
}
}
}
continue;
}
} else if constexpr (MODE == RadixTopKMode::PageTableTransform) {
uint32_t batch_idx = (row_to_batch != nullptr) ? row_to_batch[row_idx] : row_idx;
const IdType* src_page_entry = aux_data + batch_idx * aux_stride;
if (length <= top_k_val) {
for (uint32_t i = tx; i < top_k_val; i += BLOCK_THREADS) {
row_output[i] = (i < length) ? src_page_entry[i] : static_cast<IdType>(-1);
}
// Clear histogram for next iteration
if constexpr (!SINGLE_CTA) {
constexpr uint32_t NUM_ROUNDS = sizeof(OrderedType) * 8 / 8;
uint32_t next_first_hist_idx = ((iter + 1) * NUM_ROUNDS) % 3;
if (cta_in_group == 0) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
state->histogram[next_first_hist_idx][i] = 0;
}
}
}
continue;
}
} else { // RaggedTransform
IdType offset = aux_data[row_idx];
if (length <= top_k_val) {
for (uint32_t i = tx; i < top_k_val; i += BLOCK_THREADS) {
row_output[i] = (i < length) ? static_cast<IdType>(i) + offset : static_cast<IdType>(-1);
}
// Clear histogram for next iteration
if constexpr (!SINGLE_CTA) {
constexpr uint32_t NUM_ROUNDS = sizeof(OrderedType) * 8 / 8;
uint32_t next_first_hist_idx = ((iter + 1) * NUM_ROUNDS) % 3;
if (cta_in_group == 0) {
for (uint32_t i = tx; i < RADIX; i += BLOCK_THREADS) {
state->histogram[next_first_hist_idx][i] = 0;
}
}
}
continue;
}
}
const uint32_t chunk_start = cta_in_group * chunk_size;
const uint32_t chunk_end = min(chunk_start + chunk_size, length);
const uint32_t actual_chunk_size = chunk_end - chunk_start;
// Stage 1: Load and convert to ordered representation
LoadToSharedOrdered<BLOCK_THREADS, VEC_SIZE, DType, Traits>(
input + row_idx * stride, shared_ordered, chunk_start, actual_chunk_size, tx);
// Stage 2: Radix select to find k-th largest element (also computes local_gt_count)
uint32_t local_gt_count = 0;
OrderedType ordered_pivot = RadixSelectFromSharedMemory<BLOCK_THREADS, SINGLE_CTA, OrderedType>(
shared_ordered, actual_chunk_size, k, local_histogram, suffix_sum, shared_scalars, state,
barrier_phase, ctas_per_group, cta_in_group, tx, iter, local_gt_count);
// Stage 3: Collect indices with mode-specific epilogue (single pass)
if constexpr (MODE == RadixTopKMode::Basic) {
DType* row_output_values = output_values + row_idx * top_k_val;
RadixCollectIndices<BLOCK_THREADS, SINGLE_CTA, OrderedType>(
shared_ordered, actual_chunk_size, chunk_start, k, ordered_pivot, local_gt_count,
local_histogram, &shared_output_counter, state, barrier_phase, ctas_per_group, tx,
[&](uint32_t original_idx, OrderedType ordered_val, int pos) {
row_output[pos] = static_cast<IdType>(original_idx);
row_output_values[pos] = Traits::FromOrdered(ordered_val);
});
} else if constexpr (MODE == RadixTopKMode::PageTableTransform) {
uint32_t batch_idx = (row_to_batch != nullptr) ? row_to_batch[row_idx] : row_idx;
const IdType* src_page_entry = aux_data + batch_idx * aux_stride;
// Collect raw indices first
RadixCollectIndices<BLOCK_THREADS, SINGLE_CTA, OrderedType>(
shared_ordered, actual_chunk_size, chunk_start, k, ordered_pivot, local_gt_count,
local_histogram, &shared_output_counter, state, barrier_phase, ctas_per_group, tx,
[&](uint32_t original_idx, OrderedType /*ordered_val*/, int pos) {
row_output[pos] = static_cast<IdType>(original_idx);
});
if constexpr (SINGLE_CTA) {
__syncthreads();
// Transform through page table with coalesced access
for (uint32_t i = tx; i < k; i += BLOCK_THREADS) {
IdType idx = row_output[i];
row_output[i] = src_page_entry[idx];
}
} else {
// Barrier to ensure all CTAs finished writing indices
if (tx == 0) {
red_release(&state->arrival_counter, 1);
}
int target = (barrier_phase + 1) * ctas_per_group;
wait_ge(&state->arrival_counter, target, tx);