/*************************************************************************
 * Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * See LICENSE for license information.
 ************************************************************************/

#include "../extensions.h"

#ifdef NVTE_ENABLE_NVSHMEM
#include <nvshmem.h>
#include <nvshmem_api/nvshmem_waitkernel.h>
#include <nvshmemx.h>
#endif

#include <cuda.h>
#include <cuda_fp8.h>
#include <torch/cuda.h>
#include <torch/extension.h>

#include <functional>  // std::multiplies
#include <numeric>     // std::accumulate

namespace nvshmem_api {
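// Bootstrap NVSHMEM on top of an existing torch.distributed process group:
// rank 0 generates an NVSHMEM unique ID, the ID is broadcast to all ranks
// through the process group, and every rank then initializes NVSHMEM with it.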
void init_nvshmem_backend(c10d::ProcessGroup *process_group) {
#ifdef NVTE_ENABLE_NVSHMEM
  nvshmemx_init_attr_t attr = {};
  nvshmemx_uniqueid_t id = {};

  int my_rank = process_group->getRank();
  int num_ranks = process_group->getSize();
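  // Only rank 0 generates the unique ID; the other ranks receive it via the
  // broadcast below.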
  if (my_rank == 0) {
    nvshmemx_get_uniqueid(&id);
  }

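  // Broadcast the unique ID from rank 0 to every rank through the process
  // group. NCCL broadcasts device tensors, so the ID is staged through GPU
  // memory and copied back to the host afterwards.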
  auto backend_is_nccl = (process_group->getBackendType() == c10d::ProcessGroup::BackendType::NCCL);
  NVTE_CHECK(backend_is_nccl, "Currently only NCCL bootstrap is supported for NVSHMEM");
  auto datatensor =
      torch::from_blob(reinterpret_cast<void *>(&id),
                       {static_cast<int64_t>(sizeof(nvshmemx_uniqueid_t) / sizeof(uint8_t))},
                       at::device(torch::kCPU).dtype(torch::kUInt8));
  auto datatmp = (backend_is_nccl) ? datatensor.cuda() : datatensor;

  c10d::BroadcastOptions bcast_opts;
  bcast_opts.rootRank = 0;
  std::vector<torch::Tensor> datachunk = {datatmp};
  auto work = process_group->broadcast(datachunk, bcast_opts);
  work->wait();

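  // Copy the broadcast result back into the host-side ID and release the
  // temporary GPU tensor.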
  if (backend_is_nccl) {
    datatensor.copy_(datatmp.cpu());
    datatmp = torch::Tensor();
  }

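  // Initialize NVSHMEM: every rank passes the same unique ID together with its
  // own rank and the world size.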
  nvshmemx_set_attr_uniqueid_args(my_rank, num_ranks, &id, &attr);
  nvshmemx_init_attr(NVSHMEMX_INIT_WITH_UNIQUEID, &attr);

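  // Sanity check: NVSHMEM PE numbering must match the process group's ranks.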
  NVTE_CHECK(my_rank == nvshmem_my_pe(), "my_rank: ", my_rank,
             " != nvshmem_my_pe(): ", nvshmem_my_pe());
  NVTE_CHECK(num_ranks == nvshmem_n_pes(), "num_ranks: ", num_ranks,
             " != nvshmem_n_pes(): ", nvshmem_n_pes());
#else
  NVTE_ERROR("Internal TE error: init_nvshmem_backend cannot be used unless TE is compiled ",
             "with NVTE_ENABLE_NVSHMEM=1!");
#endif
}

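// Block the current CUDA stream until the signal location written by a
// matching nvshmem_send_on_current_stream is updated. wait_kind selects how
// the wait is implemented: "kernel", "nvshmem", or "stream".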
void nvshmem_wait_on_current_stream(torch::Tensor signal, const std::string &wait_kind) {
#ifdef NVTE_ENABLE_NVSHMEM
  uint64_t *sig_addr = reinterpret_cast<uint64_t *>(signal.data_ptr());
  cudaStream_t cur_stream = (cudaStream_t)at::cuda::getCurrentCUDAStream();

  WaitKind wait_kind_enum = WaitKind::STREAM_WAIT;

  if (wait_kind == "kernel") {
    wait_kind_enum = WaitKind::KERNEL_WAIT;
  } else if (wait_kind == "nvshmem") {
    wait_kind_enum = WaitKind::NVSHMEM_WAIT;
  } else if (wait_kind == "stream") {
    wait_kind_enum = WaitKind::STREAM_WAIT;
  } else {
    NVTE_ERROR("Invalid wait kind: ", wait_kind);
  }
  nvshmem_wait_on_stream(sig_addr, wait_kind_enum, cur_stream);

#else
  NVTE_ERROR(
      "Internal TE error: nvshmem_wait_on_current_stream cannot be used unless TE is compiled ",
      "with NVTE_ENABLE_NVSHMEM=1!");
#endif
}

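// Allocate a buffer on the NVSHMEM symmetric heap and wrap it in a torch
// tensor whose deleter returns the memory via nvshmem_free.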
torch::Tensor create_nvshmem_tensor(const std::vector<int64_t> &shape, c10::ScalarType dtype) {
#ifdef NVTE_ENABLE_NVSHMEM
  auto option_gpu =
      at::TensorOptions().dtype(dtype).device(at::kCUDA).device_index(c10::cuda::current_device());
  auto size = torch::elementSize(dtype) *
              std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<>());
  return at::from_blob(
      nvshmem_malloc(size), shape, [](void *ptr) { nvshmem_free(ptr); }, option_gpu);
#else
  NVTE_ERROR("Internal TE error: create_nvshmem_tensor cannot be used unless TE is compiled ",
             "with NVTE_ENABLE_NVSHMEM=1!");
#endif
}

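// Copy src into the symmetric buffer dst on the remote PE `peer` and set the
// remote signal location to 1 once the data has been delivered, all ordered on
// the current CUDA stream.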
void nvshmem_send_on_current_stream(torch::Tensor src, torch::Tensor dst, int peer,
                                    torch::Tensor signal) {
#ifdef NVTE_ENABLE_NVSHMEM
  void *src_ptr = reinterpret_cast<void *>(src.data_ptr());
  void *dst_ptr = reinterpret_cast<void *>(dst.data_ptr());
  uint64_t *sig_addr = reinterpret_cast<uint64_t *>(signal.data_ptr());
  auto nbytes = src.numel() * src.element_size();
  uint64_t sigval = 1;
  at::cuda::CUDAStream cur_stream = at::cuda::getCurrentCUDAStream();

  nvshmemx_putmem_signal_on_stream(dst_ptr, src_ptr, nbytes, sig_addr, sigval, NVSHMEM_SIGNAL_SET,
                                   peer, (cudaStream_t)cur_stream);
#else
  NVTE_ERROR(
      "Internal TE error: nvshmem_send_on_current_stream cannot be used unless TE is compiled ",
      "with NVTE_ENABLE_NVSHMEM=1!");
#endif
}
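
// Finalize the NVSHMEM runtime initialized by init_nvshmem_backend.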
void nvshmem_finalize() {
#ifdef NVTE_ENABLE_NVSHMEM
  // Call the global NVSHMEM API explicitly; an unqualified call would resolve
  // to this wrapper and recurse.
  ::nvshmem_finalize();
#else
  NVTE_ERROR("Internal TE error: nvshmem_finalize cannot be used unless TE is compiled ",
             "with NVTE_ENABLE_NVSHMEM=1!");
#endif
}
}  // namespace nvshmem_api
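
// A minimal sketch of how these helpers might be exposed to Python via
// pybind11; the module name and binding location are assumptions for
// illustration, not part of this source file (TE registers its bindings
// elsewhere).
//
//   PYBIND11_MODULE(nvshmem_api_ext, m) {
//     m.def("init_nvshmem_backend", &nvshmem_api::init_nvshmem_backend,
//           "Bootstrap NVSHMEM from a torch.distributed process group");
//     m.def("create_nvshmem_tensor", &nvshmem_api::create_nvshmem_tensor,
//           "Allocate a torch tensor on the NVSHMEM symmetric heap");
//     m.def("nvshmem_send_on_current_stream", &nvshmem_api::nvshmem_send_on_current_stream);
//     m.def("nvshmem_wait_on_current_stream", &nvshmem_api::nvshmem_wait_on_current_stream);
//     m.def("nvshmem_finalize", &nvshmem_api::nvshmem_finalize);
//   }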