|
| 1 | +/* SPDX-License-Identifier: BSD-3-Clause |
| 2 | + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. |
| 3 | + */ |
| 4 | + |
| 5 | +#include <errno.h> |
| 6 | +#include <inttypes.h> |
| 7 | +#include <string.h> |
| 8 | +#include <sys/ioctl.h> |
| 9 | + |
| 10 | +#include <rte_byteorder.h> |
| 11 | +#include <rte_eal.h> |
| 12 | +#include <rte_io.h> |
| 13 | +#include <rte_kvargs.h> |
| 14 | +#include <rte_log.h> |
| 15 | +#include <rte_malloc.h> |
| 16 | + |
| 17 | +#include <rte_dmadev_pmd.h> |
| 18 | + |
| 19 | +#include "hisi_acc_dmadev.h" |
| 20 | + |
/* Dynamic log type for the whole driver; default threshold is INFO. */
RTE_LOG_REGISTER_DEFAULT(hacc_dma_logtype, INFO);
#define RTE_LOGTYPE_HACC_DMA hacc_dma_logtype

/* Plain log: prefixed with the calling function name only. */
#define HACC_DMA_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, HACC_DMA, "%s(): ", __func__, __VA_ARGS__)

/* Per-device log: prefixed with the dmadev name and the calling function.
 * Requires hw->data to be set (see the early hw->data assignment in create).
 */
#define HACC_DMA_DEV_LOG(hw, level, ...) \
	RTE_LOG_LINE_PREFIX(level, HACC_DMA, "%s %s(): ", \
		(hw)->data->dev_name RTE_LOG_COMMA __func__, __VA_ARGS__)

/* Level-specific shorthands for per-device logging. */
#define HACC_DMA_DEBUG(hw, ...) \
	HACC_DMA_DEV_LOG(hw, DEBUG, __VA_ARGS__)
#define HACC_DMA_INFO(hw, ...) \
	HACC_DMA_DEV_LOG(hw, INFO, __VA_ARGS__)
#define HACC_DMA_WARN(hw, ...) \
	HACC_DMA_DEV_LOG(hw, WARNING, __VA_ARGS__)
#define HACC_DMA_ERR(hw, ...) \
	HACC_DMA_DEV_LOG(hw, ERR, __VA_ARGS__)
| 37 | +static void |
| 38 | +hacc_dma_gen_dev_name(const struct rte_uacce_device *uacce_dev, |
| 39 | + uint16_t queue_id, char *dev_name, size_t size) |
| 40 | +{ |
| 41 | + memset(dev_name, 0, size); |
| 42 | + (void)snprintf(dev_name, size, "%s-dma%u", uacce_dev->device.name, queue_id); |
| 43 | +} |
| 44 | + |
| 45 | +static void |
| 46 | +hacc_dma_gen_dev_prefix(const struct rte_uacce_device *uacce_dev, char *dev_name, size_t size) |
| 47 | +{ |
| 48 | + memset(dev_name, 0, size); |
| 49 | + (void)snprintf(dev_name, size, "%s-dma", uacce_dev->device.name); |
| 50 | +} |
| 51 | + |
| 52 | +static int |
| 53 | +hacc_dma_get_qp_info(struct hacc_dma_dev *hw) |
| 54 | +{ |
| 55 | +#define CMD_QM_GET_QP_CTX _IOWR('H', 10, struct hacc_dma_qp_contex) |
| 56 | +#define CMD_QM_GET_QP_INFO _IOWR('H', 11, struct hacc_dma_qp_info) |
| 57 | +#define QP_ALG_TYPE 2 |
| 58 | + struct hacc_dma_qp_contex { |
| 59 | + uint16_t id; |
| 60 | + uint16_t qc_type; |
| 61 | + } qp_ctx; |
| 62 | + struct hacc_dma_qp_info { |
| 63 | + uint32_t sqe_size; |
| 64 | + uint16_t sq_depth; |
| 65 | + uint16_t cq_depth; |
| 66 | + uint64_t reserved; |
| 67 | + } qp_info; |
| 68 | + int ret; |
| 69 | + |
| 70 | + memset(&qp_ctx, 0, sizeof(qp_ctx)); |
| 71 | + qp_ctx.qc_type = QP_ALG_TYPE; |
| 72 | + ret = rte_uacce_queue_ioctl(&hw->qctx, CMD_QM_GET_QP_CTX, &qp_ctx); |
| 73 | + if (ret != 0) { |
| 74 | + HACC_DMA_ERR(hw, "get qm qp context fail!"); |
| 75 | + return -EINVAL; |
| 76 | + } |
| 77 | + hw->sqn = qp_ctx.id; |
| 78 | + |
| 79 | + memset(&qp_info, 0, sizeof(qp_info)); |
| 80 | + ret = rte_uacce_queue_ioctl(&hw->qctx, CMD_QM_GET_QP_INFO, &qp_info); |
| 81 | + if (ret != 0) { |
| 82 | + HACC_DMA_ERR(hw, "get qm qp info fail!"); |
| 83 | + return -EINVAL; |
| 84 | + } |
| 85 | + if ((qp_info.sq_depth & (qp_info.sq_depth - 1)) != 0) { |
| 86 | + HACC_DMA_ERR(hw, "sq depth is not 2's power!"); |
| 87 | + return -EINVAL; |
| 88 | + } |
| 89 | + hw->sqe_size = qp_info.sqe_size; |
| 90 | + hw->sq_depth = qp_info.sq_depth; |
| 91 | + hw->cq_depth = qp_info.cq_depth; |
| 92 | + hw->sq_depth_mask = hw->sq_depth - 1; |
| 93 | + |
| 94 | + return 0; |
| 95 | +} |
| 96 | + |
/*
 * Allocate one uacce queue on @uacce_dev and expose it as a DPDK dmadev
 * named "<device>-dma<queue_id>".
 *
 * Resources are acquired in order and released in reverse on failure via
 * the goto chain: dmadev shell -> uacce queue -> MMIO mapping -> DUS
 * mapping -> status array.
 *
 * Returns 0 on success, a negative errno value otherwise.
 */
static int
hacc_dma_create(struct rte_uacce_device *uacce_dev, uint16_t queue_id)
{
	char name[RTE_DEV_NAME_MAX_LEN];
	struct rte_dma_dev *dev;
	struct hacc_dma_dev *hw;
	int ret;

	hacc_dma_gen_dev_name(uacce_dev, queue_id, name, sizeof(name));
	dev = rte_dma_pmd_allocate(name, uacce_dev->device.numa_node,
		sizeof(struct hacc_dma_dev));
	if (dev == NULL) {
		HACC_DMA_LOG(ERR, "%s allocate dmadev fail!", name);
		return -ENOMEM;
	}

	dev->device = &uacce_dev->device;
	dev->fp_obj->dev_private = dev->data->dev_private;

	hw = dev->data->dev_private;
	hw->data = dev->data; /* set early so HACC_DMA_DEBUG/INFO/WARN/ERR macros work below. */

	ret = rte_uacce_queue_alloc(uacce_dev, &hw->qctx);
	if (ret != 0) {
		HACC_DMA_ERR(hw, "alloc queue fail!");
		goto release_dma_pmd;
	}

	/* Fetch sqn, sqe_size and SQ/CQ depths from the kernel driver. */
	ret = hacc_dma_get_qp_info(hw);
	if (ret != 0)
		goto free_uacce_queue;

	hw->io_base = rte_uacce_queue_mmap(&hw->qctx, RTE_UACCE_QFRT_MMIO);
	if (hw->io_base == NULL) {
		HACC_DMA_ERR(hw, "mmap MMIO region fail!");
		ret = -EINVAL;
		goto free_uacce_queue;
	}
	hw->doorbell_reg = (void *)((uintptr_t)hw->io_base + HACC_DMA_DOORBELL_OFFSET);

	/*
	 * DUS region layout as derived below: SQEs at the base, CQEs right
	 * after the SQ ring, and two 32-bit status words (cq_status, then
	 * sq_status) at the very end of the region.
	 */
	hw->dus_base = rte_uacce_queue_mmap(&hw->qctx, RTE_UACCE_QFRT_DUS);
	if (hw->dus_base == NULL) {
		HACC_DMA_ERR(hw, "mmap DUS region fail!");
		ret = -EINVAL;
		goto unmap_mmio;
	}
	hw->sqe = hw->dus_base;
	hw->cqe = (void *)((uintptr_t)hw->dus_base + hw->sqe_size * hw->sq_depth);
	hw->sq_status = (uint32_t *)((uintptr_t)hw->dus_base +
		uacce_dev->qfrt_sz[RTE_UACCE_QFRT_DUS] - sizeof(uint32_t));
	hw->cq_status = hw->sq_status - 1;

	/* Host-side status array, sized one uint16_t per SQ entry. */
	hw->status = rte_zmalloc_socket(NULL, sizeof(uint16_t) * hw->sq_depth,
		RTE_CACHE_LINE_SIZE, uacce_dev->numa_node);
	if (hw->status == NULL) {
		HACC_DMA_ERR(hw, "malloc status region fail!");
		ret = -ENOMEM;
		goto unmap_dus;
	}

	dev->state = RTE_DMA_DEV_READY;
	HACC_DMA_DEBUG(hw, "create dmadev %s success!", name);

	return 0;

unmap_dus:
	rte_uacce_queue_unmap(&hw->qctx, RTE_UACCE_QFRT_DUS);
unmap_mmio:
	rte_uacce_queue_unmap(&hw->qctx, RTE_UACCE_QFRT_MMIO);
free_uacce_queue:
	rte_uacce_queue_free(&hw->qctx);
release_dma_pmd:
	rte_dma_pmd_release(name);
	return ret;
}
| 172 | + |
| 173 | +static int |
| 174 | +hacc_dma_parse_queues(const char *key, const char *value, void *extra_args) |
| 175 | +{ |
| 176 | + struct hacc_dma_config *config = extra_args; |
| 177 | + uint64_t val; |
| 178 | + char *end; |
| 179 | + |
| 180 | + RTE_SET_USED(key); |
| 181 | + |
| 182 | + errno = 0; |
| 183 | + val = strtoull(value, &end, 0); |
| 184 | + if (errno == ERANGE || value == end || *end != '\0' || val == 0) { |
| 185 | + HACC_DMA_LOG(ERR, "%s invalid queues! set to default one queue!", |
| 186 | + config->dev->name); |
| 187 | + config->queues = HACC_DMA_DEFAULT_QUEUES; |
| 188 | + } else if (val > config->avail_queues) { |
| 189 | + HACC_DMA_LOG(WARNING, "%s exceed available queues! set to available queues %u", |
| 190 | + config->dev->name, config->avail_queues); |
| 191 | + config->queues = config->avail_queues; |
| 192 | + } else { |
| 193 | + config->queues = val; |
| 194 | + } |
| 195 | + |
| 196 | + return 0; |
| 197 | +} |
| 198 | + |
| 199 | +static int |
| 200 | +hacc_dma_parse_devargs(struct rte_uacce_device *uacce_dev, struct hacc_dma_config *config) |
| 201 | +{ |
| 202 | + struct rte_kvargs *kvlist; |
| 203 | + int avail_queues; |
| 204 | + |
| 205 | + avail_queues = rte_uacce_avail_queues(uacce_dev); |
| 206 | + if (avail_queues <= 0) { |
| 207 | + HACC_DMA_LOG(ERR, "%s don't have available queues!", uacce_dev->name); |
| 208 | + return -EINVAL; |
| 209 | + } |
| 210 | + config->dev = uacce_dev; |
| 211 | + config->avail_queues = avail_queues <= UINT16_MAX ? avail_queues : UINT16_MAX; |
| 212 | + |
| 213 | + if (uacce_dev->device.devargs == NULL) |
| 214 | + return 0; |
| 215 | + |
| 216 | + kvlist = rte_kvargs_parse(uacce_dev->device.devargs->args, NULL); |
| 217 | + if (kvlist == NULL) |
| 218 | + return 0; |
| 219 | + |
| 220 | + (void)rte_kvargs_process(kvlist, HACC_DMA_DEVARG_QUEUES, &hacc_dma_parse_queues, config); |
| 221 | + |
| 222 | + rte_kvargs_free(kvlist); |
| 223 | + |
| 224 | + return 0; |
| 225 | +} |
| 226 | + |
| 227 | +static int |
| 228 | +hacc_dma_probe(struct rte_uacce_driver *dr, struct rte_uacce_device *uacce_dev) |
| 229 | +{ |
| 230 | + struct hacc_dma_config config = { .queues = HACC_DMA_DEFAULT_QUEUES }; |
| 231 | + int ret = 0; |
| 232 | + uint32_t i; |
| 233 | + |
| 234 | + RTE_SET_USED(dr); |
| 235 | + |
| 236 | + ret = hacc_dma_parse_devargs(uacce_dev, &config); |
| 237 | + if (ret != 0) |
| 238 | + return ret; |
| 239 | + |
| 240 | + for (i = 0; i < config.queues; i++) { |
| 241 | + ret = hacc_dma_create(uacce_dev, i); |
| 242 | + if (ret != 0) { |
| 243 | + HACC_DMA_LOG(ERR, "%s create dmadev No.%u failed!", uacce_dev->name, i); |
| 244 | + break; |
| 245 | + } |
| 246 | + } |
| 247 | + |
| 248 | + if (ret != 0 && i > 0) { |
| 249 | + HACC_DMA_LOG(WARNING, "%s probed %u dmadev, can't probe more!", uacce_dev->name, i); |
| 250 | + ret = 0; |
| 251 | + } |
| 252 | + |
| 253 | + return ret; |
| 254 | +} |
| 255 | + |
| 256 | +static int |
| 257 | +hacc_dma_remove(struct rte_uacce_device *uacce_dev) |
| 258 | +{ |
| 259 | + char name[RTE_DEV_NAME_MAX_LEN]; |
| 260 | + struct rte_dma_info info; |
| 261 | + int i = 0; |
| 262 | + int ret; |
| 263 | + |
| 264 | + hacc_dma_gen_dev_prefix(uacce_dev, name, sizeof(name)); |
| 265 | + RTE_DMA_FOREACH_DEV(i) { |
| 266 | + ret = rte_dma_info_get(i, &info); |
| 267 | + if (ret != 0) |
| 268 | + continue; |
| 269 | + if (strncmp(info.dev_name, name, strlen(name)) == 0) |
| 270 | + rte_dma_pmd_release(info.dev_name); |
| 271 | + } |
| 272 | + |
| 273 | + return 0; |
| 274 | +} |
| 275 | + |
/* uacce devices handled by this driver, matched by (dev_api, alg) pair. */
static const struct rte_uacce_id hacc_dma_id_table[] = {
	{ "hisi_qm_v5", "udma" },
	{ .dev_api = NULL, }, /* sentinel: end of table */
};

static struct rte_uacce_driver hacc_dma_pmd_drv = {
	.id_table = hacc_dma_id_table,
	.probe = hacc_dma_probe,
	.remove = hacc_dma_remove,
};

/* Register the driver with the uacce bus and advertise its devargs. */
RTE_PMD_REGISTER_UACCE(dma_hisi_acc, hacc_dma_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(dma_hisi_acc,
	HACC_DMA_DEVARG_QUEUES "=<uint16> ");