diff --git a/kernel/linux/efa/Makefile b/kernel/linux/efa/Makefile index 69fb9d90..e783a39c 100644 --- a/kernel/linux/efa/Makefile +++ b/kernel/linux/efa/Makefile @@ -15,8 +15,11 @@ ccflags-y := -I$(src) KERNEL_VERSION ?= $(shell uname -r) ccflags-y += -Wfatal-errors -all: +modules: make -C /lib/modules/$(KERNEL_VERSION)/build M=$(CURDIR) modules +install: modules + make -C /lib/modules/$(KERNEL_VERSION)/build M=$(CURDIR) modules_install + clean: make -C /lib/modules/$(KERNEL_VERSION)/build M=$(CURDIR) clean diff --git a/kernel/linux/efa/RELEASENOTES.md b/kernel/linux/efa/RELEASENOTES.md index b29b8f0c..dfa4f1b9 100644 --- a/kernel/linux/efa/RELEASENOTES.md +++ b/kernel/linux/efa/RELEASENOTES.md @@ -13,6 +13,11 @@ The driver was tested on the following distributions: * CentOS 7.4 * CentOS 7.6 +## r0.9.1 release notes + +* Bug fix in EFA spec file +* Upstream review cleanups + ## r0.9.0 release notes Initial commit diff --git a/kernel/linux/efa/conf/dkms.conf b/kernel/linux/efa/conf/dkms.conf index d0bd06ae..03dbaae9 100644 --- a/kernel/linux/efa/conf/dkms.conf +++ b/kernel/linux/efa/conf/dkms.conf @@ -1,5 +1,5 @@ PACKAGE_NAME="efa" -PACKAGE_VERSION="0.9.0" +PACKAGE_VERSION="0.9.1" CLEAN="make clean" MAKE="make KERNEL_VERSION=${kernelver}" BUILT_MODULE_NAME[0]="efa" diff --git a/kernel/linux/efa/efa.h b/kernel/linux/efa/efa.h index 652e7481..696cae11 100644 --- a/kernel/linux/efa/efa.h +++ b/kernel/linux/efa/efa.h @@ -32,27 +32,6 @@ #define EFA_NUM_MSIX_VEC 1 #define EFA_MGMNT_MSIX_VEC_IDX 0 -#define efa_dbg(_dev, format, ...) \ - dev_dbg(_dev, "(pid %d) %s: " format, current->pid, \ - __func__, ##__VA_ARGS__) -#define efa_info(_dev, format, ...) \ - dev_info(_dev, "(pid %d) %s: " format, current->pid, \ - __func__, ##__VA_ARGS__) -#define efa_warn(_dev, format, ...) \ - dev_warn(_dev, "(pid %d) %s: " format, current->pid, \ - __func__, ##__VA_ARGS__) -#define efa_err(_dev, format, ...) 
\ - dev_err(_dev, "(pid %d) %s: " format, current->pid, \ - __func__, ##__VA_ARGS__) -#define efa_err_rl(_dev, format, ...) \ - dev_err_ratelimited(_dev, "(pid %d) %s: " format, current->pid, \ - __func__, ##__VA_ARGS__) - -enum { - EFA_DEVICE_RUNNING_BIT, - EFA_MSIX_ENABLED_BIT -}; - struct efa_irq { irq_handler_t handler; void *data; @@ -79,8 +58,8 @@ struct efa_stats { struct efa_dev { struct ib_device ibdev; + struct efa_com_dev edev; struct pci_dev *pdev; - struct efa_com_dev *edev; struct efa_com_get_device_attr_result dev_attr; u64 reg_bar_addr; @@ -97,7 +76,6 @@ struct efa_dev { #else int admin_msix_vector_idx; #endif - unsigned long state; struct efa_irq admin_irq; #ifndef HAVE_CREATE_AH_UDATA @@ -115,10 +93,14 @@ struct efa_dev { struct efa_ucontext { struct ib_ucontext ibucontext; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) + struct xarray mmap_xa; +#else /* Protects ucontext state */ struct mutex lock; struct list_head pending_mmaps; - u64 mmap_key; + u32 mmap_page; +#endif u16 uarn; }; @@ -219,9 +201,16 @@ int efa_mmap(struct ib_ucontext *ibucontext, struct vm_area_struct *vma); #ifdef HAVE_CREATE_AH_UDATA #ifdef HAVE_CREATE_AH_RDMA_ATTR +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS +struct ib_ah *efa_create_ah(struct ib_pd *ibpd, + struct rdma_ah_attr *ah_attr, + u32 flags, + struct ib_udata *udata); +#else struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *ah_attr, struct ib_udata *udata); +#endif #else struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr, @@ -231,7 +220,11 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr); #endif +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS +int efa_destroy_ah(struct ib_ah *ibah, u32 flags); +#else int efa_destroy_ah(struct ib_ah *ibah); +#endif #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0) int efa_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, diff --git a/kernel/linux/efa/efa_admin_cmds_defs.h 
b/kernel/linux/efa/efa_admin_cmds_defs.h index ea2de389..2be0469d 100644 --- a/kernel/linux/efa/efa_admin_cmds_defs.h +++ b/kernel/linux/efa/efa_admin_cmds_defs.h @@ -11,9 +11,7 @@ /* EFA admin queue opcodes */ enum efa_admin_aq_opcode { - /* starting opcode of efa admin commands */ - EFA_ADMIN_START_CMD_RANGE = 1, - EFA_ADMIN_CREATE_QP = EFA_ADMIN_START_CMD_RANGE, + EFA_ADMIN_CREATE_QP = 1, EFA_ADMIN_MODIFY_QP = 2, EFA_ADMIN_QUERY_QP = 3, EFA_ADMIN_DESTROY_QP = 4, diff --git a/kernel/linux/efa/efa_com.c b/kernel/linux/efa/efa_com.c index b52e2a56..5ab547a9 100644 --- a/kernel/linux/efa/efa_com.c +++ b/kernel/linux/efa/efa_com.c @@ -3,7 +3,6 @@ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved. */ -#include "efa.h" #include "efa_com.h" #include "efa_regs_defs.h" @@ -112,17 +111,17 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) } while (time_is_after_jiffies(exp_time)); if (read_resp->req_id != mmio_read->seq_num) { - efa_err_rl(edev->dmadev, - "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", - mmio_read->seq_num, offset, read_resp->req_id, - read_resp->reg_off); + ibdev_err(edev->efa_dev, + "Reading register timed out. 
expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", + mmio_read->seq_num, offset, read_resp->req_id, + read_resp->reg_off); err = EFA_MMIO_READ_INVALID; goto out; } if (read_resp->reg_off != offset) { - efa_err_rl(edev->dmadev, - "Reading register failed: wrong offset provided\n"); + ibdev_err(edev->efa_dev, + "Reading register failed: wrong offset provided\n"); err = EFA_MMIO_READ_INVALID; goto out; } @@ -137,7 +136,7 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_sq *sq = &aq->sq; - u16 size = ADMIN_SQ_SIZE(aq->depth); + u16 size = aq->depth * sizeof(*sq->entries); u32 addr_high; u32 addr_low; u32 aq_caps; @@ -175,7 +174,7 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev) { struct efa_com_admin_queue *aq = &edev->aq; struct efa_com_admin_cq *cq = &aq->cq; - u16 size = ADMIN_CQ_SIZE(aq->depth); + u16 size = aq->depth * sizeof(*cq->entries); u32 addr_high; u32 addr_low; u32 acq_caps; @@ -217,11 +216,11 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev, u16 size; if (!aenq_handlers) { - efa_err(edev->dmadev, "aenq handlers pointer is NULL\n"); + ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n"); return -EINVAL; } - size = ADMIN_AENQ_SIZE(EFA_ASYNC_QUEUE_DEPTH); + size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries); aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) @@ -284,7 +283,7 @@ static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq, u16 comp_id = comp_ctx->user_cqe->acq_common_descriptor.command & EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; - efa_dbg(aq->dmadev, "Putting completion command_id %d\n", comp_id); + ibdev_dbg(aq->efa_dev, "Putting completion command_id %d\n", comp_id); comp_ctx->occupied = 0; efa_com_dealloc_ctx_id(aq, comp_id); } @@ -293,22 +292,21 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq, u16 command_id, 
bool capture) { if (command_id >= aq->depth) { - efa_err_rl(aq->dmadev, - "command id is larger than the queue size. cmd_id: %u queue size %d\n", - command_id, aq->depth); + ibdev_err(aq->efa_dev, + "command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, aq->depth); return NULL; } if (aq->comp_ctx[command_id].occupied && capture) { - efa_err_rl(aq->dmadev, "Completion context is occupied\n"); + ibdev_err(aq->efa_dev, "Completion context is occupied\n"); return NULL; } if (capture) { aq->comp_ctx[command_id].occupied = 1; - efa_dbg(aq->dmadev, - "Taking completion ctxt command_id %d\n", - command_id); + ibdev_dbg(aq->efa_dev, "Taking completion ctxt command_id %d\n", + command_id); } return &aq->comp_ctx[command_id]; @@ -403,7 +401,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue spin_lock(&aq->sq.lock); if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { - efa_err_rl(aq->dmadev, "Admin queue is closed\n"); + ibdev_err(aq->efa_dev, "Admin queue is closed\n"); spin_unlock(&aq->sq.lock); return ERR_PTR(-ENODEV); } @@ -428,8 +426,8 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false); if (!comp_ctx) { - efa_err(aq->dmadev, - "comp_ctx is NULL. Changing the admin queue running state\n"); + ibdev_err(aq->efa_dev, + "comp_ctx is NULL. 
Changing the admin queue running state\n"); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); return; } @@ -522,8 +520,8 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c break; if (time_is_before_jiffies(timeout)) { - efa_err_rl(aq->dmadev, - "Wait for completion (polling) timeout\n"); + ibdev_err(aq->efa_dev, + "Wait for completion (polling) timeout\n"); /* EFA didn't have any completion */ atomic64_inc(&aq->stats.no_completion); @@ -536,7 +534,7 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c } if (comp_ctx->status == EFA_CMD_ABORTED) { - efa_err_rl(aq->dmadev, "Command was aborted\n"); + ibdev_err(aq->efa_dev, "Command was aborted\n"); atomic64_inc(&aq->stats.aborted_cmd); err = -ENODEV; goto out; @@ -574,19 +572,17 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com atomic64_inc(&aq->stats.no_completion); if (comp_ctx->status == EFA_CMD_COMPLETED) - efa_err_rl(aq->dmadev, - "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", - efa_com_cmd_str(comp_ctx->cmd_opcode), - comp_ctx->cmd_opcode, comp_ctx->status, - comp_ctx, aq->sq.pc, - aq->sq.cc, aq->cq.cc); + ibdev_err(aq->efa_dev, + "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", + efa_com_cmd_str(comp_ctx->cmd_opcode), + comp_ctx->cmd_opcode, comp_ctx->status, + comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); else - efa_err_rl(aq->dmadev, - "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", - efa_com_cmd_str(comp_ctx->cmd_opcode), - comp_ctx->cmd_opcode, comp_ctx->status, - comp_ctx, aq->sq.pc, - aq->sq.cc, aq->cq.cc); + ibdev_err(aq->efa_dev, + "The device didn't send any 
completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", + efa_com_cmd_str(comp_ctx->cmd_opcode), + comp_ctx->cmd_opcode, comp_ctx->status, + comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); err = -ETIME; @@ -643,15 +639,15 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, /* In case of queue FULL */ down(&aq->avail_cmds); - efa_dbg(aq->dmadev, "%s (opcode %d)\n", - efa_com_cmd_str(cmd->aq_common_descriptor.opcode), - cmd->aq_common_descriptor.opcode); + ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n", + efa_com_cmd_str(cmd->aq_common_descriptor.opcode), + cmd->aq_common_descriptor.opcode); comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { - efa_err_rl(aq->dmadev, - "Failed to submit command %s (opcode %u) err %ld\n", - efa_com_cmd_str(cmd->aq_common_descriptor.opcode), - cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); + ibdev_err(aq->efa_dev, + "Failed to submit command %s (opcode %u) err %ld\n", + efa_com_cmd_str(cmd->aq_common_descriptor.opcode), + cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); up(&aq->avail_cmds); return PTR_ERR(comp_ctx); @@ -659,11 +655,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); if (err) - efa_err_rl(aq->dmadev, - "Failed to process command %s (opcode %u) comp_status %d err %d\n", - efa_com_cmd_str(cmd->aq_common_descriptor.opcode), - cmd->aq_common_descriptor.opcode, - comp_ctx->comp_status, err); + ibdev_err(aq->efa_dev, + "Failed to process command %s (opcode %u) comp_status %d err %d\n", + efa_com_cmd_str(cmd->aq_common_descriptor.opcode), + cmd->aq_common_descriptor.opcode, + comp_ctx->comp_status, err); up(&aq->avail_cmds); @@ -747,13 +743,13 @@ void efa_com_admin_destroy(struct efa_com_dev *edev) devm_kfree(edev->dmadev, aq->comp_ctx_pool); devm_kfree(edev->dmadev, aq->comp_ctx); - size = ADMIN_SQ_SIZE(aq->depth); 
+ size = aq->depth * sizeof(*sq->entries); dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr); - size = ADMIN_CQ_SIZE(aq->depth); + size = aq->depth * sizeof(*cq->entries); dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr); - size = ADMIN_AENQ_SIZE(aenq->depth); + size = aenq->depth * sizeof(*aenq->entries); dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr); } @@ -808,9 +804,9 @@ int efa_com_admin_init(struct efa_com_dev *edev, dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) { - efa_err(edev->dmadev, - "Device isn't ready, abort com init 0x%08x\n", - dev_sts); + ibdev_err(edev->efa_dev, + "Device isn't ready, abort com init 0x%08x\n", + dev_sts); return -ENODEV; } @@ -818,6 +814,7 @@ int efa_com_admin_init(struct efa_com_dev *edev, aq->bus = edev->bus; aq->dmadev = edev->dmadev; + aq->efa_dev = edev->efa_dev; set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state); sema_init(&aq->avail_cmds, aq->depth); @@ -858,10 +855,10 @@ int efa_com_admin_init(struct efa_com_dev *edev, return 0; err_destroy_cq: - dma_free_coherent(edev->dmadev, ADMIN_CQ_SIZE(aq->depth), + dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries), aq->cq.entries, aq->cq.dma_addr); err_destroy_sq: - dma_free_coherent(edev->dmadev, ADMIN_SQ_SIZE(aq->depth), + dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries), aq->sq.entries, aq->sq.dma_addr); err_destroy_comp_ctxt: devm_kfree(edev->dmadev, aq->comp_ctx); @@ -1003,17 +1000,6 @@ void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev) mmio_read->read_resp, mmio_read->read_resp_dma_addr); } -/** - * efa_com_validate_version - Validate the device parameters - * @edev: EFA communication layer struct - * - * This method validate the device parameters are the same as the saved - * parameters in edev. 
- * This method is useful after device reset, to validate the device mac address - * and the device offloads are the same as before the reset. - * - * @return - 0 on success negative value otherwise. - */ int efa_com_validate_version(struct efa_com_dev *edev) { u32 ctrl_ver_masked; @@ -1028,27 +1014,26 @@ int efa_com_validate_version(struct efa_com_dev *edev) ctrl_ver = efa_com_reg_read32(edev, EFA_REGS_CONTROLLER_VERSION_OFF); - efa_info(edev->dmadev, - "efa device version: %d.%d\n", - (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >> - EFA_REGS_VERSION_MAJOR_VERSION_SHIFT, - ver & EFA_REGS_VERSION_MINOR_VERSION_MASK); + ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n", + (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >> + EFA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & EFA_REGS_VERSION_MINOR_VERSION_MASK); if (ver < MIN_EFA_VER) { - efa_err(edev->dmadev, - "EFA version is lower than the minimal version the driver supports\n"); + ibdev_err(edev->efa_dev, + "EFA version is lower than the minimal version the driver supports\n"); return -EOPNOTSUPP; } - efa_info(edev->dmadev, - "efa controller version: %d.%d.%d implementation version %d\n", - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) - >> EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) - >> EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), - (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> - EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + ibdev_dbg(edev->efa_dev, + "efa controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> + EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> + EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & 
EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); ctrl_ver_masked = (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | @@ -1057,8 +1042,8 @@ int efa_com_validate_version(struct efa_com_dev *edev) /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < MIN_EFA_CTRL_VER) { - efa_err(edev->dmadev, - "EFA ctrl version is lower than the minimal ctrl version the driver supports\n"); + ibdev_err(edev->efa_dev, + "EFA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -EOPNOTSUPP; } @@ -1082,10 +1067,10 @@ int efa_com_get_dma_width(struct efa_com_dev *edev) width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; - efa_dbg(edev->dmadev, "DMA width: %d\n", width); + ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width); if (width < 32 || width > 64) { - efa_err(edev->dmadev, "DMA width illegal value: %d\n", width); + ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width); return -EINVAL; } @@ -1106,7 +1091,7 @@ static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, exp_state) return 0; - efa_dbg(edev->dmadev, "Reset indication val %d\n", val); + ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val); msleep(EFA_POLL_INTERVAL_MS); } @@ -1130,14 +1115,15 @@ int efa_com_dev_reset(struct efa_com_dev *edev, cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) { - efa_err(edev->dmadev, "Device isn't ready, can't reset device\n"); + ibdev_err(edev->efa_dev, + "Device isn't ready, can't reset device\n"); return -EINVAL; } timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >> EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT; if (!timeout) { - efa_err(edev->dmadev, "Invalid timeout value\n"); + ibdev_err(edev->efa_dev, "Invalid timeout value\n"); return -EINVAL; } @@ -1153,7 +1139,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev, err = wait_for_reset_state(edev, timeout, 
EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); if (err) { - efa_err(edev->dmadev, "Reset indication didn't turn on\n"); + ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n"); return err; } @@ -1161,7 +1147,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev, writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF); err = wait_for_reset_state(edev, timeout, 0); if (err) { - efa_err(edev->dmadev, "Reset indication didn't turn off\n"); + ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n"); return err; } diff --git a/kernel/linux/efa/efa_com.h b/kernel/linux/efa/efa_com.h index c77d4414..16f729e3 100644 --- a/kernel/linux/efa/efa_com.h +++ b/kernel/linux/efa/efa_com.h @@ -7,7 +7,12 @@ #define _EFA_COM_H_ #include +#include +#include #include +#include + +#include #include "kcompat.h" #include "efa_common_defs.h" @@ -17,10 +22,6 @@ #define EFA_MAX_HANDLERS 256 -#define ADMIN_SQ_SIZE(depth) ((depth) * sizeof(struct efa_admin_aq_entry)) -#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct efa_admin_acq_entry)) -#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct efa_admin_aenq_entry)) - struct efa_com_admin_cq { struct efa_admin_acq_entry *entries; dma_addr_t dma_addr; @@ -58,6 +59,7 @@ enum { struct efa_com_admin_queue { void *dmadev; + void *efa_dev; void *bus; struct efa_comp_ctx *comp_ctx; u32 completion_timeout; /* usecs */ @@ -105,6 +107,7 @@ struct efa_com_dev { struct efa_com_aenq aenq; u8 __iomem *reg_bar; void *dmadev; + void *efa_dev; void *bus; u32 supported_features; u32 dma_addr_bits; diff --git a/kernel/linux/efa/efa_com_cmd.c b/kernel/linux/efa/efa_com_cmd.c index effe1bab..34475d0d 100644 --- a/kernel/linux/efa/efa_com_cmd.c +++ b/kernel/linux/efa/efa_com_cmd.c @@ -45,7 +45,7 @@ int efa_com_create_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - efa_err_rl(edev->dmadev, "Failed to create qp [%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err); return err; 
} @@ -83,8 +83,9 @@ int efa_com_modify_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to modify qp-%u modify_mask[%#x] [%d]\n", - cmd.qp_handle, cmd.modify_mask, err); + ibdev_err(edev->efa_dev, + "Failed to modify qp-%u modify_mask[%#x] [%d]\n", + cmd.qp_handle, cmd.modify_mask, err); return err; } @@ -109,8 +110,8 @@ int efa_com_query_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to query qp-%u [%d]\n", - cmd.qp_handle, err); + ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n", + cmd.qp_handle, err); return err; } @@ -139,8 +140,8 @@ int efa_com_destroy_qp(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) - efa_err_rl(edev->dmadev, "Failed to destroy qp-%u [%d]\n", - qp_cmd.qp_handle, err); + ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n", + qp_cmd.qp_handle, err); return 0; } @@ -171,7 +172,7 @@ int efa_com_create_cq(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - efa_err_rl(edev->dmadev, "Failed to create cq[%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err); return err; } @@ -199,9 +200,8 @@ int efa_com_destroy_cq(struct efa_com_dev *edev, sizeof(destroy_resp)); if (err) - efa_err_rl(edev->dmadev, - "Failed to destroy CQ-%u [%d]\n", - params->cq_idx, err); + ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n", + params->cq_idx, err); return 0; } @@ -247,7 +247,7 @@ int efa_com_register_mr(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - efa_err_rl(edev->dmadev, "Failed to register mr [%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err); return err; } @@ -274,9 +274,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev, (struct efa_admin_acq_entry 
*)&cmd_completion, sizeof(cmd_completion)); if (err) - efa_err_rl(edev->dmadev, - "Failed to de-register mr(lkey-%u) [%d]\n", - mr_cmd.l_key, err); + ibdev_err(edev->efa_dev, + "Failed to de-register mr(lkey-%u) [%d]\n", + mr_cmd.l_key, err); return 0; } @@ -301,7 +301,7 @@ int efa_com_create_ah(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) { - efa_err_rl(edev->dmadev, "Failed to create ah [%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to create ah [%d]\n", err); return err; } @@ -328,9 +328,8 @@ int efa_com_destroy_ah(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (err) - efa_err_rl(edev->dmadev, - "Failed to destroy ah-%d pd-%d [%d]\n", - ah_cmd.ah, ah_cmd.pd, err); + ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n", + ah_cmd.ah, ah_cmd.pd, err); return 0; } @@ -360,8 +359,8 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, int err; if (!efa_com_check_supported_feature_id(edev, feature_id)) { - efa_err_rl(edev->dmadev, "Feature %d isn't supported\n", - feature_id); + ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", + feature_id); return -EOPNOTSUPP; } @@ -389,9 +388,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev, sizeof(*get_resp)); if (err) - efa_err_rl(edev->dmadev, - "Failed to submit get_feature command %d [%d]\n", - feature_id, err); + ibdev_err(edev->efa_dev, + "Failed to submit get_feature command %d [%d]\n", + feature_id, err); return 0; } @@ -412,8 +411,8 @@ int efa_com_get_network_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR); if (err) { - efa_err_rl(edev->dmadev, - "Failed to get network attributes %d\n", err); + ibdev_err(edev->efa_dev, + "Failed to get network attributes %d\n", err); return err; } @@ -432,8 +431,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR); if 
(err) { - efa_err_rl(edev->dmadev, - "Failed to get device attributes %d\n", err); + ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n", + err); return err; } @@ -447,9 +446,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, result->db_bar = resp.u.device_attr.db_bar; if (result->admin_api_version < 1) { - efa_err_rl(edev->dmadev, - "Failed to get device attr api version [%u < 1]\n", - result->admin_api_version); + ibdev_err(edev->efa_dev, + "Failed to get device attr api version [%u < 1]\n", + result->admin_api_version); return -EINVAL; } @@ -457,8 +456,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_QUEUE_ATTR); if (err) { - efa_err_rl(edev->dmadev, - "Failed to get network attributes %d\n", err); + ibdev_err(edev->efa_dev, + "Failed to get network attributes %d\n", err); return err; } @@ -488,7 +487,7 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev, err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS); if (err) { - efa_err_rl(edev->dmadev, "Failed to get hw hints %d\n", err); + ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err); return err; } @@ -511,8 +510,8 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, int err; if (!efa_com_check_supported_feature_id(edev, feature_id)) { - efa_err_rl(edev->dmadev, "Feature %d isn't supported\n", - feature_id); + ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", + feature_id); return -EOPNOTSUPP; } @@ -536,9 +535,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev, sizeof(*set_resp)); if (err) - efa_err_rl(edev->dmadev, - "Failed to submit set_feature command %d error: %d\n", - feature_id, err); + ibdev_err(edev->efa_dev, + "Failed to submit set_feature command %d error: %d\n", + feature_id, err); return 0; } @@ -559,25 +558,24 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) struct efa_admin_set_feature_cmd cmd = {}; int err; - efa_dbg(edev->dmadev, - "Configuring aenq 
with groups[0x%x]\n", groups); + ibdev_dbg(edev->efa_dev, "Configuring aenq with groups[0x%x]\n", groups); err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG); if (err) { - efa_err_rl(edev->dmadev, - "Failed to get aenq attributes: %d\n", err); + ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n", + err); return err; } - efa_dbg(edev->dmadev, - "Get aenq groups: supported[0x%x] enabled[0x%x]\n", - get_resp.u.aenq.supported_groups, - get_resp.u.aenq.enabled_groups); + ibdev_dbg(edev->efa_dev, + "Get aenq groups: supported[0x%x] enabled[0x%x]\n", + get_resp.u.aenq.supported_groups, + get_resp.u.aenq.enabled_groups); if ((get_resp.u.aenq.supported_groups & groups) != groups) { - efa_err_rl(edev->dmadev, - "Trying to set unsupported aenq groups[0x%x] supported[0x%x]\n", - groups, get_resp.u.aenq.supported_groups); + ibdev_err(edev->efa_dev, + "Trying to set unsupported aenq groups[0x%x] supported[0x%x]\n", + groups, get_resp.u.aenq.supported_groups); return -EOPNOTSUPP; } @@ -585,8 +583,8 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups) err = efa_com_set_feature(edev, &set_resp, &cmd, EFA_ADMIN_AENQ_CONFIG); if (err) { - efa_err_rl(edev->dmadev, - "Failed to set aenq attributes: %d\n", err); + ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n", + err); return err; } @@ -609,7 +607,7 @@ int efa_com_alloc_pd(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to allocate pd[%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err); return err; } @@ -635,8 +633,8 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to deallocate pd-%u [%d]\n", - cmd.pd, err); + ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n", + cmd.pd, err); return err; } @@ -659,7 +657,7 @@ int efa_com_alloc_uar(struct efa_com_dev *edev, (struct 
efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to allocate uar[%d]\n", err); + ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err); return err; } @@ -685,8 +683,8 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev, (struct efa_admin_acq_entry *)&resp, sizeof(resp)); if (err) { - efa_err_rl(edev->dmadev, "Failed to deallocate uar-%u [%d]\n", - cmd.uar, err); + ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n", + cmd.uar, err); return err; } diff --git a/kernel/linux/efa/efa_main.c b/kernel/linux/efa/efa_main.c index e7154a52..c15b56a4 100644 --- a/kernel/linux/efa/efa_main.c +++ b/kernel/linux/efa/efa_main.c @@ -23,7 +23,7 @@ static const struct pci_device_id efa_pci_tbl[] = { #define DRV_MODULE_VER_MAJOR 0 #define DRV_MODULE_VER_MINOR 9 -#define DRV_MODULE_VER_SUBMINOR 0 +#define DRV_MODULE_VER_SUBMINOR 1 #ifndef DRV_MODULE_VERSION #define DRV_MODULE_VERSION \ @@ -66,7 +66,7 @@ static void efa_update_network_attr(struct efa_dev *dev, memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr)); dev->mtu = network_attr->mtu; - efa_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr); + dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr); } /* This handler will called for unknown event group or unimplemented handlers */ @@ -75,8 +75,8 @@ static void unimplemented_aenq_handler(void *data, { struct efa_dev *dev = (struct efa_dev *)data; - efa_err_rl(&dev->ibdev.dev, - "Unknown event was received or event with unimplemented handler\n"); + ibdev_err(&dev->ibdev, + "Unknown event was received or event with unimplemented handler\n"); } static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e) @@ -104,13 +104,10 @@ static void efa_release_bars(struct efa_dev *dev, int bars_mask) static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data) { - struct efa_dev *dev = (struct efa_dev *)data; - - efa_com_admin_q_comp_intr_handler(dev->edev); + struct efa_dev *dev = data; - /* Don't 
call the aenq handler before probe is done */ - if (test_bit(EFA_DEVICE_RUNNING_BIT, &dev->state)) - efa_com_aenq_intr_handler(dev->edev, data); + efa_com_admin_q_comp_intr_handler(&dev->edev); + efa_com_aenq_intr_handler(&dev->edev, data); return IRQ_HANDLED; } @@ -124,12 +121,12 @@ static int efa_request_mgmnt_irq(struct efa_dev *dev) err = request_irq(irq->vector, irq->handler, 0, irq->name, irq->data); if (err) { - efa_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n", + dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n", err); return err; } - efa_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n", + dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n", nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector); irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); @@ -154,7 +151,7 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev) dev->admin_irq.cpu = cpu; cpumask_set_cpu(cpu, &dev->admin_irq.affinity_hint_mask); - efa_info(&dev->pdev->dev, "Setup irq:%p vector:%d name:%s\n", + dev_info(&dev->pdev->dev, "Setup irq:%p vector:%d name:%s\n", &dev->admin_irq, dev->admin_irq.vector, dev->admin_irq.name); @@ -188,7 +185,7 @@ static int efa_request_doorbell_bar(struct efa_dev *dev) err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); if (err) { - efa_err(&dev->pdev->dev, + dev_err(&dev->pdev->dev, "pci_request_selected_regions for bar %d failed %d\n", db_bar_idx, err); return err; @@ -210,7 +207,7 @@ static void efa_release_doorbell_bar(struct efa_dev *dev) static void efa_update_hw_hints(struct efa_dev *dev, struct efa_com_get_hw_hints_result *hw_hints) { - struct efa_com_dev *edev = dev->edev; + struct efa_com_dev *edev = &dev->edev; if (hw_hints->mmio_read_timeout) edev->mmio_read.mmio_read_timeout = @@ -233,7 +230,7 @@ static void efa_stats_init(struct efa_dev *dev) atomic64_set(s, 0); } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#ifdef 
HAVE_IB_DEV_OPS static const struct ib_device_ops efa_dev_ops = { .alloc_pd = efa_kzalloc_pd, .alloc_ucontext = efa_kzalloc_ucontext, @@ -281,29 +278,29 @@ static int efa_ib_device_add(struct efa_dev *dev) efa_stats_init(dev); - err = efa_com_get_device_attr(dev->edev, &dev->dev_attr); + err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr); if (err) return err; - efa_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar); + dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar); err = efa_request_doorbell_bar(dev); if (err) return err; - err = efa_com_get_network_attr(dev->edev, &network_attr); + err = efa_com_get_network_attr(&dev->edev, &network_attr); if (err) goto err_release_doorbell_bar; efa_update_network_attr(dev, &network_attr); - err = efa_com_get_hw_hints(dev->edev, &hw_hints); + err = efa_com_get_hw_hints(&dev->edev, &hw_hints); if (err) goto err_release_doorbell_bar; efa_update_hw_hints(dev, &hw_hints); /* Try to enable all the available aenq groups */ - err = efa_com_set_aenq_config(dev->edev, EFA_AENQ_ENABLED_GROUPS); + err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS); if (err) goto err_release_doorbell_bar; @@ -341,7 +338,7 @@ static int efa_ib_device_add(struct efa_dev *dev) (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); #endif -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#ifdef HAVE_IB_DEV_OPS ib_set_device_ops(&dev->ibdev, &efa_dev_ops); #else dev->ibdev.alloc_pd = efa_kzalloc_pd; @@ -385,19 +382,17 @@ static int efa_ib_device_add(struct efa_dev *dev) if (err) goto err_release_doorbell_bar; - efa_info(&dev->ibdev.dev, "IB device registered\n"); + ibdev_info(&dev->ibdev, "IB device registered\n"); #ifdef HAVE_CUSTOM_COMMANDS sscanf(dev_name(&dev->ibdev.dev), "efa_%d\n", &devnum); err = efa_everbs_dev_init(dev, devnum); if (err) goto err_unregister_ibdev; - efa_info(&dev->ibdev.dev, "Created everbs device %s%d\n", - EFA_EVERBS_DEVICE_NAME, devnum); + ibdev_info(&dev->ibdev, "Created everbs 
device %s%d\n", + EFA_EVERBS_DEVICE_NAME, devnum); #endif - set_bit(EFA_DEVICE_RUNNING_BIT, &dev->state); - return 0; #ifdef HAVE_CUSTOM_COMMANDS @@ -414,15 +409,11 @@ static void efa_ib_device_remove(struct efa_dev *dev) #ifndef HAVE_CREATE_AH_UDATA WARN_ON(!list_empty(&dev->efa_ah_list)); #endif - - /* Reset the device only if the device is running. */ - if (test_bit(EFA_DEVICE_RUNNING_BIT, &dev->state)) - efa_com_dev_reset(dev->edev, EFA_REGS_RESET_NORMAL); - + efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL); #ifdef HAVE_CUSTOM_COMMANDS efa_everbs_dev_destroy(dev); #endif - efa_info(&dev->ibdev.dev, "Unregister ib device\n"); + ibdev_info(&dev->ibdev, "Unregister ib device\n"); ib_unregister_device(&dev->ibdev); efa_release_doorbell_bar(dev); } @@ -430,11 +421,9 @@ static void efa_ib_device_remove(struct efa_dev *dev) static void efa_disable_msix(struct efa_dev *dev) { #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) - if (test_and_clear_bit(EFA_MSIX_ENABLED_BIT, &dev->state)) - pci_disable_msix(dev->pdev); + pci_disable_msix(dev->pdev); #else - if (test_and_clear_bit(EFA_MSIX_ENABLED_BIT, &dev->state)) - pci_free_irq_vectors(dev->pdev); + pci_free_irq_vectors(dev->pdev); #endif } @@ -442,14 +431,9 @@ static int efa_enable_msix(struct efa_dev *dev) { int msix_vecs, irq_num; - if (test_bit(EFA_MSIX_ENABLED_BIT, &dev->state)) { - efa_err(&dev->pdev->dev, "Error, MSI-X is already enabled\n"); - return -EPERM; - } - /* Reserve the max msix vectors we might need */ msix_vecs = EFA_NUM_MSIX_VEC; - efa_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n", + dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n", msix_vecs); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) @@ -464,20 +448,18 @@ static int efa_enable_msix(struct efa_dev *dev) #endif if (irq_num < 0) { - efa_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n", + dev_err(&dev->pdev->dev, "Failed to enable MSI-X. 
irq_num %d\n", irq_num); return -ENOSPC; } if (irq_num != msix_vecs) { - efa_err(&dev->pdev->dev, + dev_err(&dev->pdev->dev, "Allocated %d MSI-X (out of %d requested)\n", irq_num, msix_vecs); return -ENOSPC; } - set_bit(EFA_MSIX_ENABLED_BIT, &dev->state); - return 0; } @@ -502,13 +484,13 @@ static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); if (err) { - efa_err(&pdev->dev, "pci_set_dma_mask failed 0x%x\n", err); + dev_err(&pdev->dev, "pci_set_dma_mask failed 0x%x\n", err); return err; } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); if (err) { - efa_err(&pdev->dev, + dev_err(&pdev->dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", err); return err; @@ -517,7 +499,7 @@ static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev) return 0; } -static int efa_probe_device(struct pci_dev *pdev) +static struct efa_dev *efa_probe_device(struct pci_dev *pdev) { struct efa_com_dev *edev; struct efa_dev *dev; @@ -526,36 +508,31 @@ static int efa_probe_device(struct pci_dev *pdev) err = pci_enable_device_mem(pdev); if (err) { - efa_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); - return err; + dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); + return ERR_PTR(err); } pci_set_master(pdev); dev = (struct efa_dev *)ib_alloc_device(sizeof(*dev)); if (!dev) { - efa_err(&pdev->dev, "Device alloc failed\n"); + dev_err(&pdev->dev, "Device alloc failed\n"); err = -ENOMEM; goto err_disable_device; } - edev = kzalloc(sizeof(*edev), GFP_KERNEL); - if (!edev) { - err = -ENOMEM; - goto err_ibdev_destroy; - } - pci_set_drvdata(pdev, dev); + edev = &dev->edev; + edev->efa_dev = dev; edev->dmadev = &pdev->dev; - dev->edev = edev; dev->pdev = pdev; bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK; err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); if (err) { - efa_err(&pdev->dev, "pci_request_selected_regions failed %d\n", + 
dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", err); - goto err_free_efa_dev; + goto err_ibdev_destroy; } dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR); @@ -567,20 +544,20 @@ static int efa_probe_device(struct pci_dev *pdev) dev->reg_bar_addr, dev->reg_bar_len); if (!edev->reg_bar) { - efa_err(&pdev->dev, "Failed to remap register bar\n"); + dev_err(&pdev->dev, "Failed to remap register bar\n"); err = -EFAULT; goto err_release_bars; } err = efa_com_mmio_reg_read_init(edev); if (err) { - efa_err(&pdev->dev, "Failed to init readless MMIO\n"); + dev_err(&pdev->dev, "Failed to init readless MMIO\n"); goto err_iounmap; } err = efa_device_init(edev, pdev); if (err) { - efa_err(&pdev->dev, "EFA device init failed\n"); + dev_err(&pdev->dev, "EFA device init failed\n"); if (err == -ETIME) err = -EPROBE_DEFER; goto err_reg_read_destroy; @@ -610,7 +587,7 @@ static int efa_probe_device(struct pci_dev *pdev) if (err) goto err_admin_destroy; - return 0; + return dev; err_admin_destroy: efa_com_admin_destroy(edev); @@ -624,13 +601,11 @@ static int efa_probe_device(struct pci_dev *pdev) devm_iounmap(&pdev->dev, edev->reg_bar); err_release_bars: efa_release_bars(dev, EFA_BASE_BAR_MASK); -err_free_efa_dev: - kfree(edev); err_ibdev_destroy: ib_dealloc_device(&dev->ibdev); err_disable_device: pci_disable_device(pdev); - return err; + return ERR_PTR(err); } static void efa_remove_device(struct pci_dev *pdev) @@ -638,7 +613,7 @@ static void efa_remove_device(struct pci_dev *pdev) struct efa_dev *dev = pci_get_drvdata(pdev); struct efa_com_dev *edev; - edev = dev->edev; + edev = &dev->edev; efa_sysfs_destroy(dev); efa_com_admin_destroy(edev); efa_free_mgmnt_irq(dev); @@ -646,7 +621,6 @@ static void efa_remove_device(struct pci_dev *pdev) efa_com_mmio_reg_read_destroy(edev); devm_iounmap(&pdev->dev, edev->reg_bar); efa_release_bars(dev, EFA_BASE_BAR_MASK); - kfree(edev); ib_dealloc_device(&dev->ibdev); pci_disable_device(pdev); } @@ -656,11 +630,10 @@ static 
int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct efa_dev *dev; int err; - err = efa_probe_device(pdev); - if (err) - return err; + dev = efa_probe_device(pdev); + if (IS_ERR(dev)) + return PTR_ERR(dev); - dev = pci_get_drvdata(pdev); err = efa_ib_device_add(dev); if (err) goto err_remove_device; @@ -674,7 +647,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static void efa_remove(struct pci_dev *pdev) { - struct efa_dev *dev = (struct efa_dev *)pci_get_drvdata(pdev); + struct efa_dev *dev = pci_get_drvdata(pdev); efa_ib_device_remove(dev); efa_remove_device(pdev); @@ -777,8 +750,8 @@ static int efa_everbs_dev_init(struct efa_dev *dev, int devnum) EFA_EVERBS_DEVICE_NAME "%d", devnum); if (IS_ERR(dev->everbs_dev)) { - efa_err(&dev->ibdev.dev, "Failed to create device: %s%d\n", - EFA_EVERBS_DEVICE_NAME, devnum); + ibdev_err(&dev->ibdev, "Failed to create device: %s%d\n", + EFA_EVERBS_DEVICE_NAME, devnum); goto err; } diff --git a/kernel/linux/efa/efa_verbs.c b/kernel/linux/efa/efa_verbs.c index c0bbfc46..282435ea 100644 --- a/kernel/linux/efa/efa_verbs.c +++ b/kernel/linux/efa/efa_verbs.c @@ -138,7 +138,7 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr, *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir); if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) { - efa_err(&dev->ibdev.dev, "Failed to map DMA address\n"); + ibdev_err(&dev->ibdev, "Failed to map DMA address\n"); free_pages_exact(addr, size); return NULL; } @@ -146,24 +146,52 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr, return addr; } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) static void mmap_obj_entries_remove(struct efa_dev *dev, - struct efa_ucontext *ucontext, void *obj) + struct efa_ucontext *ucontext, + void *obj, + bool free) +{ + struct efa_mmap_entry *entry; + unsigned long mmap_page; + + xa_for_each(&ucontext->mmap_xa, mmap_page, entry) { + if (entry->obj != obj) + 
continue; + + xa_erase(&ucontext->mmap_xa, mmap_page); + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n", + entry->obj, entry->key, entry->address, + entry->length); + if (free) + kfree(entry); + } +} +#else +static void mmap_obj_entries_remove(struct efa_dev *dev, + struct efa_ucontext *ucontext, + void *obj, + bool free) { struct efa_mmap_entry *entry, *tmp; mutex_lock(&ucontext->lock); list_for_each_entry_safe(entry, tmp, &ucontext->pending_mmaps, list) { - if (entry->obj == obj) { - list_del(&entry->list); - efa_dbg(&dev->ibdev.dev, - "mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n", - entry->obj, entry->key, entry->address, - entry->length); + if (entry->obj != obj) + continue; + + list_del(&entry->list); + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n", + entry->obj, entry->key, entry->address, + entry->length); + if (free) kfree(entry); - } } mutex_unlock(&ucontext->lock); } +#endif /* * Since we don't track munmaps, we can't know when a user stopped using his @@ -171,6 +199,26 @@ static void mmap_obj_entries_remove(struct efa_dev *dev, * This should be called on dealloc_ucontext in order to drain the mmap entries * and free the (unmapped) DMA buffers. 
*/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) +static void mmap_entries_remove_free(struct efa_dev *dev, + struct efa_ucontext *ucontext) +{ + struct efa_mmap_entry *entry; + unsigned long mmap_page; + + xa_for_each(&ucontext->mmap_xa, mmap_page, entry) { + xa_erase(&ucontext->mmap_xa, mmap_page); + ibdev_dbg(&dev->ibdev, + "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n", + entry->obj, entry->key, entry->address, entry->length); + if (get_mmap_flag(entry->key) == EFA_MMAP_DMA_PAGE) + /* DMA mapping is already gone, now free the pages */ + free_pages_exact(phys_to_virt(entry->address), + entry->length); + kfree(entry); + } +} +#else static void mmap_entries_remove_free(struct efa_dev *dev, struct efa_ucontext *ucontext) { @@ -179,9 +227,9 @@ static void mmap_entries_remove_free(struct efa_dev *dev, mutex_lock(&ucontext->lock); list_for_each_entry_safe(entry, tmp, &ucontext->pending_mmaps, list) { list_del(&entry->list); - efa_dbg(&dev->ibdev.dev, - "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n", - entry->obj, entry->key, entry->address, entry->length); + ibdev_dbg(&dev->ibdev, + "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n", + entry->obj, entry->key, entry->address, entry->length); if (get_mmap_flag(entry->key) == EFA_MMAP_DMA_PAGE) /* DMA mapping is already gone, now free the pages */ free_pages_exact(phys_to_virt(entry->address), @@ -190,7 +238,30 @@ static void mmap_entries_remove_free(struct efa_dev *dev, } mutex_unlock(&ucontext->lock); } +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) +static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev, + struct efa_ucontext *ucontext, + u64 key, + u64 len) +{ + struct efa_mmap_entry *entry; + u32 mmap_page; + + mmap_page = lower_32_bits(key >> PAGE_SHIFT); + entry = xa_load(&ucontext->mmap_xa, mmap_page); + if (!entry || entry->key != key || entry->length != len) + return NULL; + + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] key[0x%llx] addr[0x%llX] 
len[0x%llX] removed\n", + entry->obj, key, entry->address, + entry->length); + + return entry; +} +#else static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev, struct efa_ucontext *ucontext, u64 key, @@ -201,10 +272,10 @@ static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev, mutex_lock(&ucontext->lock); list_for_each_entry_safe(entry, tmp, &ucontext->pending_mmaps, list) { if (entry->key == key && entry->length == len) { - efa_dbg(&dev->ibdev.dev, - "mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n", - entry->obj, key, entry->address, - entry->length); + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] key[0x%llx] addr[0x%llX] len[0x%llX] removed\n", + entry->obj, key, entry->address, + entry->length); mutex_unlock(&ucontext->lock); return entry; } @@ -213,22 +284,59 @@ static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev, return NULL; } +#endif -static void mmap_entry_insert(struct efa_dev *dev, - struct efa_ucontext *ucontext, - struct efa_mmap_entry *entry, - u8 mmap_flag) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) +static int mmap_entry_insert(struct efa_dev *dev, + struct efa_ucontext *ucontext, + struct efa_mmap_entry *entry, + u8 mmap_flag) +{ + u32 mmap_page; + int err; + + err = xa_alloc(&ucontext->mmap_xa, &mmap_page, entry, xa_limit_32b, + GFP_KERNEL); + if (err) { + ibdev_dbg(&dev->ibdev, "mmap xarray full %d\n", err); + return err; + } + + entry->key = (u64)mmap_page << PAGE_SHIFT; + set_mmap_flag(&entry->key, mmap_flag); + + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] addr[0x%llx], len[0x%llx], key[0x%llx] inserted\n", + entry->obj, entry->address, entry->length, entry->key); + + return 0; +} +#else +static int mmap_entry_insert(struct efa_dev *dev, + struct efa_ucontext *ucontext, + struct efa_mmap_entry *entry, + u8 mmap_flag) { mutex_lock(&ucontext->lock); - entry->key = ucontext->mmap_key; + if (ucontext->mmap_page == U32_MAX) { + ibdev_dbg(&dev->ibdev, "Too many mmap pages\n"); + 
mutex_unlock(&ucontext->lock); + return -ENOMEM; + } + + entry->key = (u64)ucontext->mmap_page << PAGE_SHIFT; + ucontext->mmap_page++; set_mmap_flag(&entry->key, mmap_flag); - ucontext->mmap_key += PAGE_SIZE; list_add_tail(&entry->list, &ucontext->pending_mmaps); - efa_dbg(&dev->ibdev.dev, - "mmap: obj[%p] addr[0x%llx], len[0x%llx], key[0x%llx] inserted\n", - entry->obj, entry->address, entry->length, entry->key); mutex_unlock(&ucontext->lock); + + ibdev_dbg(&dev->ibdev, + "mmap: obj[%p] addr[0x%llx], len[0x%llx], key[0x%llx] inserted\n", + entry->obj, entry->address, entry->length, entry->key); + + return 0; } +#endif #ifdef HAVE_IB_QUERY_DEVICE_UDATA int efa_query_device(struct ib_device *ibdev, @@ -249,8 +357,8 @@ int efa_query_device(struct ib_device *ibdev, if (udata && udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { - efa_dbg(&ibdev->dev, - "Incompatible ABI params, udata not cleared\n"); + ibdev_dbg(ibdev, + "Incompatible ABI params, udata not cleared\n"); return -EINVAL; } #endif @@ -289,8 +397,8 @@ int efa_query_device(struct ib_device *ibdev, err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { - efa_dbg(&ibdev->dev, - "Failed to copy udata for query_device\n"); + ibdev_dbg(ibdev, + "Failed to copy udata for query_device\n"); return err; } } @@ -340,9 +448,9 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP) if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) { - efa_dbg(&dev->ibdev.dev, - "Unsupported qp_attr_mask[%#x] supported[%#x]\n", - qp_attr_mask, EFA_QUERY_QP_SUPP_MASK); + ibdev_dbg(&dev->ibdev, + "Unsupported qp_attr_mask[%#x] supported[%#x]\n", + qp_attr_mask, EFA_QUERY_QP_SUPP_MASK); return -EOPNOTSUPP; } @@ -350,7 +458,7 @@ int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, memset(qp_init_attr, 0, sizeof(*qp_init_attr)); params.qp_handle = qp->qp_handle; - err = efa_com_query_qp(dev->edev, ¶ms, &result); + err = 
efa_com_query_qp(&dev->edev, ¶ms, &result); if (err) return err; @@ -401,7 +509,7 @@ static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn) .pdn = pdn, }; - return efa_com_dealloc_pd(dev->edev, ¶ms); + return efa_com_dealloc_pd(&dev->edev, ¶ms); } int efa_alloc_pd(struct ib_pd *ibpd, @@ -415,7 +523,7 @@ int efa_alloc_pd(struct ib_pd *ibpd, int err; if (!udata) { - efa_dbg(&dev->ibdev.dev, "udata is NULL\n"); + ibdev_dbg(&dev->ibdev, "udata is NULL\n"); err = -EOPNOTSUPP; goto err_out; } @@ -427,13 +535,13 @@ int efa_alloc_pd(struct ib_pd *ibpd, /* WA for e093111ddb6c ("IB/core: Fix input len in multiple user verbs") */ !ib_is_udata_cleared(udata, 0, udata->inlen - sizeof(struct ib_uverbs_cmd_hdr))) { #endif - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, udata not cleared\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, udata not cleared\n"); err = -EINVAL; goto err_out; } - err = efa_com_alloc_pd(dev->edev, &result); + err = efa_com_alloc_pd(&dev->edev, &result); if (err) goto err_out; @@ -444,13 +552,13 @@ int efa_alloc_pd(struct ib_pd *ibpd, err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { - efa_dbg(&dev->ibdev.dev, - "Failed to copy udata for alloc_pd\n"); + ibdev_dbg(&dev->ibdev, + "Failed to copy udata for alloc_pd\n"); goto err_dealloc_pd; } } - efa_dbg(&dev->ibdev.dev, "Allocated pd[%d]\n", pd->pdn); + ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn); return 0; @@ -492,7 +600,7 @@ int efa_dealloc_pd(struct ib_pd *ibpd) struct efa_dev *dev = to_edev(ibpd->device); struct efa_pd *pd = to_epd(ibpd); - efa_dbg(&dev->ibdev.dev, "Dealloc pd[%d]\n", pd->pdn); + ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn); efa_pd_dealloc(dev, pd->pdn); kfree(pd); @@ -503,7 +611,7 @@ int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle) { struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle }; - return efa_com_destroy_qp(dev->edev, ¶ms); + return efa_com_destroy_qp(&dev->edev, ¶ms); } int 
efa_destroy_qp(struct ib_qp *ibqp) @@ -512,16 +620,16 @@ int efa_destroy_qp(struct ib_qp *ibqp) struct efa_qp *qp = to_eqp(ibqp); int err; - efa_dbg(&dev->ibdev.dev, "Destroy qp[%u]\n", ibqp->qp_num); + ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num); err = efa_destroy_qp_handle(dev, qp->qp_handle); if (err) return err; if (qp->rq_cpu_addr) { - efa_dbg(&dev->ibdev.dev, - "qp->cpu_addr[%p] freed: size[%lu], dma[%pad]\n", - qp->rq_cpu_addr, qp->rq_size, - &qp->rq_dma_addr); + ibdev_dbg(&dev->ibdev, + "qp->cpu_addr[%p] freed: size[%lu], dma[%pad]\n", + qp->rq_cpu_addr, qp->rq_size, + &qp->rq_dma_addr); dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size, DMA_TO_DEVICE); } @@ -536,35 +644,42 @@ static int qp_mmap_entries_setup(struct efa_qp *qp, struct efa_com_create_qp_params *params, struct efa_ibv_create_qp_resp *resp) { - struct efa_mmap_entry *rq_db_entry; - struct efa_mmap_entry *sq_db_entry; - struct efa_mmap_entry *rq_entry; - struct efa_mmap_entry *sq_entry; + struct efa_mmap_entry *rq_db_entry = NULL; + struct efa_mmap_entry *sq_db_entry = NULL; + struct efa_mmap_entry *rq_entry = NULL; + struct efa_mmap_entry *sq_entry = NULL; + int err; sq_db_entry = kzalloc(sizeof(*sq_db_entry), GFP_KERNEL); sq_entry = kzalloc(sizeof(*sq_entry), GFP_KERNEL); if (!sq_db_entry || !sq_entry) - goto err_alloc; + goto err_free_sq; if (qp->rq_size) { rq_entry = kzalloc(sizeof(*rq_entry), GFP_KERNEL); rq_db_entry = kzalloc(sizeof(*rq_db_entry), GFP_KERNEL); if (!rq_entry || !rq_db_entry) - goto err_alloc_rq; + goto err_free_rq; rq_db_entry->obj = qp; rq_entry->obj = qp; rq_entry->address = virt_to_phys(qp->rq_cpu_addr); rq_entry->length = qp->rq_size; - mmap_entry_insert(dev, ucontext, rq_entry, EFA_MMAP_DMA_PAGE); + err = mmap_entry_insert(dev, ucontext, rq_entry, + EFA_MMAP_DMA_PAGE); + if (err) + goto err_free_rq; resp->rq_mmap_key = rq_entry->key; resp->rq_mmap_size = qp->rq_size; rq_db_entry->address = dev->db_bar_addr + resp->rq_db_offset; 
rq_db_entry->length = PAGE_SIZE; - mmap_entry_insert(dev, ucontext, rq_db_entry, EFA_MMAP_IO_NC); + err = mmap_entry_insert(dev, ucontext, rq_db_entry, + EFA_MMAP_IO_NC); + if (err) + goto err_remove_entries; resp->rq_db_mmap_key = rq_db_entry->key; resp->rq_db_offset &= ~PAGE_MASK; } @@ -575,22 +690,29 @@ static int qp_mmap_entries_setup(struct efa_qp *qp, sq_db_entry->address = dev->db_bar_addr + resp->sq_db_offset; resp->sq_db_offset &= ~PAGE_MASK; sq_db_entry->length = PAGE_SIZE; - mmap_entry_insert(dev, ucontext, sq_db_entry, EFA_MMAP_IO_NC); + err = mmap_entry_insert(dev, ucontext, sq_db_entry, + EFA_MMAP_IO_NC); + if (err) + goto err_remove_entries; resp->sq_db_mmap_key = sq_db_entry->key; sq_entry->address = dev->mem_bar_addr + resp->llq_desc_offset; resp->llq_desc_offset &= ~PAGE_MASK; sq_entry->length = PAGE_ALIGN(params->sq_ring_size_in_bytes + resp->llq_desc_offset); - mmap_entry_insert(dev, ucontext, sq_entry, EFA_MMAP_IO_WC); + err = mmap_entry_insert(dev, ucontext, sq_entry, EFA_MMAP_IO_WC); + if (err) + goto err_remove_entries; resp->llq_desc_mmap_key = sq_entry->key; return 0; -err_alloc_rq: +err_remove_entries: + mmap_obj_entries_remove(dev, ucontext, qp, false); +err_free_rq: kfree(rq_entry); kfree(rq_db_entry); -err_alloc: +err_free_sq: kfree(sq_entry); kfree(sq_db_entry); return -ENOMEM; @@ -600,36 +722,36 @@ static int efa_qp_validate_cap(struct efa_dev *dev, struct ib_qp_init_attr *init_attr) { if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) { - efa_dbg(&dev->ibdev.dev, - "qp: requested send wr[%u] exceeds the max[%u]\n", - init_attr->cap.max_send_wr, - dev->dev_attr.max_sq_depth); + ibdev_dbg(&dev->ibdev, + "qp: requested send wr[%u] exceeds the max[%u]\n", + init_attr->cap.max_send_wr, + dev->dev_attr.max_sq_depth); return -EINVAL; } if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) { - efa_dbg(&dev->ibdev.dev, - "qp: requested receive wr[%u] exceeds the max[%u]\n", - init_attr->cap.max_recv_wr, - 
dev->dev_attr.max_rq_depth); + ibdev_dbg(&dev->ibdev, + "qp: requested receive wr[%u] exceeds the max[%u]\n", + init_attr->cap.max_recv_wr, + dev->dev_attr.max_rq_depth); return -EINVAL; } if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) { - efa_dbg(&dev->ibdev.dev, - "qp: requested sge send[%u] exceeds the max[%u]\n", - init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge); + ibdev_dbg(&dev->ibdev, + "qp: requested sge send[%u] exceeds the max[%u]\n", + init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge); return -EINVAL; } if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) { - efa_dbg(&dev->ibdev.dev, - "qp: requested sge recv[%u] exceeds the max[%u]\n", - init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge); + ibdev_dbg(&dev->ibdev, + "qp: requested sge recv[%u] exceeds the max[%u]\n", + init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge); return -EINVAL; } if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) { - efa_dbg(&dev->ibdev.dev, - "qp: requested inline data[%u] exceeds the max[%u]\n", - init_attr->cap.max_inline_data, - dev->dev_attr.inline_buf_size); + ibdev_dbg(&dev->ibdev, + "qp: requested inline data[%u] exceeds the max[%u]\n", + init_attr->cap.max_inline_data, + dev->dev_attr.inline_buf_size); return -EINVAL; } @@ -641,18 +763,18 @@ static int efa_qp_validate_attr(struct efa_dev *dev, { if (init_attr->qp_type != IB_QPT_DRIVER && init_attr->qp_type != IB_QPT_UD) { - efa_dbg(&dev->ibdev.dev, - "Unsupported qp type %d\n", init_attr->qp_type); + ibdev_dbg(&dev->ibdev, + "Unsupported qp type %d\n", init_attr->qp_type); return -EOPNOTSUPP; } if (init_attr->srq) { - efa_dbg(&dev->ibdev.dev, "SRQ is not supported\n"); + ibdev_dbg(&dev->ibdev, "SRQ is not supported\n"); return -EOPNOTSUPP; } if (init_attr->create_flags) { - efa_dbg(&dev->ibdev.dev, "Unsupported create flags\n"); + ibdev_dbg(&dev->ibdev, "Unsupported create flags\n"); return -EOPNOTSUPP; } @@ -673,7 +795,7 @@ struct ib_qp *efa_create_qp(struct ib_pd 
*ibpd, int err; if (!udata) { - efa_dbg(&dev->ibdev.dev, "udata is NULL\n"); + ibdev_dbg(&dev->ibdev, "udata is NULL\n"); err = -EOPNOTSUPP; goto err_out; } @@ -690,8 +812,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, goto err_out; if (!field_avail(cmd, driver_qp_type, udata->inlen)) { - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, no input udata\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, no input udata\n"); err = -EINVAL; goto err_out; } @@ -705,8 +827,8 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, !ib_is_udata_cleared(udata, sizeof(cmd), udata->inlen - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr))) { #endif - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, unknown fields in udata\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } @@ -714,14 +836,14 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, err = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)); if (err) { - efa_dbg(&dev->ibdev.dev, - "Cannot copy udata for create_qp\n"); + ibdev_dbg(&dev->ibdev, + "Cannot copy udata for create_qp\n"); goto err_out; } if (cmd.comp_mask) { - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, unknown fields in udata\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } @@ -740,15 +862,15 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) { create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD; } else { - efa_dbg(&dev->ibdev.dev, - "Unsupported qp type %d driver qp type %d\n", - init_attr->qp_type, cmd.driver_qp_type); + ibdev_dbg(&dev->ibdev, + "Unsupported qp type %d driver qp type %d\n", + init_attr->qp_type, cmd.driver_qp_type); err = -EOPNOTSUPP; goto err_free_qp; } - efa_dbg(&dev->ibdev.dev, "Create QP: qp type %d driver qp type %#x\n", - init_attr->qp_type, cmd.driver_qp_type); + ibdev_dbg(&dev->ibdev, "Create QP: qp type %d 
driver qp type %#x\n", + init_attr->qp_type, cmd.driver_qp_type); create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx; create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx; create_qp_params.sq_depth = init_attr->cap.max_send_wr; @@ -765,13 +887,13 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, goto err_free_qp; } - efa_dbg(&dev->ibdev.dev, - "qp->cpu_addr[%p] allocated: size[%lu], dma[%pad]\n", - qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr); + ibdev_dbg(&dev->ibdev, + "qp->cpu_addr[%p] allocated: size[%lu], dma[%pad]\n", + qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr); create_qp_params.rq_base_addr = qp->rq_dma_addr; } - err = efa_com_create_qp(dev->edev, &create_qp_params, + err = efa_com_create_qp(&dev->edev, &create_qp_params, &create_qp_resp); if (err) goto err_free_mapped; @@ -800,19 +922,19 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd, err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { - efa_dbg(&dev->ibdev.dev, - "Failed to copy udata for qp[%u]\n", - create_qp_resp.qp_num); + ibdev_dbg(&dev->ibdev, + "Failed to copy udata for qp[%u]\n", + create_qp_resp.qp_num); goto err_mmap_remove; } } - efa_dbg(&dev->ibdev.dev, "Created qp[%d]\n", qp->ibqp.qp_num); + ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num); return &qp->ibqp; err_mmap_remove: - mmap_obj_entries_remove(dev, ucontext, qp); + mmap_obj_entries_remove(dev, ucontext, qp, true); err_destroy_qp: efa_destroy_qp_handle(dev, create_qp_resp.qp_handle); err_free_mapped: @@ -838,9 +960,9 @@ static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp, IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN) if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) { - efa_dbg(&dev->ibdev.dev, - "Unsupported qp_attr_mask[%#x] supported[%#x]\n", - qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK); + ibdev_dbg(&dev->ibdev, + "Unsupported qp_attr_mask[%#x] supported[%#x]\n", + qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK); return -EOPNOTSUPP; } @@ 
-851,17 +973,17 @@ static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp, if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD, qp_attr_mask, IB_LINK_LAYER_UNSPECIFIED)) { #endif - efa_dbg(&dev->ibdev.dev, "Invalid modify QP parameters\n"); + ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n"); return -EINVAL; } if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) { - efa_dbg(&dev->ibdev.dev, "Can't change port num\n"); + ibdev_dbg(&dev->ibdev, "Can't change port num\n"); return -EOPNOTSUPP; } if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) { - efa_dbg(&dev->ibdev.dev, "Can't change pkey index\n"); + ibdev_dbg(&dev->ibdev, "Can't change pkey index\n"); return -EOPNOTSUPP; } @@ -879,14 +1001,14 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int err; if (!udata) { - efa_dbg(&dev->ibdev.dev, "udata is NULL\n"); + ibdev_dbg(&dev->ibdev, "udata is NULL\n"); return -EOPNOTSUPP; } if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) { - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, udata not cleared\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, udata not cleared\n"); return -EINVAL; } @@ -924,7 +1046,7 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, params.sq_psn = qp_attr->sq_psn; } - err = efa_com_modify_qp(dev->edev, ¶ms); + err = efa_com_modify_qp(&dev->edev, ¶ms); if (err) return err; @@ -937,7 +1059,7 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx) { struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx }; - return efa_com_destroy_cq(dev->edev, ¶ms); + return efa_com_destroy_cq(&dev->edev, ¶ms); } int efa_destroy_cq(struct ib_cq *ibcq) @@ -946,9 +1068,9 @@ int efa_destroy_cq(struct ib_cq *ibcq) struct efa_cq *cq = to_ecq(ibcq); int err; - efa_dbg(&dev->ibdev.dev, - "Destroy cq[%d] virt[%p] freed: size[%lu], dma[%pad]\n", - cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); + ibdev_dbg(&dev->ibdev, + "Destroy cq[%d] 
virt[%p] freed: size[%lu], dma[%pad]\n", + cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); err = efa_destroy_cq_idx(dev, cq->cq_idx); if (err) @@ -965,6 +1087,7 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, struct efa_ibv_create_cq_resp *resp) { struct efa_mmap_entry *cq_entry; + int err; cq_entry = kzalloc(sizeof(*cq_entry), GFP_KERNEL); if (!cq_entry) @@ -974,7 +1097,12 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, cq_entry->address = virt_to_phys(cq->cpu_addr); cq_entry->length = cq->size; - mmap_entry_insert(dev, cq->ucontext, cq_entry, EFA_MMAP_DMA_PAGE); + err = mmap_entry_insert(dev, cq->ucontext, cq_entry, EFA_MMAP_DMA_PAGE); + if (err) { + kfree(cq_entry); + return err; + } + resp->q_mmap_key = cq_entry->key; resp->q_mmap_size = cq_entry->length; @@ -993,25 +1121,25 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, struct efa_cq *cq; int err; - efa_dbg(&ibdev->dev, "create_cq entries %d udata %p\n", entries, udata); + ibdev_dbg(ibdev, "create_cq entries %d udata %p\n", entries, udata); if (entries < 1 || entries > dev->dev_attr.max_cq_depth) { - efa_dbg(&ibdev->dev, - "cq: requested entries[%u] non-positive or greater than max[%u]\n", - entries, dev->dev_attr.max_cq_depth); + ibdev_dbg(ibdev, + "cq: requested entries[%u] non-positive or greater than max[%u]\n", + entries, dev->dev_attr.max_cq_depth); err = -EINVAL; goto err_out; } if (!udata) { - efa_dbg(&ibdev->dev, "udata is NULL\n"); + ibdev_dbg(ibdev, "udata is NULL\n"); err = -EOPNOTSUPP; goto err_out; } if (!field_avail(cmd, num_sub_cqs, udata->inlen)) { - efa_dbg(&ibdev->dev, - "Incompatible ABI params, no input udata\n"); + ibdev_dbg(ibdev, + "Incompatible ABI params, no input udata\n"); err = -EINVAL; goto err_out; } @@ -1025,8 +1153,8 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, !ib_is_udata_cleared(udata, sizeof(cmd), udata->inlen - sizeof(cmd) - sizeof(struct 
ib_uverbs_cmd_hdr))) { #endif - efa_dbg(&ibdev->dev, - "Incompatible ABI params, unknown fields in udata\n"); + ibdev_dbg(ibdev, + "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } @@ -1034,28 +1162,28 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, err = ib_copy_from_udata(&cmd, udata, min(sizeof(cmd), udata->inlen)); if (err) { - efa_dbg(&ibdev->dev, "Cannot copy udata for create_cq\n"); + ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n"); goto err_out; } if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) { - efa_dbg(&ibdev->dev, - "Incompatible ABI params, unknown fields in udata\n"); + ibdev_dbg(ibdev, + "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; goto err_out; } if (!cmd.cq_entry_size) { - efa_dbg(&ibdev->dev, - "Invalid entry size [%u]\n", cmd.cq_entry_size); + ibdev_dbg(ibdev, + "Invalid entry size [%u]\n", cmd.cq_entry_size); err = -EINVAL; goto err_out; } if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) { - efa_dbg(&ibdev->dev, - "Invalid number of sub cqs[%u] expected[%u]\n", - cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq); + ibdev_dbg(ibdev, + "Invalid number of sub cqs[%u] expected[%u]\n", + cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq); err = -EINVAL; goto err_out; } @@ -1080,7 +1208,7 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, params.dma_addr = cq->dma_addr; params.entry_size_in_bytes = cmd.cq_entry_size; params.num_sub_cqs = cmd.num_sub_cqs; - err = efa_com_create_cq(dev->edev, ¶ms, &result); + err = efa_com_create_cq(&dev->edev, ¶ms, &result); if (err) goto err_free_mapped; @@ -1091,8 +1219,8 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, err = cq_mmap_entries_setup(dev, cq, &resp); if (err) { - efa_dbg(&ibdev->dev, - "Could not setup cq[%u] mmap entries\n", cq->cq_idx); + ibdev_dbg(ibdev, + "Could not setup cq[%u] mmap entries\n", cq->cq_idx); goto err_destroy_cq; } @@ -1100,20 
+1228,20 @@ static struct ib_cq *do_create_cq(struct ib_device *ibdev, int entries, err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { - efa_dbg(&ibdev->dev, - "Failed to copy udata for create_cq\n"); + ibdev_dbg(ibdev, + "Failed to copy udata for create_cq\n"); goto err_mmap_remove; } } - efa_dbg(&ibdev->dev, - "Created cq[%d], cq depth[%u]. dma[%pad] virt[%p]\n", - cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr); + ibdev_dbg(ibdev, + "Created cq[%d], cq depth[%u]. dma[%pad] virt[%p]\n", + cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr); return &cq->ibcq; err_mmap_remove: - mmap_obj_entries_remove(dev, to_eucontext(ibucontext), cq); + mmap_obj_entries_remove(dev, to_eucontext(ibucontext), cq, true); err_destroy_cq: efa_destroy_cq_idx(dev, cq->cq_idx); err_free_mapped: @@ -1153,38 +1281,65 @@ static int umem_to_page_list(struct efa_dev *dev, u8 hp_shift) { u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT); - unsigned int page_idx = 0; - unsigned int hp_idx = 0; -#ifndef HAVE_UMEM_SCATTERLIST_IF - struct ib_umem_chunk *chunk; -#else +#ifdef HAVE_SG_DMA_PAGE_ITER + struct sg_dma_page_iter sg_iter; +#elif defined(HAVE_UMEM_SCATTERLIST_IF) struct scatterlist *sg; -#endif unsigned int entry; +#else + struct ib_umem_chunk *chunk; + unsigned int entry; +#endif + unsigned int page_idx = 0; + unsigned int hp_idx = 0; #ifdef HAVE_UMEM_PAGE_SHIFT if (umem->page_shift != PAGE_SHIFT) { - efa_dbg(&dev->ibdev.dev, - "umem invalid page shift %d\n", umem->page_shift); + ibdev_dbg(&dev->ibdev, + "umem invalid page shift %d\n", umem->page_shift); #else if (umem->page_size != PAGE_SIZE) { - efa_dbg(&dev->ibdev.dev, - "umem invalid page size %d\n", umem->page_size); + ibdev_dbg(&dev->ibdev, + "umem invalid page size %d\n", umem->page_size); #endif return -EINVAL; } - efa_dbg(&dev->ibdev.dev, "hp_cnt[%u], pages_in_hp[%u]\n", - hp_cnt, pages_in_hp); + ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n", + hp_cnt, pages_in_hp); 
-#ifndef HAVE_UMEM_SCATTERLIST_IF +#ifdef HAVE_SG_DMA_PAGE_ITER + for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { + if (page_idx % pages_in_hp == 0) { + page_list[hp_idx] = sg_page_iter_dma_address(&sg_iter); + hp_idx++; + } + + page_idx++; + } +#elif defined(HAVE_UMEM_SCATTERLIST_IF) + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { + if (sg_dma_len(sg) != PAGE_SIZE) { + ibdev_dbg(&dev->ibdev, + "sg_dma_len[%u] != PAGE_SIZE[%lu]\n", + sg_dma_len(sg), PAGE_SIZE); + return -EINVAL; + } + + if (page_idx % pages_in_hp == 0) { + page_list[hp_idx] = sg_dma_address(sg); + hp_idx++; + } + page_idx++; + } +#else list_for_each_entry(chunk, &umem->chunk_list, list) { for (entry = 0; entry < chunk->nents; entry++) { if (sg_dma_len(&chunk->page_list[entry]) != PAGE_SIZE) { - efa_dbg(&dev->ibdev.dev, - "sg_dma_len[%u] != PAGE_SIZE[%lu]\n", - sg_dma_len(&chunk->page_list[entry]), - PAGE_SIZE); + ibdev_dbg(&dev->ibdev, + "sg_dma_len[%u] != PAGE_SIZE[%lu]\n", + sg_dma_len(&chunk->page_list[entry]), + PAGE_SIZE); return -EINVAL; } @@ -1196,22 +1351,7 @@ static int umem_to_page_list(struct efa_dev *dev, page_idx++; } } -#else /* HAVE_UMEM_SCATTERLIST_IF */ - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - if (sg_dma_len(sg) != PAGE_SIZE) { - efa_dbg(&dev->ibdev.dev, - "sg_dma_len[%u] != PAGE_SIZE[%lu]\n", - sg_dma_len(sg), PAGE_SIZE); - return -EINVAL; - } - - if (page_idx % pages_in_hp == 0) { - page_list[hp_idx] = sg_dma_address(sg); - hp_idx++; - } - page_idx++; - } -#endif /* HAVE_UMEM_SCATTERLIST_IF */ +#endif return 0; } @@ -1267,9 +1407,9 @@ static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl) if (!chunk_list->chunks) return -ENOMEM; - efa_dbg(&dev->ibdev.dev, - "chunk_list_size[%u] - pages[%u]\n", chunk_list_size, - page_cnt); + ibdev_dbg(&dev->ibdev, + "chunk_list_size[%u] - pages[%u]\n", chunk_list_size, + page_cnt); /* allocate chunk buffers: */ for (i = 0; i < chunk_list_size; i++) { @@ -1309,14 +1449,14 @@ 
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl) chunk_list->chunks[i].length, DMA_TO_DEVICE); if (dma_mapping_error(&dev->pdev->dev, dma_addr)) { - efa_err(&dev->ibdev.dev, - "chunk[%u] dma_map_failed\n", i); + ibdev_err(&dev->ibdev, + "chunk[%u] dma_map_failed\n", i); goto chunk_list_unmap; } chunk_list->chunks[i].dma_addr = dma_addr; - efa_dbg(&dev->ibdev.dev, - "chunk[%u] mapped at [%pad]\n", i, &dma_addr); + ibdev_dbg(&dev->ibdev, + "chunk[%u] mapped at [%pad]\n", i, &dma_addr); if (!i) break; @@ -1370,14 +1510,14 @@ static int pbl_continuous_initialize(struct efa_dev *dev, dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf, pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE); if (dma_mapping_error(&dev->pdev->dev, dma_addr)) { - efa_err(&dev->ibdev.dev, "Unable to map pbl to DMA address\n"); + ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n"); return -ENOMEM; } pbl->phys.continuous.dma_addr = dma_addr; - efa_dbg(&dev->ibdev.dev, - "pbl continuous - dma_addr = %pad, size[%u]\n", - &dma_addr, pbl->pbl_buf_size_in_bytes); + ibdev_dbg(&dev->ibdev, + "pbl continuous - dma_addr = %pad, size[%u]\n", + &dma_addr, pbl->pbl_buf_size_in_bytes); return 0; } @@ -1409,15 +1549,15 @@ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt; err = pbl_chunk_list_create(dev, pbl); if (err) { - efa_dbg(&dev->ibdev.dev, - "chunk_list creation failed[%d]\n", err); + ibdev_dbg(&dev->ibdev, + "chunk_list creation failed[%d]\n", err); goto err_chunk; } - efa_dbg(&dev->ibdev.dev, - "pbl indirect - size[%u], chunks[%u]\n", - pbl->pbl_buf_size_in_bytes, - pbl->phys.indirect.chunk_list.size); + ibdev_dbg(&dev->ibdev, + "pbl indirect - size[%u], chunks[%u]\n", + pbl->pbl_buf_size_in_bytes, + pbl->phys.indirect.chunk_list.size); return 0; @@ -1472,9 +1612,9 @@ static int pbl_create(struct efa_dev *dev, goto err_indirect; } - efa_dbg(&dev->ibdev.dev, - "user_pbl_created: user_pages[%u], 
continuous[%u]\n", - hp_cnt, pbl->physically_continuous); + ibdev_dbg(&dev->ibdev, + "user_pbl_created: user_pages[%u], continuous[%u]\n", + hp_cnt, pbl->physically_continuous); return 0; @@ -1509,8 +1649,8 @@ static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr, if (err) return err; - efa_dbg(&dev->ibdev.dev, - "inline_pbl_array - pages[%u]\n", params->page_num); + ibdev_dbg(&dev->ibdev, + "inline_pbl_array - pages[%u]\n", params->page_num); return 0; } @@ -1525,7 +1665,7 @@ static int efa_create_pbl(struct efa_dev *dev, err = pbl_create(dev, pbl, mr->umem, params->page_num, params->page_shift); if (err) { - efa_dbg(&dev->ibdev.dev, "Failed to create pbl[%d]\n", err); + ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err); return err; } @@ -1647,7 +1787,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, int err; if (!udata) { - efa_dbg(&dev->ibdev.dev, "udata is NULL\n"); + ibdev_dbg(&dev->ibdev, "udata is NULL\n"); err = -EOPNOTSUPP; goto err_out; } @@ -1659,16 +1799,16 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, /* WA for e093111ddb6c ("IB/core: Fix input len in multiple user verbs") */ !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen) - sizeof(struct ib_uverbs_cmd_hdr))) { #endif - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI params, udata not cleared\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, udata not cleared\n"); err = -EINVAL; goto err_out; } if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) { - efa_dbg(&dev->ibdev.dev, - "Unsupported access flags[%#x], supported[%#x]\n", - access_flags, EFA_SUPPORTED_ACCESS_FLAGS); + ibdev_dbg(&dev->ibdev, + "Unsupported access flags[%#x], supported[%#x]\n", + access_flags, EFA_SUPPORTED_ACCESS_FLAGS); err = -EOPNOTSUPP; goto err_out; } @@ -1683,8 +1823,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, access_flags, 0); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); - efa_dbg(&dev->ibdev.dev, - "Failed to pin 
and map user space memory[%d]\n", err); + ibdev_dbg(&dev->ibdev, + "Failed to pin and map user space memory[%d]\n", err); goto err_free; } @@ -1696,9 +1836,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, efa_cont_pages(mr->umem, start, max_page_shift, &npages, ¶ms.page_shift, ¶ms.page_num); - efa_dbg(&dev->ibdev.dev, - "start %#llx length %#llx npages %d params.page_shift %u params.page_num %u\n", - start, length, npages, params.page_shift, params.page_num); + ibdev_dbg(&dev->ibdev, + "start %#llx length %#llx npages %d params.page_shift %u params.page_num %u\n", + start, length, npages, params.page_shift, params.page_num); inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array); if (params.page_num <= inline_size) { @@ -1706,7 +1846,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, if (err) goto err_unmap; - err = efa_com_register_mr(dev->edev, ¶ms, &result); + err = efa_com_register_mr(&dev->edev, ¶ms, &result); if (err) goto err_unmap; } else { @@ -1714,7 +1854,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, if (err) goto err_unmap; - err = efa_com_register_mr(dev->edev, ¶ms, &result); + err = efa_com_register_mr(&dev->edev, ¶ms, &result); pbl_destroy(dev, &pbl); if (err) @@ -1726,7 +1866,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) mr->ibmr.length = length; #endif - efa_dbg(&dev->ibdev.dev, "Registered mr[%d]\n", mr->ibmr.lkey); + ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey); return &mr->ibmr; @@ -1746,11 +1886,11 @@ int efa_dereg_mr(struct ib_mr *ibmr) struct efa_mr *mr = to_emr(ibmr); int err; - efa_dbg(&dev->ibdev.dev, "Deregister mr[%d]\n", ibmr->lkey); + ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); if (mr->umem) { params.l_key = mr->ibmr.lkey; - err = efa_com_dereg_mr(dev->edev, ¶ms); + err = efa_com_dereg_mr(&dev->edev, ¶ms); if (err) return err; ib_umem_release(mr->umem); @@ 
-1770,7 +1910,7 @@ int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num, err = ib_query_port(ibdev, port_num, &attr); if (err) { - efa_dbg(&ibdev->dev, "Couldn't query port err[%d]\n", err); + ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err); return err; } @@ -1787,7 +1927,7 @@ static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn) .uarn = uarn, }; - return efa_com_dealloc_uar(dev->edev, ¶ms); + return efa_com_dealloc_uar(&dev->edev, ¶ms); } int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) @@ -1803,13 +1943,17 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) * we will ack input fields in our response. */ - err = efa_com_alloc_uar(dev->edev, &result); + err = efa_com_alloc_uar(&dev->edev, &result); if (err) goto err_out; ucontext->uarn = result.uarn; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) + xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC); +#else mutex_init(&ucontext->lock); INIT_LIST_HEAD(&ucontext->pending_mmaps); +#endif #ifdef HAVE_IB_QUERY_DEVICE_UDATA resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE; @@ -1890,9 +2034,9 @@ static int __efa_mmap(struct efa_dev *dev, u64 length = entry->length; int err; - efa_dbg(&dev->ibdev.dev, - "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n", - address, length, mmap_flag); + ibdev_dbg(&dev->ibdev, + "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n", + address, length, mmap_flag); switch (mmap_flag) { case EFA_MMAP_IO_NC: @@ -1929,9 +2073,9 @@ static int __efa_mmap(struct efa_dev *dev, } if (err) { - efa_dbg(&dev->ibdev.dev, - "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n", - address, length, mmap_flag, err); + ibdev_dbg(&dev->ibdev, + "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n", + address, length, mmap_flag, err); return err; } @@ -1947,21 +2091,21 @@ int efa_mmap(struct ib_ucontext *ibucontext, u64 key = vma->vm_pgoff << PAGE_SHIFT; struct 
efa_mmap_entry *entry; - efa_dbg(&dev->ibdev.dev, - "start 0x%lx, end 0x%lx, length = 0x%llx, key = 0x%llx\n", - vma->vm_start, vma->vm_end, length, key); + ibdev_dbg(&dev->ibdev, + "start 0x%lx, end 0x%lx, length = 0x%llx, key = 0x%llx\n", + vma->vm_start, vma->vm_end, length, key); if (length % PAGE_SIZE != 0) { - efa_dbg(&dev->ibdev.dev, - "length[0x%llX] is not page size aligned[0x%lX]\n", - length, PAGE_SIZE); + ibdev_dbg(&dev->ibdev, + "length[0x%llX] is not page size aligned[0x%lX]\n", + length, PAGE_SIZE); return -EINVAL; } entry = mmap_entry_get(dev, ucontext, key, length); if (!entry) { - efa_dbg(&dev->ibdev.dev, - "key[0x%llX] does not have valid entry\n", key); + ibdev_dbg(&dev->ibdev, + "key[0x%llX] does not have valid entry\n", key); return -EINVAL; } @@ -2068,15 +2212,22 @@ static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah) .pdn = to_epd(ah->ibah.pd)->pdn, }; - return efa_com_destroy_ah(dev->edev, ¶ms); + return efa_com_destroy_ah(&dev->edev, ¶ms); } #ifdef HAVE_CREATE_AH_UDATA #ifdef HAVE_CREATE_AH_RDMA_ATTR +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *ah_attr, + u32 flags, struct ib_udata *udata) #else +struct ib_ah *efa_create_ah(struct ib_pd *ibpd, + struct rdma_ah_attr *ah_attr, + struct ib_udata *udata) +#endif +#else struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr, struct ib_udata *udata) @@ -2095,9 +2246,18 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, struct efa_ah *ah; int err; +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS + if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) { + ibdev_dbg(&dev->ibdev, + "Create address handle is not supported in atomic context\n"); + err = -EOPNOTSUPP; + goto err_out; + } +#endif + #ifdef HAVE_CREATE_AH_UDATA if (!udata) { - efa_dbg(&dev->ibdev.dev, "udata is NULL\n"); + ibdev_dbg(&dev->ibdev, "udata is NULL\n"); err = -EOPNOTSUPP; goto err_out; } @@ -2109,8 +2269,7 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, /* 
WA for e093111ddb6c ("IB/core: Fix input len in multiple user verbs") */ !ib_is_udata_cleared(udata, 0, udata->inlen - sizeof(struct ib_uverbs_cmd_hdr))) { #endif - efa_dbg(&dev->ibdev.dev, - "Incompatiable ABI params\n"); + ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n"); err = -EINVAL; goto err_out; } @@ -2125,7 +2284,7 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, memcpy(params.dest_addr, ah_attr->grh.dgid.raw, sizeof(params.dest_addr)); params.pdn = to_epd(ibpd)->pdn; - err = efa_com_create_ah(dev->edev, &params, &result); + err = efa_com_create_ah(&dev->edev, &params, &result); if (err) goto err_free; @@ -2139,19 +2298,19 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { - efa_dbg(&dev->ibdev.dev, - "Failed to copy udata for create_ah response\n"); + ibdev_dbg(&dev->ibdev, + "Failed to copy udata for create_ah response\n"); goto err_destroy_ah; } } #else err = efa_add_ah_id(dev, ah_attr->grh.dgid.raw, result.ah); if (err) { - efa_dbg(&dev->ibdev.dev, "Failed to add AH id\n"); + ibdev_dbg(&dev->ibdev, "Failed to add AH id\n"); goto err_destroy_ah; } #endif - efa_dbg(&dev->ibdev.dev, "Created ah[%d]\n", ah->ah); + ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah); return &ah->ibah; @@ -2164,13 +2323,25 @@ struct ib_ah *efa_create_ah(struct ib_pd *ibpd, return ERR_PTR(err); } +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS +int efa_destroy_ah(struct ib_ah *ibah, u32 flags) +#else int efa_destroy_ah(struct ib_ah *ibah) +#endif { struct efa_dev *dev = to_edev(ibah->pd->device); struct efa_ah *ah = to_eah(ibah); int err; - efa_dbg(&dev->ibdev.dev, "Destroy ah[%d]\n", ah->ah); + ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah); + +#ifdef HAVE_CREATE_DESTROY_AH_FLAGS + if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) { + ibdev_dbg(&dev->ibdev, + "Destroy address handle is not supported in atomic context\n"); + return -EOPNOTSUPP; + } +#endif err = efa_ah_destroy(dev, ah); if (err) @@ -2195,7
+2366,7 @@ int efa_post_send(struct ib_qp *ibqp, { struct efa_dev *dev = to_edev(ibqp->device); - efa_warn(&dev->ibdev.dev, "Function not supported\n"); + ibdev_warn(&dev->ibdev, "Function not supported\n"); return -EOPNOTSUPP; } @@ -2211,7 +2382,7 @@ int efa_post_recv(struct ib_qp *ibqp, { struct efa_dev *dev = to_edev(ibqp->device); - efa_warn(&dev->ibdev.dev, "Function not supported\n"); + ibdev_warn(&dev->ibdev, "Function not supported\n"); return -EOPNOTSUPP; } @@ -2220,7 +2391,7 @@ int efa_poll_cq(struct ib_cq *ibcq, int num_entries, { struct efa_dev *dev = to_edev(ibcq->device); - efa_warn(&dev->ibdev.dev, "Function not supported\n"); + ibdev_warn(&dev->ibdev, "Function not supported\n"); return -EOPNOTSUPP; } @@ -2229,7 +2400,7 @@ int efa_req_notify_cq(struct ib_cq *ibcq, { struct efa_dev *dev = to_edev(ibcq->device); - efa_warn(&dev->ibdev.dev, "Function not supported\n"); + ibdev_warn(&dev->ibdev, "Function not supported\n"); return -EOPNOTSUPP; } @@ -2237,7 +2408,7 @@ struct ib_mr *efa_get_dma_mr(struct ib_pd *ibpd, int acc) { struct efa_dev *dev = to_edev(ibpd->device); - efa_warn(&dev->ibdev.dev, "Function not supported\n"); + ibdev_warn(&dev->ibdev, "Function not supported\n"); return ERR_PTR(-EOPNOTSUPP); } @@ -2265,7 +2436,7 @@ ssize_t efa_everbs_cmd_get_ah(struct efa_dev *dev, return -EFAULT; if (cmd.comp_mask) { - efa_dbg(&dev->ibdev.dev, + ibdev_dbg(&dev->ibdev, "Incompatible ABI params, unknown fields in udata\n"); return -EINVAL; } @@ -2274,7 +2445,7 @@ ssize_t efa_everbs_cmd_get_ah(struct efa_dev *dev, err = efa_get_ah_id(dev, cmd.gid, false, &resp.efa_address_handle); mutex_unlock(&dev->ah_list_lock); if (err) { - efa_dbg(&dev->ibdev.dev, + ibdev_dbg(&dev->ibdev, "Couldn't find AH with specified GID\n"); return err; } @@ -2304,8 +2475,8 @@ ssize_t efa_everbs_cmd_get_ex_dev_attrs(struct efa_dev *dev, return -EFAULT; if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_20)) { - efa_dbg(&dev->ibdev.dev, - "Incompatible ABI 
params, unknown fields in udata\n"); + ibdev_dbg(&dev->ibdev, + "Incompatible ABI params, unknown fields in udata\n"); return -EINVAL; } diff --git a/kernel/linux/efa/kcompat.h b/kernel/linux/efa/kcompat.h index 672acb3b..d392a988 100644 --- a/kernel/linux/efa/kcompat.h +++ b/kernel/linux/efa/kcompat.h @@ -72,6 +72,18 @@ #define HAVE_DEV_PARENT #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define HAVE_IB_DEV_OPS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define HAVE_CREATE_DESTROY_AH_FLAGS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) +#define HAVE_SG_DMA_PAGE_ITER +#endif + /* End of upstream defines */ #if !defined(HAVE_CREATE_AH_UDATA) || !defined(HAVE_IB_QUERY_DEVICE_UDATA) @@ -115,4 +127,15 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata, #define IB_QPT_DRIVER 0xFF #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) +#define ibdev_err(_ibdev, format, arg...) \ + dev_err(&((struct ib_device *)(_ibdev))->dev, format, ##arg) +#define ibdev_dbg(_ibdev, format, arg...) \ + dev_dbg(&((struct ib_device *)(_ibdev))->dev, format, ##arg) +#define ibdev_warn(_ibdev, format, arg...) \ + dev_warn(&((struct ib_device *)(_ibdev))->dev, format, ##arg) +#define ibdev_info(_ibdev, format, arg...) 
\ + dev_info(&((struct ib_device *)(_ibdev))->dev, format, ##arg) +#endif + #endif /* _KCOMPAT_H_ */ diff --git a/kernel/linux/efa/rpm/Makefile b/kernel/linux/efa/rpm/Makefile index 44785b61..144a30a6 100644 --- a/kernel/linux/efa/rpm/Makefile +++ b/kernel/linux/efa/rpm/Makefile @@ -1,7 +1,7 @@ # Makefile for creating rpm of the Amazon EFA driver NAME = efa -VERSION = 0.9.0 +VERSION = 0.9.1 TOPDIR := $(shell git rev-parse --show-toplevel) TAG = master diff --git a/kernel/linux/efa/rpm/efa.spec b/kernel/linux/efa/rpm/efa.spec index ca294ced..99ac8018 100644 --- a/kernel/linux/efa/rpm/efa.spec +++ b/kernel/linux/efa/rpm/efa.spec @@ -23,8 +23,10 @@ Requires: dkms %kernel_module_package_buildreqs %post dkms add -m %{name} -v %{driver_version} -dkms build -m %{name} -v %{driver_version} -dkms install -m %{name} -v %{driver_version} +for kernel in $(/bin/ls /lib/modules); do + dkms build -m %{name} -v %{driver_version} -k $kernel + dkms install -m %{name} -v %{driver_version} -k $kernel +done %preun dkms remove -m %{name} -v %{driver_version} --all @@ -60,5 +62,8 @@ install -m 644 RELEASENOTES.md %{buildroot}%{install_path} /etc/modules-load.d/efa.conf %changelog +* Tue Apr 2 2019 Robert Wespetal - 0.9.1 +- Update EFA post install script to install module for all kernels + * Fri Mar 8 2019 Robert Wespetal - 0.9.0 - initial build for RHEL