Skip to content

Commit 2557ad8

Browse files
fengchengwen
authored and tmonjalo committed
dma/hisi_acc: add control path operations
This commit adds control path ops for the Hisilicon accelerator DMA driver. Signed-off-by: Chengwen Feng <[email protected]>
1 parent 5a9c32a commit 2557ad8

File tree

3 files changed

+237
-0
lines changed

3 files changed

+237
-0
lines changed

doc/guides/dmadevs/hisi_acc.rst

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,3 +43,13 @@ named ``hisi_zip-0-dma0`` and ``hisi_zip-0-dma1``.
4343
user could query API and algorithms,
4444
this driver can only match the device whose API is ``hisi_qm_v5``
4545
and algorithms contain ``udma``.
46+
47+
Device Configuration
48+
~~~~~~~~~~~~~~~~~~~~~
49+
50+
Configuration requirements:
51+
52+
* ``ring_size`` is obtained from the UACCE API and is a fixed value.
53+
* Only one ``vchan`` is supported per ``dmadev``.
54+
* Silent mode is not supported.
55+
* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM``.

drivers/dma/hisi_acc/hisi_acc_dmadev.c

Lines changed: 185 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,190 @@ RTE_LOG_REGISTER_DEFAULT(hacc_dma_logtype, INFO);
3434
#define HACC_DMA_ERR(hw, ...) \
3535
HACC_DMA_DEV_LOG(hw, ERR, __VA_ARGS__)
3636

37+
static int
38+
hacc_dma_info_get(const struct rte_dma_dev *dev,
39+
struct rte_dma_info *dev_info,
40+
uint32_t info_sz)
41+
{
42+
struct hacc_dma_dev *hw = dev->data->dev_private;
43+
44+
RTE_SET_USED(info_sz);
45+
46+
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
47+
RTE_DMA_CAPA_SVA |
48+
RTE_DMA_CAPA_OPS_COPY |
49+
RTE_DMA_CAPA_OPS_FILL;
50+
dev_info->max_vchans = 1;
51+
dev_info->max_desc = hw->sq_depth;
52+
dev_info->min_desc = hw->sq_depth;
53+
54+
return 0;
55+
}
56+
57+
/**
 * Device configure callback.
 *
 * Nothing is programmed here: the queue geometry comes from UACCE and is
 * fixed, so configuration is accepted as-is. Always returns 0.
 */
static int
hacc_dma_configure(struct rte_dma_dev *dev,
		   const struct rte_dma_conf *conf,
		   uint32_t conf_sz)
{
	RTE_SET_USED(conf_sz);
	RTE_SET_USED(conf);
	RTE_SET_USED(dev);

	return 0;
}
67+
68+
static int
69+
hacc_dma_start(struct rte_dma_dev *dev)
70+
{
71+
struct hacc_dma_dev *hw = dev->data->dev_private;
72+
int ret;
73+
74+
if ((*hw->sq_status != 0) || (*hw->cq_status != 0)) {
75+
HACC_DMA_ERR(hw, "detect dev is abnormal!");
76+
return -EIO;
77+
}
78+
79+
if (hw->started) {
80+
hw->ridx = 0;
81+
hw->cridx = 0;
82+
return 0;
83+
}
84+
85+
memset(hw->sqe, 0, hw->sqe_size * hw->sq_depth);
86+
memset(hw->cqe, 0, sizeof(struct hacc_dma_cqe) * hw->cq_depth);
87+
memset(hw->status, 0, sizeof(uint16_t) * hw->sq_depth);
88+
hw->ridx = 0;
89+
hw->cridx = 0;
90+
hw->sq_head = 0;
91+
hw->sq_tail = 0;
92+
hw->cq_sq_head = 0;
93+
hw->avail_sqes = hw->sq_depth - HACC_DMA_SQ_GAP_NUM - 1;
94+
hw->cq_head = 0;
95+
hw->cqs_completed = 0;
96+
hw->cqe_vld = 1;
97+
hw->submitted = 0;
98+
hw->completed = 0;
99+
hw->errors = 0;
100+
hw->invalid_lens = 0;
101+
hw->qfulls = 0;
102+
103+
ret = rte_uacce_queue_start(&hw->qctx);
104+
if (ret == 0)
105+
hw->started = true;
106+
107+
return ret;
108+
}
109+
110+
static int
111+
hacc_dma_stop(struct rte_dma_dev *dev)
112+
{
113+
struct hacc_dma_dev *hw = dev->data->dev_private;
114+
115+
if ((*hw->sq_status != 0) || (*hw->cq_status != 0)) {
116+
/* This indicates that the dev is abnormal. The correct error handling
117+
* is to close the dev (so that kernel module will perform error handling)
118+
* and apply for a new dev.
119+
* If an error code is returned here, the dev cannot be closed. Therefore,
120+
* zero is returned and an error trace is added.
121+
*/
122+
HACC_DMA_ERR(hw, "detect dev is abnormal!");
123+
return 0;
124+
}
125+
126+
return 0;
127+
}
128+
129+
/**
 * Release all per-device resources (dev_close callback).
 *
 * The dmadev framework guarantees the device is stopped before close, so
 * no hardware quiescing is done here. Resources are released in reverse
 * order of acquisition: the software status array, the DUS and MMIO
 * queue-file regions, then the UACCE queue itself.
 *
 * Always returns 0.
 */
static int
hacc_dma_close(struct rte_dma_dev *dev)
{
	struct hacc_dma_dev *hw = dev->data->dev_private;
	/* The dmadev already stopped */
	rte_free(hw->status);
	/* NOTE(review): unmap order (DUS before MMIO) mirrors reverse of the
	 * mapping order at create time — confirm against hacc_dma_create().
	 */
	rte_uacce_queue_unmap(&hw->qctx, RTE_UACCE_QFRT_DUS);
	rte_uacce_queue_unmap(&hw->qctx, RTE_UACCE_QFRT_MMIO);
	rte_uacce_queue_free(&hw->qctx);
	return 0;
}
140+
141+
/**
 * Virtual channel setup callback.
 *
 * Only one vchan exists and its ring is fixed by the UACCE queue, so
 * there is nothing to program. NOTE(review): the vchan configuration
 * (direction, ring size) appears to be validated by the dmadev layer
 * against the reported capabilities — confirm before relying on it here.
 *
 * Always returns 0.
 */
static int
hacc_dma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		     const struct rte_dma_vchan_conf *conf,
		     uint32_t conf_sz)
{
	RTE_SET_USED(conf_sz);
	RTE_SET_USED(conf);
	RTE_SET_USED(vchan);
	RTE_SET_USED(dev);

	return 0;
}
152+
153+
static int
154+
hacc_dma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
155+
struct rte_dma_stats *stats,
156+
uint32_t stats_sz)
157+
{
158+
struct hacc_dma_dev *hw = dev->data->dev_private;
159+
160+
RTE_SET_USED(vchan);
161+
RTE_SET_USED(stats_sz);
162+
stats->submitted = hw->submitted;
163+
stats->completed = hw->completed;
164+
stats->errors = hw->errors;
165+
166+
return 0;
167+
}
168+
169+
static int
170+
hacc_dma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
171+
{
172+
struct hacc_dma_dev *hw = dev->data->dev_private;
173+
174+
RTE_SET_USED(vchan);
175+
hw->submitted = 0;
176+
hw->completed = 0;
177+
hw->errors = 0;
178+
hw->invalid_lens = 0;
179+
hw->io_errors = 0;
180+
hw->qfulls = 0;
181+
182+
return 0;
183+
}
184+
185+
/**
 * Dump device state to the given stream (dev_dump callback).
 *
 * Prints three groups: hardware identity/health (SQ number, SQ/CQ status,
 * ring geometry), the software ring bookkeeping indexes, and the
 * statistic counters. Always returns 0.
 */
static int
hacc_dma_dump(const struct rte_dma_dev *dev, FILE *f)
{
	struct hacc_dma_dev *hw = dev->data->dev_private;

	/* Hardware identity and queue geometry. */
	fprintf(f, " sqn: %u sq_status: %s cq_status: %s\n"
		" sqe_size: %u sq_depth: %u sq_depth_mask: %u cq_depth: %u\n",
		hw->sqn, (*hw->sq_status != 0) ? "ERR" : "OK",
		(*hw->cq_status != 0) ? "ERR" : "OK",
		hw->sqe_size, hw->sq_depth, hw->sq_depth_mask, hw->cq_depth);
	/* Software ring indexes (see the SQE management diagram in the header). */
	fprintf(f, " ridx: %u cridx: %u\n"
		" sq_head: %u sq_tail: %u cq_sq_head: %u avail_sqes: %u\n"
		" cq_head: %u cqs_completed: %u cqe_vld: %u\n",
		hw->ridx, hw->cridx,
		hw->sq_head, hw->sq_tail, hw->cq_sq_head, hw->avail_sqes,
		hw->cq_head, hw->cqs_completed, hw->cqe_vld);
	/* Counters, including the driver-private ones not in rte_dma_stats. */
	fprintf(f, " submitted: %" PRIu64 " completed: %" PRIu64 " errors: %" PRIu64
		" invalid_lens: %" PRIu64 " io_errors: %" PRIu64 " qfulls: %" PRIu64 "\n",
		hw->submitted, hw->completed, hw->errors, hw->invalid_lens,
		hw->io_errors, hw->qfulls);

	return 0;
}
208+
209+
/* Control-path callback table registered with the dmadev framework at
 * device creation; data-path (copy/fill/submit/completed) handlers are
 * installed separately via dev->fp_obj.
 */
static const struct rte_dma_dev_ops hacc_dmadev_ops = {
	.dev_info_get = hacc_dma_info_get,
	.dev_configure = hacc_dma_configure,
	.dev_start = hacc_dma_start,
	.dev_stop = hacc_dma_stop,
	.dev_close = hacc_dma_close,
	.vchan_setup = hacc_dma_vchan_setup,
	.stats_get = hacc_dma_stats_get,
	.stats_reset = hacc_dma_stats_reset,
	.dev_dump = hacc_dma_dump,
};
220+
37221
static void
38222
hacc_dma_gen_dev_name(const struct rte_uacce_device *uacce_dev,
39223
uint16_t queue_id, char *dev_name, size_t size)
@@ -111,6 +295,7 @@ hacc_dma_create(struct rte_uacce_device *uacce_dev, uint16_t queue_id)
111295
}
112296

113297
dev->device = &uacce_dev->device;
298+
dev->dev_ops = &hacc_dmadev_ops;
114299
dev->fp_obj->dev_private = dev->data->dev_private;
115300

116301
hw = dev->data->dev_private;

drivers/dma/hisi_acc/hisi_acc_dmadev.h

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,9 @@
1313
#define HACC_DMA_DEVARG_QUEUES "queues"
1414
#define HACC_DMA_DEFAULT_QUEUES 1
1515

16+
#define HACC_DMA_CQ_DOORBELL_PACE 64
17+
#define HACC_DMA_SQ_GAP_NUM HACC_DMA_CQ_DOORBELL_PACE
18+
1619
struct hacc_dma_config {
1720
uint16_t queues;
1821

@@ -38,7 +41,45 @@ struct hacc_dma_dev {
3841
uint16_t sqn; /**< SQ global number, inited when created. */
3942
uint16_t sq_depth_mask; /**< SQ depth - 1, the SQ depth is power of 2. */
4043

44+
uint16_t ridx; /**< ring index which will assign to the next request. */
45+
uint16_t cridx; /**< ring index which returned by completed APIs. */
46+
47+
/**
48+
* SQE array management fields:
49+
*
50+
* -----------------------------------------------------
51+
* | SQE0 | SQE1 | SQE2 | ... | SQEx | ... | SQEn-1 |
52+
* -----------------------------------------------------
53+
* ^ ^ ^
54+
* | | |
55+
* sq_head cq_sq_head sq_tail
56+
*
57+
* sq_head: next index to the oldest completed request, this filed was
58+
* updated by completed* APIs.
59+
* sq_tail: index of the next new request, this field was updated by
60+
* copy or fill API.
61+
* cq_sq_head: next index of index that has been completed by hardware,
62+
* this filed was updated by completed* APIs.
63+
*
64+
* [sq_head, cq_sq_head): the SQEs that hardware already completed.
65+
* [cq_sq_head, sq_tail): the SQEs that hardware processing.
66+
*/
67+
uint16_t sq_head;
68+
uint16_t sq_tail;
69+
uint16_t cq_sq_head;
70+
uint16_t avail_sqes;
71+
4172
uint16_t cq_depth; /**< CQ depth, inited when created. */
73+
uint16_t cq_head; /**< CQ index for next scans. */
74+
uint16_t cqs_completed; /**< accumulated number of completed CQs. */
75+
uint8_t cqe_vld; /**< valid bit for CQE, will change for every round. */
76+
77+
uint64_t submitted;
78+
uint64_t completed;
79+
uint64_t errors;
80+
uint64_t invalid_lens;
81+
uint64_t io_errors;
82+
uint64_t qfulls;
4283

4384
/**
4485
* The following fields are not accessed in the I/O path, so they are
@@ -50,6 +91,7 @@ struct hacc_dma_dev {
5091
void *dus_base;
5192
uint32_t sqe_size;
5293
uint16_t sq_depth;
94+
bool started;
5395
};
5496

5597
#endif /* HISI_ACC_DMADEV_H */

0 commit comments

Comments
 (0)