
Commit 00025ad

dvo-plv authored and ferruhy committed
net/ntnic: add releasing virt queues
Implemented handling of busy states and shutdown of hardware queues. Added functionality for releasing managed RX and TX virtual queue resources and for releasing packets back into the availability ring. Updated the sg_ops structure to include the new queue management functions.

Signed-off-by: Danylo Vodopianov <[email protected]>
Acked-by: Serhii Iliushyk <[email protected]>
1 parent f0bd342 commit 00025ad
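
Note on the packed-ring helpers added in this commit (avail_flag(), used_flag_inv() and inc_avail() in ntnic_dbsconfig.c below): they follow the VirtIO 1.1 packed-ring convention in which a driver-side wrap counter decides which of the AVAIL/USED flag bits marks a descriptor as newly available, and the counter flips each time the ring index wraps. The following standalone sketch mirrors that bookkeeping with simplified stand-in types; struct toy_vq and the toy_* helpers are illustrative only, not the driver's real structures.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's types; the flag values follow the
 * VirtIO 1.1 packed-ring layout (AVAIL = bit 7, USED = bit 15). */
#define VIRTQ_DESC_F_AVAIL (1 << 7)
#define VIRTQ_DESC_F_USED  (1 << 15)

struct toy_vq {
	uint16_t next_avail;       /* next descriptor slot to post */
	uint16_t queue_size;       /* ring size */
	uint16_t avail_wrap_count; /* toggles on every ring wrap */
};

/* Same idea as avail_flag()/used_flag_inv() in the diff: the wrap counter
 * decides which flag bits mark a descriptor as available to the device. */
static uint16_t toy_avail_flag(const struct toy_vq *vq)
{
	return vq->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0;
}

static uint16_t toy_used_flag_inv(const struct toy_vq *vq)
{
	return vq->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED;
}

/* Same idea as inc_avail(): advance next_avail and flip the wrap counter
 * when the index runs past the end of the ring. */
static void toy_inc_avail(struct toy_vq *vq, uint16_t num)
{
	vq->next_avail += num;

	if (vq->next_avail >= vq->queue_size) {
		vq->next_avail -= vq->queue_size;
		vq->avail_wrap_count ^= 1;
	}
}

int main(void)
{
	struct toy_vq vq = { .next_avail = 0, .queue_size = 4, .avail_wrap_count = 1 };

	for (int i = 0; i < 6; i++) {
		printf("slot %u flags 0x%04x\n", (unsigned)vq.next_avail,
		       (unsigned)(toy_avail_flag(&vq) | toy_used_flag_inv(&vq)));
		toy_inc_avail(&vq, 1);
	}
	/* After 4 posts the index wraps to 0 and the flag pattern inverts,
	 * which is how the device tells fresh descriptors from stale ones. */
	return 0;
}

Running the sketch shows the flag pattern inverting after the fourth post; the driver relies on the same inversion so the FPGA can distinguish descriptors posted on the current lap of the ring from ones left over from the previous lap.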

File tree

4 files changed: +408, -0 lines changed


drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c

Lines changed: 353 additions & 0 deletions
@@ -6,6 +6,7 @@
 #include <unistd.h>
 
 #include "ntos_drv.h"
+#include "nt_util.h"
 #include "ntnic_virt_queue.h"
 #include "ntnic_mod_reg.h"
 #include "ntlog.h"
@@ -37,6 +38,26 @@
 
 #define VIRTQ_AVAIL_F_NO_INTERRUPT 1
 
+#define vq_log_arg(vq, format, ...)
+
+/*
+ * Packed Ring helper macros
+ */
+#define PACKED(vq_type) ((vq_type) == PACKED_RING ? 1 : 0)
+
+#define avail_flag(vq) ((vq)->avail_wrap_count ? VIRTQ_DESC_F_AVAIL : 0)
+#define used_flag_inv(vq) ((vq)->avail_wrap_count ? 0 : VIRTQ_DESC_F_USED)
+
+#define inc_avail(vq, num) \
+	do { \
+		struct nthw_virt_queue *temp_vq = (vq); \
+		temp_vq->next_avail += (num); \
+		if (temp_vq->next_avail >= temp_vq->queue_size) { \
+			temp_vq->next_avail -= temp_vq->queue_size; \
+			temp_vq->avail_wrap_count ^= 1; \
+		} \
+	} while (0)
+
 struct __rte_aligned(8) virtq_avail {
 	uint16_t flags;
 	uint16_t idx;
@@ -115,6 +136,7 @@ struct nthw_virt_queue {
 	uint32_t host_id;
 	uint32_t port;	/* Only used by TX queues */
 	uint32_t virtual_port;	/* Only used by TX queues */
+	uint32_t header;
 	/*
 	 * Only used by TX queues:
 	 *   0: VirtIO-Net header (12 bytes).
@@ -417,6 +439,237 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	return &rxvq[index];
 }
 
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx);
+
+static int dbs_wait_on_busy(struct nthw_virt_queue *vq, uint32_t *idle, int rx)
+{
+	uint32_t busy;
+	uint32_t queue;
+	int err = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	do {
+		if (rx)
+			err = get_rx_idle(p_nthw_dbs, idle, &queue, &busy);
+
+		else
+			err = get_tx_idle(p_nthw_dbs, idle, &queue, &busy);
+	} while (!err && busy);
+
+	return err;
+}
+
+static int dbs_wait_hw_queue_shutdown(struct nthw_virt_queue *vq, int rx)
+{
+	int err = 0;
+	uint32_t idle = 0;
+	nthw_dbs_t *p_nthw_dbs = vq->mp_nthw_dbs;
+
+	err = dbs_wait_on_busy(vq, &idle, rx);
+
+	if (err) {
+		if (err == -ENOTSUP) {
+			nt_os_wait_usec(200000);
+			return 0;
+		}
+
+		return -1;
+	}
+
+	do {
+		if (rx)
+			err = set_rx_idle(p_nthw_dbs, 1, vq->index);
+
+		else
+			err = set_tx_idle(p_nthw_dbs, 1, vq->index);
+
+		if (err)
+			return -1;
+
+		if (dbs_wait_on_busy(vq, &idle, rx) != 0)
+			return -1;
+
+	} while (idle == 0);
+
+	return 0;
+}
+
+static int dbs_internal_release_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	nthw_dbs_t *p_nthw_dbs = rxvq->mp_nthw_dbs;
+
+	if (rxvq == NULL)
+		return -1;
+
+	/* Clear UW */
+	rxvq->used_struct_phys_addr = NULL;
+
+	if (set_rx_uw_data(p_nthw_dbs, rxvq->index, (uint64_t)rxvq->used_struct_phys_addr,
+			rxvq->host_id, 0, PACKED(rxvq->vq_type), 0, 0, 0) != 0) {
+		return -1;
+	}
+
+	/* Disable AM */
+	rxvq->am_enable = RX_AM_DISABLE;
+
+	if (set_rx_am_data(p_nthw_dbs,
+			rxvq->index,
+			(uint64_t)rxvq->avail_struct_phys_addr,
+			rxvq->am_enable,
+			rxvq->host_id,
+			PACKED(rxvq->vq_type),
+			0) != 0) {
+		return -1;
+	}
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(rxvq, 1) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	rxvq->avail_struct_phys_addr = NULL;
+	rxvq->host_id = 0;
+
+	if (set_rx_am_data(p_nthw_dbs,
+			rxvq->index,
+			(uint64_t)rxvq->avail_struct_phys_addr,
+			rxvq->am_enable,
+			rxvq->host_id,
+			PACKED(rxvq->vq_type),
+			0) != 0)
+		return -1;
+
+	/* Clear DR */
+	rxvq->desc_struct_phys_addr = NULL;
+
+	if (set_rx_dr_data(p_nthw_dbs,
+			rxvq->index,
+			(uint64_t)rxvq->desc_struct_phys_addr,
+			rxvq->host_id,
+			0,
+			rxvq->header,
+			PACKED(rxvq->vq_type)) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_rx_queue(p_nthw_dbs, rxvq->index, 0, 0);
+
+	/* Reset queue state */
+	rxvq->usage = NTHW_VIRTQ_UNUSED;
+	rxvq->mp_nthw_dbs = p_nthw_dbs;
+	rxvq->index = 0;
+	rxvq->queue_size = 0;
+
+	return 0;
+}
+
+static int nthw_release_mngd_rx_virt_queue(struct nthw_virt_queue *rxvq)
+{
+	if (rxvq == NULL || rxvq->usage != NTHW_VIRTQ_MANAGED)
+		return -1;
+
+	if (rxvq->p_virtual_addr) {
+		free(rxvq->p_virtual_addr);
+		rxvq->p_virtual_addr = NULL;
+	}
+
+	return dbs_internal_release_rx_virt_queue(rxvq);
+}
+
+static int dbs_internal_release_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	nthw_dbs_t *p_nthw_dbs = txvq->mp_nthw_dbs;
+
+	if (txvq == NULL)
+		return -1;
+
+	/* Clear UW */
+	txvq->used_struct_phys_addr = NULL;
+
+	if (set_tx_uw_data(p_nthw_dbs, txvq->index, (uint64_t)txvq->used_struct_phys_addr,
+			txvq->host_id, 0, PACKED(txvq->vq_type), 0, 0, 0,
+			txvq->in_order) != 0) {
+		return -1;
+	}
+
+	/* Disable AM */
+	txvq->am_enable = TX_AM_DISABLE;
+
+	if (set_tx_am_data(p_nthw_dbs,
+			txvq->index,
+			(uint64_t)txvq->avail_struct_phys_addr,
+			txvq->am_enable,
+			txvq->host_id,
+			PACKED(txvq->vq_type),
+			0) != 0) {
+		return -1;
+	}
+
+	/* Let the FPGA finish packet processing */
+	if (dbs_wait_hw_queue_shutdown(txvq, 0) != 0)
+		return -1;
+
+	/* Clear rest of AM */
+	txvq->avail_struct_phys_addr = NULL;
+	txvq->host_id = 0;
+
+	if (set_tx_am_data(p_nthw_dbs,
+			txvq->index,
+			(uint64_t)txvq->avail_struct_phys_addr,
+			txvq->am_enable,
+			txvq->host_id,
+			PACKED(txvq->vq_type),
+			0) != 0) {
+		return -1;
+	}
+
+	/* Clear DR */
+	txvq->desc_struct_phys_addr = NULL;
+	txvq->port = 0;
+	txvq->header = 0;
+
+	if (set_tx_dr_data(p_nthw_dbs,
+			txvq->index,
+			(uint64_t)txvq->desc_struct_phys_addr,
+			txvq->host_id,
+			0,
+			txvq->port,
+			txvq->header,
+			PACKED(txvq->vq_type)) != 0) {
+		return -1;
+	}
+
+	/* Clear QP */
+	txvq->virtual_port = 0;
+
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, txvq->index, txvq->virtual_port) != 0)
+		return -1;
+
+	/* Initialize queue */
+	dbs_init_tx_queue(p_nthw_dbs, txvq->index, 0, 0);
+
+	/* Reset queue state */
+	txvq->usage = NTHW_VIRTQ_UNUSED;
+	txvq->mp_nthw_dbs = p_nthw_dbs;
+	txvq->index = 0;
+	txvq->queue_size = 0;
+
+	return 0;
+}
+
+static int nthw_release_mngd_tx_virt_queue(struct nthw_virt_queue *txvq)
+{
+	if (txvq == NULL || txvq->usage != NTHW_VIRTQ_MANAGED)
+		return -1;
+
+	if (txvq->p_virtual_addr) {
+		free(txvq->p_virtual_addr);
+		txvq->p_virtual_addr = NULL;
+	}
+
+	return dbs_internal_release_tx_virt_queue(txvq);
+}
+
 static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t index,
 	uint16_t start_idx,
@@ -844,11 +1097,111 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	return NULL;
 }
 
+/*
+ * Put buffers back into Avail Ring
+ */
+static void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n)
+{
+	if (rxvq->vq_type == SPLIT_RING) {
+		rxvq->am_idx = (uint16_t)(rxvq->am_idx + n);
+		rxvq->p_avail->idx = rxvq->am_idx;
+
+	} else if (rxvq->vq_type == PACKED_RING) {
+		int i;
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		uint16_t first_flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) | used_flag_inv(rxvq);
+		struct pvirtq_desc *first_desc = &rxvq->desc[rxvq->next_avail];
+
+		uint32_t len = rxvq->p_virtual_addr[0].len;	/* all same size */
+
+		/* Optimization point: use in-order release */
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_avail];
+
+			desc->id = rxvq->next_avail;
+			desc->addr = (uint64_t)rxvq->p_virtual_addr[desc->id].phys_addr;
+			desc->len = len;
+
+			if (i)
+				desc->flags = VIRTQ_DESC_F_WRITE | avail_flag(rxvq) |
+					used_flag_inv(rxvq);
+
+			inc_avail(rxvq, 1);
+		}
+
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
+static void nthw_release_tx_packets(struct nthw_virt_queue *txvq, uint16_t n, uint16_t n_segs[])
+{
+	int i;
+
+	if (txvq->vq_type == SPLIT_RING) {
+		/* Valid because queue_size is always 2^n */
+		uint16_t queue_mask = (uint16_t)(txvq->queue_size - 1);
+
+		vq_log_arg(txvq, "pkts %i, avail idx %i, start at %i", n, txvq->am_idx,
+			txvq->tx_descr_avail_idx);
+
+		for (i = 0; i < n; i++) {
+			int idx = txvq->am_idx & queue_mask;
+			txvq->p_avail->ring[idx] = txvq->tx_descr_avail_idx;
+			txvq->tx_descr_avail_idx =
+				(txvq->tx_descr_avail_idx + n_segs[i]) & queue_mask;
+			txvq->am_idx++;
+		}
+
+		/* Make sure the ring has been updated before HW reads index update */
+		rte_mb();
+		txvq->p_avail->idx = txvq->am_idx;
+		vq_log_arg(txvq, "new avail idx %i, descr_idx %i", txvq->p_avail->idx,
+			txvq->tx_descr_avail_idx);
+
+	} else if (txvq->vq_type == PACKED_RING) {
+		/*
+		 * Defer flags update on first segment - due to serialization towards HW and
+		 * when jumbo segments are added
+		 */
+
+		uint16_t first_flags = avail_flag(txvq) | used_flag_inv(txvq);
+		struct pvirtq_desc *first_desc = &txvq->desc[txvq->next_avail];
+
+		for (i = 0; i < n; i++) {
+			struct pvirtq_desc *desc = &txvq->desc[txvq->next_avail];
+
+			desc->id = txvq->next_avail;
+			desc->addr = (uint64_t)txvq->p_virtual_addr[desc->id].phys_addr;
+
+			if (i)
+				/* bitwise-or here because next flags may already have been setup
+				 */
+				desc->flags |= avail_flag(txvq) | used_flag_inv(txvq);
+
+			inc_avail(txvq, 1);
+		}
+
+		/* Proper read barrier before FPGA may see first flags */
+		rte_rmb();
+		first_desc->flags = first_flags;
+	}
+}
+
 static struct sg_ops_s sg_ops = {
 	.nthw_setup_rx_virt_queue = nthw_setup_rx_virt_queue,
 	.nthw_setup_tx_virt_queue = nthw_setup_tx_virt_queue,
 	.nthw_setup_mngd_rx_virt_queue = nthw_setup_mngd_rx_virt_queue,
+	.nthw_release_mngd_rx_virt_queue = nthw_release_mngd_rx_virt_queue,
 	.nthw_setup_mngd_tx_virt_queue = nthw_setup_mngd_tx_virt_queue,
+	.nthw_release_mngd_tx_virt_queue = nthw_release_mngd_tx_virt_queue,
+	.nthw_release_rx_packets = nthw_release_rx_packets,
+	.nthw_release_tx_packets = nthw_release_tx_packets,
 	.nthw_virt_queue_init = nthw_virt_queue_init
 };

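In the SPLIT_RING TX path, nthw_release_tx_packets() above publishes one avail-ring entry per released packet and then advances the head-descriptor index by that packet's segment count, wrapping both indexes with a queue_size - 1 mask (valid because the queue size is always a power of two). The following standalone sketch reproduces just that index arithmetic with a plain array in place of the driver's virtq_avail ring; names prefixed toy_/TOY_ are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the split-ring bookkeeping in
 * nthw_release_tx_packets(); the ring here is a plain array. */
#define TOY_QUEUE_SIZE 8	/* must be a power of two, as in the driver */

int main(void)
{
	uint16_t ring[TOY_QUEUE_SIZE];
	const uint16_t queue_mask = TOY_QUEUE_SIZE - 1; /* valid because size is 2^n */

	uint16_t am_idx = 0;             /* avail-ring write position */
	uint16_t tx_descr_avail_idx = 0; /* head descriptor of the next packet */

	/* Three packets with 1, 2 and 3 segments respectively. */
	uint16_t n_segs[] = { 1, 2, 3 };
	uint16_t n = 3;

	for (uint16_t i = 0; i < n; i++) {
		uint16_t idx = am_idx & queue_mask;

		/* Publish the packet's head descriptor... */
		ring[idx] = tx_descr_avail_idx;

		/* ...then skip over all of its segments, wrapping with the mask. */
		tx_descr_avail_idx = (tx_descr_avail_idx + n_segs[i]) & queue_mask;
		am_idx++;
	}

	/* In the driver a full barrier (rte_mb()) is issued at this point,
	 * before p_avail->idx is exposed, so the hardware never sees the
	 * index move ahead of the ring contents. */
	for (uint16_t i = 0; i < n; i++)
		printf("avail[%u] = descriptor %u\n", (unsigned)i, (unsigned)ring[i]);

	return 0;
}

With segment counts 1, 2 and 3 the published head descriptors come out as 0, 1 and 3, showing how a multi-segment packet consumes several descriptor slots while occupying only one avail-ring entry.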