Commit 4206226

dvo-plv authored and ferruhy committed
net/ntnic: add retrieving and managing packets
Implemented functionality for retrieving received packets from virtual queues, supporting both SPLIT_RING and PACKED_RING types. Updated sg_ops structure to include the new packet retrieval functions.

Signed-off-by: Danylo Vodopianov <[email protected]>
Acked-by: Serhii Iliushyk <[email protected]>
1 parent 00025ad

File tree: 3 files changed, +261 -0 lines changed

doc/guides/nics/ntnic.rst

Lines changed: 44 additions & 0 deletions
@@ -51,6 +51,50 @@ and they are also supported.
If vfio-pci is not required, kernel version 4.18 is supported.


+Configuration
+-------------
+
+Command line arguments
+~~~~~~~~~~~~~~~~~~~~~~
+
+Following standard DPDK command line arguments are used by the PMD:
+
+-a: Used to specifically define the NT adapter by PCI ID.
+--iova-mode: Must be set to ‘pa’ for Physical Address mode.
+
+NTNIC specific arguments can be passed to the PMD in the PCI device parameter list::
+
+   <application> ... -a 0000:03:00.0[{,<NTNIC specific argument>}]
+
+The NTNIC specific argument format is::
+
+   <object>.<attribute>=[<object-ids>:]<value>
+
+Multiple arguments for the same device are separated by ‘,’ comma.
+<object-ids> can be a single value or a range.
+
+
+- ``rxqs`` parameter [int]
+
+  Specify number of RX queues to use.
+
+  To specify number of RX queues::
+
+     -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+  By default, the value is set to 1.
+
+- ``txqs`` parameter [int]
+
+  Specify number of TX queues to use.
+
+  To specify number of TX queues::
+
+     -a <domain>:<bus>:00.0,rxqs=4,txqs=4
+
+  By default, the value is set to 1.
+
+
Logging and Debugging
---------------------
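For reference, the arguments documented in the hunk above can be combined in a single invocation. A hypothetical example using dpdk-testpmd (the PCI address is the placeholder from the guide and the queue counts are arbitrary)::

   dpdk-testpmd -a 0000:03:00.0,rxqs=4,txqs=4 --iova-mode=pa -- -i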

doc/guides/rel_notes/release_24_11.rst

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@ New Features
  * Added NT flow filter init API
  * Added NT flow backend initialization API
  * Added initialization of FPGA modules related to flow HW offload
+  * Added basic handling of the virtual queues

* **Added cryptodev queue pair reset support.**

drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c

Lines changed: 216 additions & 0 deletions
@@ -58,6 +58,15 @@
    } \
} while (0)

+#define inc_used(vq, num) do { \
+    struct nthw_virt_queue *temp_vq = (vq); \
+    temp_vq->next_used += (num); \
+    if (temp_vq->next_used >= temp_vq->queue_size) { \
+        temp_vq->next_used -= temp_vq->queue_size; \
+        temp_vq->used_wrap_count ^= 1; \
+    } \
+} while (0)
+
struct __rte_aligned(8) virtq_avail {
    uint16_t flags;
    uint16_t idx;
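The inc_used macro added above implements the packed-ring wrap-counter bookkeeping: next_used advances modulo queue_size, and used_wrap_count toggles on every wrap so that the descriptor AVAIL/USED flag bits can be compared against it (as the RX/TX functions added below do). A minimal stand-alone sketch, using a toy structure rather than the driver's state, showing how the two fields evolve for a queue of size 4:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in holding only the fields inc_used() touches. */
struct toy_vq {
    uint16_t next_used;
    uint16_t queue_size;       /* always a power of two in the driver */
    uint16_t used_wrap_count;  /* starts at 1, flips on every wrap */
};

#define inc_used(vq, num) do { \
    struct toy_vq *temp_vq = (vq); \
    temp_vq->next_used += (num); \
    if (temp_vq->next_used >= temp_vq->queue_size) { \
        temp_vq->next_used -= temp_vq->queue_size; \
        temp_vq->used_wrap_count ^= 1; \
    } \
} while (0)

int main(void)
{
    struct toy_vq vq = { .next_used = 0, .queue_size = 4, .used_wrap_count = 1 };

    for (int step = 1; step <= 6; step++) {
        inc_used(&vq, 1);
        /* After the 4th increment next_used returns to 0 and the wrap count flips. */
        printf("step %d: next_used=%u wrap=%u\n", step, vq.next_used, vq.used_wrap_count);
    }
    return 0;
}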
@@ -107,6 +116,10 @@ struct nthw_virt_queue {
    struct pvirtq_event_suppress *driver_event;
    struct pvirtq_event_suppress *device_event;
    struct pvirtq_desc *desc;
+    struct {
+        uint16_t next;
+        uint16_t num;
+    } outs;
    /*
     * when in-order release used Tx packets from FPGA it may collapse
     * into a batch. When getting new Tx buffers we may only need
@@ -1097,6 +1110,107 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
    return NULL;
}

+static uint16_t nthw_get_rx_packets(struct nthw_virt_queue *rxvq,
+    uint16_t n,
+    struct nthw_received_packets *rp,
+    uint16_t *nb_pkts)
+{
+    uint16_t segs = 0;
+    uint16_t pkts = 0;
+
+    if (rxvq->vq_type == SPLIT_RING) {
+        uint16_t i;
+        uint16_t entries_ready = (uint16_t)(rxvq->cached_idx - rxvq->used_idx);
+
+        if (entries_ready < n) {
+            /* Look for more packets */
+            rxvq->cached_idx = rxvq->p_used->idx;
+            entries_ready = (uint16_t)(rxvq->cached_idx - rxvq->used_idx);
+
+            if (entries_ready == 0) {
+                *nb_pkts = 0;
+                return 0;
+            }
+
+            if (n > entries_ready)
+                n = entries_ready;
+        }
+
+        /*
+         * Give packets - make sure all packets are whole packets.
+         * Valid because queue_size is always 2^n
+         */
+        const uint16_t queue_mask = (uint16_t)(rxvq->queue_size - 1);
+        const uint32_t buf_len = rxvq->p_desc[0].len;
+
+        uint16_t used = rxvq->used_idx;
+
+        for (i = 0; i < n; ++i) {
+            uint32_t id = rxvq->p_used->ring[used & queue_mask].id;
+            rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+            rp[i].len = rxvq->p_used->ring[used & queue_mask].len;
+
+            uint32_t pkt_len = ((struct _pkt_hdr_rx *)rp[i].addr)->cap_len;
+
+            if (pkt_len > buf_len) {
+                /* segmented */
+                int nbsegs = (pkt_len + buf_len - 1) / buf_len;
+
+                if (((int)i + nbsegs) > n) {
+                    /* don't have enough segments - break out */
+                    break;
+                }
+
+                int ii;
+
+                for (ii = 1; ii < nbsegs; ii++) {
+                    ++i;
+                    id = rxvq->p_used->ring[(used + ii) & queue_mask].id;
+                    rp[i].addr = rxvq->p_virtual_addr[id].virt_addr;
+                    rp[i].len =
+                        rxvq->p_used->ring[(used + ii) & queue_mask].len;
+                }
+
+                used += nbsegs;
+
+            } else {
+                ++used;
+            }
+
+            pkts++;
+            segs = i + 1;
+        }
+
+        rxvq->used_idx = used;
+
+    } else if (rxvq->vq_type == PACKED_RING) {
+        /* This requires in-order behavior from FPGA */
+        int i;
+
+        for (i = 0; i < n; i++) {
+            struct pvirtq_desc *desc = &rxvq->desc[rxvq->next_used];
+
+            uint16_t flags = desc->flags;
+            uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+            uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+            if (avail != rxvq->used_wrap_count || used != rxvq->used_wrap_count)
+                break;
+
+            rp[pkts].addr = rxvq->p_virtual_addr[desc->id].virt_addr;
+            rp[pkts].len = desc->len;
+            pkts++;
+
+            inc_used(rxvq, 1);
+        }
+
+        segs = pkts;
+    }
+
+    *nb_pkts = pkts;
+    return segs;
+}
+
/*
 * Put buffers back into Avail Ring
 */
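For orientation, the new RX hook pairs with the existing nthw_release_rx_packets() that follows it in the ops table: the getter fills an nthw_received_packets array with one entry per buffer segment, reports whole packets through nb_pkts, and returns the number of ring entries consumed, which is then handed back to the release call so the buffers return to the avail ring. The following is a hypothetical caller sketch, not part of the commit, assumed to live in the same translation unit as the functions above; everything except nthw_get_rx_packets(), nthw_release_rx_packets() and the driver types they use is illustrative.

static uint16_t example_rx_burst(struct nthw_virt_queue *rxvq, uint16_t budget)
{
    struct nthw_received_packets rp[32];    /* per-burst scratch array */
    uint16_t nb_pkts = 0;

    if (budget > 32)
        budget = 32;

    /* Fills rp[] with one entry per buffer segment, reports whole packets
     * via nb_pkts and returns the number of ring entries consumed. */
    uint16_t segs = nthw_get_rx_packets(rxvq, budget, rp, &nb_pkts);

    for (uint16_t i = 0; i < segs; i++) {
        /* rp[i].addr / rp[i].len describe one received segment;
         * translate into mbufs or copy out here. */
    }

    /* Hand the consumed entries back so the buffers return to the avail ring. */
    if (segs > 0)
        nthw_release_rx_packets(rxvq, segs);

    return nb_pkts;
}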
@@ -1139,6 +1253,106 @@ static void nthw_release_rx_packets(struct nthw_virt_queue *rxvq, uint16_t n)
    }
}

+static uint16_t nthw_get_tx_packets(struct nthw_virt_queue *txvq,
+    uint16_t n,
+    uint16_t *first_idx,
+    struct nthw_cvirtq_desc *cvq,
+    struct nthw_memory_descriptor **p_virt_addr)
+{
+    int m = 0;
+    uint16_t queue_mask =
+        (uint16_t)(txvq->queue_size - 1);    /* Valid because queue_size is always 2^n */
+    *p_virt_addr = txvq->p_virtual_addr;
+
+    if (txvq->vq_type == SPLIT_RING) {
+        cvq->s = txvq->p_desc;
+        cvq->vq_type = SPLIT_RING;
+
+        *first_idx = txvq->tx_descr_avail_idx;
+
+        uint16_t entries_used =
+            (uint16_t)((txvq->tx_descr_avail_idx - txvq->cached_idx) & queue_mask);
+        uint16_t entries_ready = (uint16_t)(txvq->queue_size - 1 - entries_used);
+
+        vq_log_arg(txvq,
+            "ask %i: descrAvail %i, cachedidx %i, used: %i, ready %i used->idx %i",
+            n, txvq->tx_descr_avail_idx, txvq->cached_idx, entries_used, entries_ready,
+            txvq->p_used->idx);
+
+        if (entries_ready < n) {
+            /*
+             * Look for more packets.
+             * Using the used_idx in the avail ring since they are held synchronous
+             * because of in-order
+             */
+            txvq->cached_idx =
+                txvq->p_avail->ring[(txvq->p_used->idx - 1) & queue_mask];
+
+            vq_log_arg(txvq, "Update: get cachedidx %i (used_idx-1 %i)",
+                txvq->cached_idx, (txvq->p_used->idx - 1) & queue_mask);
+            entries_used =
+                (uint16_t)((txvq->tx_descr_avail_idx - txvq->cached_idx)
+                & queue_mask);
+            entries_ready = (uint16_t)(txvq->queue_size - 1 - entries_used);
+            vq_log_arg(txvq, "new used: %i, ready %i", entries_used, entries_ready);
+
+            if (n > entries_ready)
+                n = entries_ready;
+        }
+
+    } else if (txvq->vq_type == PACKED_RING) {
+        int i;
+
+        cvq->p = txvq->desc;
+        cvq->vq_type = PACKED_RING;
+
+        if (txvq->outs.num) {
+            *first_idx = txvq->outs.next;
+            uint16_t num = min(n, txvq->outs.num);
+            txvq->outs.next = (txvq->outs.next + num) & queue_mask;
+            txvq->outs.num -= num;
+
+            if (n == num)
+                return n;
+
+            m = num;
+            n -= num;
+
+        } else {
+            *first_idx = txvq->next_used;
+        }
+
+        /* iterate the ring - this requires in-order behavior from FPGA */
+        for (i = 0; i < n; i++) {
+            struct pvirtq_desc *desc = &txvq->desc[txvq->next_used];
+
+            uint16_t flags = desc->flags;
+            uint8_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
+            uint8_t used = !!(flags & VIRTQ_DESC_F_USED);
+
+            if (avail != txvq->used_wrap_count || used != txvq->used_wrap_count) {
+                n = i;
+                break;
+            }
+
+            uint16_t incr = (desc->id - txvq->next_used) & queue_mask;
+            i += incr;
+            inc_used(txvq, incr + 1);
+        }
+
+        if (i > n) {
+            int outs_num = i - n;
+            txvq->outs.next = (txvq->next_used - outs_num) & queue_mask;
+            txvq->outs.num = outs_num;
+        }
+
+    } else {
+        return 0;
+    }
+
+    return m + n;
+}
+
static void nthw_release_tx_packets(struct nthw_virt_queue *txvq, uint16_t n, uint16_t n_segs[])
{
    int i;
@@ -1200,7 +1414,9 @@ static struct sg_ops_s sg_ops = {
    .nthw_release_mngd_rx_virt_queue = nthw_release_mngd_rx_virt_queue,
    .nthw_setup_mngd_tx_virt_queue = nthw_setup_mngd_tx_virt_queue,
    .nthw_release_mngd_tx_virt_queue = nthw_release_mngd_tx_virt_queue,
+    .nthw_get_rx_packets = nthw_get_rx_packets,
    .nthw_release_rx_packets = nthw_release_rx_packets,
+    .nthw_get_tx_packets = nthw_get_tx_packets,
    .nthw_release_tx_packets = nthw_release_tx_packets,
    .nthw_virt_queue_init = nthw_virt_queue_init
};
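With both hooks registered in sg_ops, a transmit path can reserve descriptors, fill them and publish them. The following is a hypothetical caller sketch, not part of the commit, assumed to live in the same translation unit as the functions above; everything except nthw_get_tx_packets(), nthw_release_tx_packets() and the driver types they use is illustrative.

static uint16_t example_tx_burst(struct nthw_virt_queue *txvq, uint16_t want)
{
    uint16_t first_idx = 0;
    struct nthw_cvirtq_desc cvq;
    struct nthw_memory_descriptor *vaddr_table;
    uint16_t n_segs[32];

    if (want > 32)
        want = 32;

    /* Reserve up to 'want' descriptors starting at first_idx; the return
     * value may be smaller if the ring does not have enough room. */
    uint16_t got = nthw_get_tx_packets(txvq, want, &first_idx, &cvq, &vaddr_table);

    for (uint16_t i = 0; i < got; i++) {
        /* cvq.s (SPLIT_RING) or cvq.p (PACKED_RING) is the descriptor array
         * and vaddr_table[] maps descriptor ids to host buffers; copy the
         * frame and set the descriptor length here. */
        n_segs[i] = 1;    /* one segment per packet in this simple sketch */
    }

    /* Publish the filled descriptors so the FPGA can transmit them. */
    if (got > 0)
        nthw_release_tx_packets(txvq, got, n_segs);

    return got;
}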
