Skip to content

Commit aa3b984

Browse files
Dimon Zhao authored and shemminger committed
net/nbl: add support for Tx and Rx VLAN offload
We simulate support for Tx and Rx VLAN offload, while in reality we handle Tx VLAN insertion and Rx VLAN stripping in software. This implementation is necessary because some of our customers assume our NICs natively support Tx and Rx VLAN offload capabilities. They use packet vlan offload during packet transmission and reception without checking the eth_dev capabilities. Signed-off-by: Dimon Zhao <[email protected]>
1 parent b9eba46 commit aa3b984

File tree

2 files changed

+51
-0
lines changed

2 files changed

+51
-0
lines changed

drivers/net/nbl/nbl_dev/nbl_dev.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,7 @@ int nbl_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_
300300
struct nbl_adapter *adapter = ETH_DEV_TO_NBL_DEV_PF_PRIV(eth_dev);
301301
struct nbl_dev_mgt *dev_mgt = NBL_ADAPTER_TO_DEV_MGT(adapter);
302302
struct nbl_dev_ring_mgt *ring_mgt = &dev_mgt->net_dev->ring_mgt;
303+
struct nbl_common_info *common = NBL_DEV_MGT_TO_COMMON(dev_mgt);
303304
struct nbl_board_port_info *board_info = &dev_mgt->common->board_info;
304305
u8 speed_mode = board_info->speed;
305306

@@ -330,6 +331,10 @@ int nbl_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_
330331
dev_info->default_txportconf.nb_queues = ring_mgt->tx_ring_num;
331332
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
332333
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
334+
if (!common->is_vf) {
335+
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
336+
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
337+
}
333338
switch (speed_mode) {
334339
case NBL_FW_PORT_SPEED_100G:
335340
dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100G;

drivers/net/nbl/nbl_hw/nbl_txrx.c

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -237,8 +237,11 @@ static int nbl_res_txrx_start_rx_ring(void *priv,
237237
const struct nbl_hw_ops *hw_ops = NBL_RES_MGT_TO_HW_OPS(res_mgt);
238238
struct nbl_common_info *common = NBL_RES_MGT_TO_COMMON(res_mgt);
239239
const struct rte_memzone *memzone;
240+
uint64_t offloads;
240241
u32 size;
241242

243+
offloads = param->conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
244+
242245
if (eth_dev->data->rx_queues[param->queue_idx] != NULL) {
243246
NBL_LOG(WARNING, "re-setup an already allocated rx queue");
244247
nbl_res_txrx_stop_rx_ring(priv, param->queue_idx);
@@ -284,6 +287,7 @@ static int nbl_res_txrx_start_rx_ring(void *priv,
284287
rx_ring->dma_limit_msb = common->dma_limit_msb;
285288
rx_ring->common = common;
286289
rx_ring->notify = hw_ops->get_tail_ptr(NBL_RES_MGT_TO_HW_PRIV(res_mgt));
290+
rx_ring->offloads = offloads;
287291

288292
switch (param->product) {
289293
case NBL_LEONIS_TYPE:
@@ -437,6 +441,23 @@ static inline void nbl_fill_rx_ring(struct nbl_res_rx_ring *rxq,
437441
rxq->next_to_use = desc_index;
438442
}
439443

444+
/*
 * Software-emulated Tx VLAN insertion for the out-of-mbuf (can_push == 0)
 * path: the L2 header is rebuilt in the staging area that already holds the
 * HW extend header, rather than inside the mbuf itself.
 *
 * Copies the original Ethernet header from the head of @tx_pkt to the byte
 * right after the Leonis Tx extend header at @u, then splices an 802.1Q tag
 * (@vlan_proto host-order TPID, @vlan_tci host-order TCI) between that
 * copied header and the payload.  The caller is responsible for skipping
 * the original Ethernet header still present in the mbuf when it programs
 * the descriptors (it advances its address offset by
 * sizeof(struct rte_ether_hdr)).
 */
static inline void nbl_res_txrx_vlan_insert_out_mbuf(struct rte_mbuf *tx_pkt,
						     union nbl_tx_extend_head *u,
						     u16 vlan_proto, u16 vlan_tci)
{
	struct rte_ether_hdr *eth;
	struct rte_vlan_hdr *vlan;

	/* Rebuilt L2 header lives directly behind the HW extend header. */
	eth = (struct rte_ether_hdr *)((u8 *)u + sizeof(struct nbl_tx_ehdr_leonis));
	memcpy(eth, rte_pktmbuf_mtod(tx_pkt, u8 *), sizeof(struct rte_ether_hdr));

	/* Tag goes right after the copied Ethernet header; the inner
	 * EtherType is carried over from the original frame.
	 */
	vlan = (struct rte_vlan_hdr *)(eth + 1);
	vlan->vlan_tci = rte_cpu_to_be_16(vlan_tci);
	vlan->eth_proto = eth->ether_type;

	/* Outer EtherType now announces the VLAN tag (e.g. 0x8100). */
	eth->ether_type = rte_cpu_to_be_16(vlan_proto);
}
460+
440461
static u16
441462
nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u16 extend_set)
442463
{
@@ -477,6 +498,12 @@ nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u
477498

478499
tx_pkt = *tx_pkts++;
479500

501+
if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
502+
required_headroom += sizeof(struct rte_vlan_hdr);
503+
/* extend_hdr + ether_hdr + vlan_hdr */
504+
tx_extend_len = required_headroom + sizeof(struct rte_ether_hdr);
505+
}
506+
480507
if (rte_pktmbuf_headroom(tx_pkt) >= required_headroom) {
481508
can_push = 1;
482509
u = rte_pktmbuf_mtod_offset(tx_pkt, union nbl_tx_extend_head *,
@@ -485,6 +512,21 @@ nbl_res_txrx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts, u
485512
can_push = 0;
486513
u = (union nbl_tx_extend_head *)(&tx_region[desc_index]);
487514
}
515+
516+
if (tx_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
517+
if (likely(can_push)) {
518+
if (rte_vlan_insert(&tx_pkt)) {
519+
can_push = 0;
520+
u = (union nbl_tx_extend_head *)(&tx_region[desc_index]);
521+
}
522+
}
523+
if (unlikely(!can_push)) {
524+
addr_offset += sizeof(struct rte_ether_hdr);
525+
nbl_res_txrx_vlan_insert_out_mbuf(tx_pkt, u, RTE_ETHER_TYPE_VLAN,
526+
tx_pkt->vlan_tci);
527+
}
528+
}
529+
488530
nb_descs = !can_push + tx_pkt->nb_segs;
489531

490532
if (nb_descs > txq->vq_free_cnt) {
@@ -638,6 +680,10 @@ nbl_res_txrx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, u16 nb_pkts)
638680

639681
if (--num_sg)
640682
continue;
683+
684+
if (rxq->eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
685+
rte_vlan_strip(rx_mbuf);
686+
641687
if (drop) {
642688
rxq->rxq_stats.rx_drop_proto++;
643689
rte_pktmbuf_free(rx_mbuf);

0 commit comments

Comments (0)