@@ -11,66 +11,36 @@
 static inline void
 gve_rx_refill_dqo(struct gve_rx_queue *rxq)
 {
-	volatile struct gve_rx_desc_dqo *rx_buf_ring;
 	volatile struct gve_rx_desc_dqo *rx_buf_desc;
 	struct rte_mbuf *nmb[rxq->nb_rx_hold];
 	uint16_t nb_refill = rxq->nb_rx_hold;
-	uint16_t nb_desc = rxq->nb_rx_desc;
 	uint16_t next_avail = rxq->bufq_tail;
 	struct rte_eth_dev *dev;
 	uint64_t dma_addr;
-	uint16_t delta;
 	int i;
 
 	if (rxq->nb_rx_hold < rxq->free_thresh)
 		return;
 
-	rx_buf_ring = rxq->rx_ring;
-	delta = nb_desc - next_avail;
-	if (unlikely(delta < nb_refill)) {
-		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) {
-			for (i = 0; i < delta; i++) {
-				rx_buf_desc = &rx_buf_ring[next_avail + i];
-				rxq->sw_ring[next_avail + i] = nmb[i];
-				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-				rx_buf_desc->header_buf_addr = 0;
-				rx_buf_desc->buf_addr = dma_addr;
-			}
-			nb_refill -= delta;
-			next_avail = 0;
-			rxq->nb_rx_hold -= delta;
-		} else {
-			rxq->stats.no_mbufs_bulk++;
-			rxq->stats.no_mbufs += nb_desc - next_avail;
-			dev = &rte_eth_devices[rxq->port_id];
-			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
-			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-				    rxq->port_id, rxq->queue_id);
-			return;
-		}
+	if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill))) {
+		rxq->stats.no_mbufs_bulk++;
+		rxq->stats.no_mbufs += nb_refill;
+		dev = &rte_eth_devices[rxq->port_id];
+		dev->data->rx_mbuf_alloc_failed += nb_refill;
+		PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+			    rxq->port_id, rxq->queue_id);
+		return;
 	}
 
-	if (nb_desc - next_avail >= nb_refill) {
-		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) {
-			for (i = 0; i < nb_refill; i++) {
-				rx_buf_desc = &rx_buf_ring[next_avail + i];
-				rxq->sw_ring[next_avail + i] = nmb[i];
-				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
-				rx_buf_desc->header_buf_addr = 0;
-				rx_buf_desc->buf_addr = dma_addr;
-			}
-			next_avail += nb_refill;
-			rxq->nb_rx_hold -= nb_refill;
-		} else {
-			rxq->stats.no_mbufs_bulk++;
-			rxq->stats.no_mbufs += nb_desc - next_avail;
-			dev = &rte_eth_devices[rxq->port_id];
-			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
-			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
-				    rxq->port_id, rxq->queue_id);
-		}
+	for (i = 0; i < nb_refill; i++) {
+		rx_buf_desc = &rxq->rx_ring[next_avail];
+		rxq->sw_ring[next_avail] = nmb[i];
+		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+		rx_buf_desc->header_buf_addr = 0;
+		rx_buf_desc->buf_addr = dma_addr;
+		next_avail = (next_avail + 1) & (rxq->nb_rx_desc - 1);
 	}
-
+	rxq->nb_rx_hold -= nb_refill;
 	rte_write32(next_avail, rxq->qrx_tail);
 
 	rxq->bufq_tail = next_avail;
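The refactor replaces the old two-pass refill (one pass for the slots up to the end of the ring, a second pass after resetting the index to 0) with a single loop that wraps per descriptor via (next_avail + 1) & (rxq->nb_rx_desc - 1). That masking only works when the ring size is a power of two. Below is a minimal standalone sketch of the same masked wrap-around indexing; the names RING_SIZE, ring_refill(), buf_addr and tail are illustrative stand-ins, not part of the driver:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative mock ring; none of these names come from the driver.
 * The mask trick requires a power-of-two ring size. */
#define RING_SIZE 8
#define RING_MASK (RING_SIZE - 1)

static uint64_t buf_addr[RING_SIZE]; /* mock per-descriptor buffer addresses */
static uint16_t tail;                /* next slot to refill */

/* Refill nb slots starting at the current tail; wrap-around is handled
 * per slot with a mask instead of splitting the range at the ring end. */
static void
ring_refill(const uint64_t *new_addrs, uint16_t nb)
{
	uint16_t i;

	for (i = 0; i < nb; i++) {
		buf_addr[tail] = new_addrs[i];
		tail = (tail + 1) & RING_MASK;
	}
}

int
main(void)
{
	uint64_t addrs[5] = { 0x100, 0x200, 0x300, 0x400, 0x500 };
	int i;

	tail = 6;              /* start near the ring end so the refill wraps */
	ring_refill(addrs, 5); /* fills slots 6, 7, 0, 1, 2 */

	for (i = 0; i < RING_SIZE; i++)
		printf("slot %d: 0x%" PRIx64 "\n", i, buf_addr[i]);
	return 0;
}

Because the new path allocates all nb_refill mbufs with a single rte_pktmbuf_alloc_bulk() call up front, the refill becomes all-or-nothing, which is why the wrap-split allocation and its duplicated failure accounting could be dropped.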