@@ -34,17 +34,6 @@ struct pmd_internals;
 struct null_queue {
 	struct pmd_internals *internals;
 
-	/**
-	 * For RX queue:
-	 * Mempool to allocate mbufs from.
-	 *
-	 * For TX queue:
-	 * Mempool to free mbufs to, if fast release of mbufs is enabled.
-	 * UINTPTR_MAX if the mempool for fast release of mbufs has not yet been detected.
-	 * NULL if fast release of mbufs is not enabled.
-	 *
-	 * @see RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
-	 */
 	struct rte_mempool *mb_pool;
 	void *dummy_packet;
 
@@ -152,15 +141,8 @@ eth_null_no_rx(void *q __rte_unused, struct rte_mbuf **bufs __rte_unused,
 	return 0;
 }
 
-enum eth_tx_free_mode {
-	ETH_TX_FREE_MODE_NO_MBUF_FAST_FREE,	/* MBUF_FAST_FREE not possible. */
-	ETH_TX_FREE_MODE_MBUF_FAST_FREE,	/* MBUF_FAST_FREE enabled for the device. */
-	ETH_TX_FREE_MODE_PER_QUEUE,		/* Varies per TX queue. */
-};
-
-static __rte_always_inline uint16_t
-eth_null_tx_common(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs,
-		enum eth_tx_free_mode mode)
+static uint16_t
+eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	struct null_queue *h = q;
 	unsigned int i;
@@ -169,42 +151,13 @@ eth_null_tx_common(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs,
 	for (i = 0; i < nb_bufs; i++)
 		bytes += rte_pktmbuf_pkt_len(bufs[i]);
 
-	if (mode == ETH_TX_FREE_MODE_MBUF_FAST_FREE ||
-			(mode == ETH_TX_FREE_MODE_PER_QUEUE && h->mb_pool != NULL)) {
-		/* RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE */
-		if (unlikely(h->mb_pool == (void *)UINTPTR_MAX)) {
-			if (unlikely(nb_bufs == 0))
-				return 0; /* Do not dereference uninitialized bufs[0]. */
-			h->mb_pool = bufs[0]->pool;
-		}
-		rte_mbuf_raw_free_bulk(h->mb_pool, bufs, nb_bufs);
-	} else {
-		rte_pktmbuf_free_bulk(bufs, nb_bufs);
-	}
+	rte_pktmbuf_free_bulk(bufs, nb_bufs);
 	rte_atomic_fetch_add_explicit(&h->tx_pkts, nb_bufs, rte_memory_order_relaxed);
 	rte_atomic_fetch_add_explicit(&h->tx_bytes, bytes, rte_memory_order_relaxed);
 
 	return nb_bufs;
 }
 
-static uint16_t
-eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
-	return eth_null_tx_common(q, bufs, nb_bufs, ETH_TX_FREE_MODE_PER_QUEUE);
-}
-
-static uint16_t
-eth_null_tx_no_mbuf_fast_free(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
-	return eth_null_tx_common(q, bufs, nb_bufs, ETH_TX_FREE_MODE_NO_MBUF_FAST_FREE);
-}
-
-static uint16_t
-eth_null_tx_mbuf_fast_free(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
-	return eth_null_tx_common(q, bufs, nb_bufs, ETH_TX_FREE_MODE_MBUF_FAST_FREE);
-}
-
 static uint16_t
 eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
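
For context, the branch removed above implemented the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE contract: the application guarantees that all mbufs handed to tx_burst are direct, non-segmented, have a reference count of one, and come from a single mempool, so the driver may hand them back to that pool wholesale instead of taking the generic free path. A minimal sketch of the contrast, reusing struct null_queue from this file (illustration only, not part of the patch):

/* Sketch: the two TX free strategies this revert collapses into one. */
#include <rte_mbuf.h>

static void
tx_free_sketch(struct null_queue *h, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	if (h->mb_pool != NULL) {
		/* Fast-free path: the shared mempool is learned once from
		 * the first transmitted packet and cached on the queue;
		 * afterwards mbufs are returned raw, with no per-mbuf checks. */
		if (unlikely(h->mb_pool == (void *)UINTPTR_MAX)) {
			if (unlikely(nb_bufs == 0))
				return;
			h->mb_pool = bufs[0]->pool;
		}
		rte_mbuf_raw_free_bulk(h->mb_pool, bufs, nb_bufs);
	} else {
		/* Generic path: handles segments, reference counts,
		 * indirect mbufs, and mixed mempools per mbuf. */
		rte_pktmbuf_free_bulk(bufs, nb_bufs);
	}
}
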
@@ -225,48 +178,9 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	return nb_bufs;
 }
 
-static void
-eth_dev_assign_rxtx_ops(struct rte_eth_dev *dev)
-{
-	struct pmd_internals *internals = dev->data->dev_private;
-
-	if (internals->packet_copy) {
-		dev->rx_pkt_burst = eth_null_copy_rx;
-		dev->tx_pkt_burst = eth_null_copy_tx;
-	} else {
-		if (internals->no_rx)
-			dev->rx_pkt_burst = eth_null_no_rx;
-		else
-			dev->rx_pkt_burst = eth_null_rx;
-
-		dev->tx_pkt_burst = eth_null_tx;
-		if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
-			dev->tx_pkt_burst = eth_null_tx_no_mbuf_fast_free;
-		if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
-			dev->tx_pkt_burst = eth_null_tx_mbuf_fast_free;
-	}
-}
-
 static int
-eth_dev_configure(struct rte_eth_dev *dev)
+eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
-	struct pmd_internals *internals = dev->data->dev_private;
-
-	if ((dev->data->dev_conf.txmode.offloads &
-			(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)) ==
-			(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)) {
-		PMD_LOG(ERR,
-			"TX offloads MBUF_FAST_FREE and MULTI_SEGS are mutually exclusive");
-		return -EINVAL;
-	}
-	if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE &&
-			internals->packet_copy) {
-		PMD_LOG(INFO,
-			"TX offload MBUF_FAST_FREE is ignored with %s argument",
-			ETH_NULL_PACKET_COPY_ARG);
-	}
-	/* Assign RX/TX ops depending on device TX offloads. */
-	eth_dev_assign_rxtx_ops(dev);
 	return 0;
 }
 
@@ -345,7 +259,7 @@ static int
 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc __rte_unused,
 		unsigned int socket_id __rte_unused,
-		const struct rte_eth_txconf *tx_conf)
+		const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct rte_mbuf *dummy_packet;
 	struct pmd_internals *internals;
@@ -359,20 +273,6 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -ENODEV;
 
-	if (((dev->data->dev_conf.txmode.offloads | tx_conf->offloads) &
-			(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)) ==
-			(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MULTI_SEGS)) {
-		PMD_LOG(ERR,
-			"TX offloads MBUF_FAST_FREE and MULTI_SEGS are mutually exclusive");
-		return -EINVAL;
-	}
-	if (tx_conf->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE &&
-			internals->packet_copy) {
-		PMD_LOG(INFO,
-			"TX offload MBUF_FAST_FREE is ignored with %s argument",
-			ETH_NULL_PACKET_COPY_ARG);
-	}
-
 	packet_size = internals->packet_size;
 
 	dev->data->tx_queues[tx_queue_id] =
@@ -384,10 +284,6 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 
 	internals->tx_null_queues[tx_queue_id].internals = internals;
 	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;
-	internals->tx_null_queues[tx_queue_id].mb_pool =
-			(dev->data->dev_conf.txmode.offloads | tx_conf->offloads) &
-			RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE ?
-			(void *)UINTPTR_MAX : NULL;
 
 	return 0;
 }
@@ -413,10 +309,7 @@ eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
 	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
 	dev_info->min_rx_bufsize = 0;
-	dev_info->tx_queue_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
-		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
-	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
-		dev_info->tx_queue_offload_capa;
+	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE;
 
 	dev_info->reta_size = internals->reta_size;
 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
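
After this change the null PMD no longer advertises RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE in its capabilities, so an application that checks before requesting the offload falls back to the generic free path without error. A hedged application-side sketch; port_id, queue counts, and the remaining eth_conf fields are assumptions, not taken from this patch:

#include <rte_ethdev.h>

	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf eth_conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Request fast mbuf free only where the PMD reports support. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		eth_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	rte_eth_dev_configure(port_id, 1, 1, &eth_conf);
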
@@ -698,7 +591,16 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 	eth_dev->dev_ops = &ops;
 
 	/* finally assign rx and tx ops */
-	eth_dev_assign_rxtx_ops(eth_dev);
+	if (internals->packet_copy) {
+		eth_dev->rx_pkt_burst = eth_null_copy_rx;
+		eth_dev->tx_pkt_burst = eth_null_copy_tx;
+	} else if (internals->no_rx) {
+		eth_dev->rx_pkt_burst = eth_null_no_rx;
+		eth_dev->tx_pkt_burst = eth_null_tx;
+	} else {
+		eth_dev->rx_pkt_burst = eth_null_rx;
+		eth_dev->tx_pkt_burst = eth_null_tx;
+	}
 
 	rte_eth_dev_probing_finish(eth_dev);
 	return 0;
@@ -777,6 +679,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
 	PMD_LOG(INFO, "Initializing pmd_null for %s", name);
 
 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		struct pmd_internals *internals;
 		eth_dev = rte_eth_dev_attach_secondary(name);
 		if (!eth_dev) {
 			PMD_LOG(ERR, "Failed to probe %s", name);
@@ -785,7 +688,17 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
 		/* TODO: request info from primary to set up Rx and Tx */
 		eth_dev->dev_ops = &ops;
 		eth_dev->device = &dev->device;
-		eth_dev_assign_rxtx_ops(eth_dev);
+		internals = eth_dev->data->dev_private;
+		if (internals->packet_copy) {
+			eth_dev->rx_pkt_burst = eth_null_copy_rx;
+			eth_dev->tx_pkt_burst = eth_null_copy_tx;
+		} else if (internals->no_rx) {
+			eth_dev->rx_pkt_burst = eth_null_no_rx;
+			eth_dev->tx_pkt_burst = eth_null_tx;
+		} else {
+			eth_dev->rx_pkt_burst = eth_null_rx;
+			eth_dev->tx_pkt_burst = eth_null_tx;
+		}
 		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
 	}