@@ -240,6 +240,9 @@ config_dmadevs(struct test_configure *cfg)
 	uint8_t nb_sges = 0;
 	char *dma_name;
 
+	if (cfg->test_type != TEST_TYPE_DMA_MEM_COPY)
+		return 0;
+
 	if (cfg->is_sg)
 		nb_sges = RTE_MAX(cfg->nb_src_sges, cfg->nb_dst_sges);
 
@@ -540,7 +543,7 @@ dummy_free_ext_buf(void *addr, void *opaque)
 
 static int
 setup_memory_env(struct test_configure *cfg, uint32_t nr_buf,
-		 struct rte_mbuf ***srcs, struct rte_mbuf ***dsts,
+		 struct rte_mbuf ***srcs, struct rte_mbuf ***dsts,
 		 struct rte_dma_sge **src_sges, struct rte_dma_sge **dst_sges,
 		 struct rte_dma_op ***dma_ops)
 {
@@ -681,6 +684,39 @@ setup_memory_env(struct test_configure *cfg, uint32_t nr_buf,
 	return 0;
 }
 
+static void
+teardown_memory_env(uint32_t nr_buf, struct rte_mbuf **srcs, struct rte_mbuf **dsts,
+		    struct rte_dma_sge *src_sges, struct rte_dma_sge *dst_sges,
+		    struct rte_dma_op **dma_ops)
+{
+	/* free mbufs used in the test */
+	if (srcs != NULL)
+		rte_pktmbuf_free_bulk(srcs, nr_buf);
+	if (dsts != NULL)
+		rte_pktmbuf_free_bulk(dsts, nr_buf);
+
+	/* free the pointer arrays for the mbufs */
+	rte_free(srcs);
+	srcs = NULL;
+	rte_free(dsts);
+	dsts = NULL;
+
+	rte_mempool_free(src_pool);
+	src_pool = NULL;
+
+	rte_mempool_free(dst_pool);
+	dst_pool = NULL;
+
+	/* free sges for mbufs */
+	rte_free(src_sges);
+	src_sges = NULL;
+
+	rte_free(dst_sges);
+	dst_sges = NULL;
+
+	rte_free(dma_ops);
+}
+
 static uint32_t
 align_buffer_count(struct test_configure *cfg, uint32_t *nr_sgsrc, uint32_t *nr_sgdst)
 {
@@ -883,48 +919,23 @@ verify_data(struct test_configure *cfg, struct rte_mbuf **srcs, struct rte_mbuf
 	return 0;
 }
 
-int
-mem_copy_benchmark(struct test_configure *cfg)
+static int
+setup_worker(struct test_configure *cfg, uint32_t nr_buf,
+	     struct rte_mbuf **srcs, struct rte_mbuf **dsts,
+	     struct rte_dma_sge *src_sges, struct rte_dma_sge *dst_sges,
+	     struct rte_dma_op **dma_ops,
+	     uint32_t nr_sgsrc, uint32_t nr_sgdst)
 {
-	struct rte_mbuf **srcs = NULL, **dsts = NULL, **m = NULL;
-	struct rte_dma_sge *src_sges = NULL, *dst_sges = NULL;
-	struct vchan_dev_config *vchan_dev = NULL;
 	struct lcore_dma_map_t *lcore_dma_map = NULL;
-	struct rte_dma_op **dma_ops = NULL;
+	struct vchan_dev_config *vchan_dev = NULL;
 	unsigned int buf_size = cfg->buf_size.cur;
 	uint16_t kick_batch = cfg->kick_batch.cur;
 	uint16_t test_secs = global_cfg.test_secs;
 	uint16_t nb_workers = cfg->num_worker;
-	uint32_t nr_sgsrc = 0, nr_sgdst = 0;
-	float bandwidth, bandwidth_total;
 	unsigned int lcore_id = 0;
-	uint32_t avg_cycles_total;
-	bool dev_stopped = false;
-	uint32_t avg_cycles = 0;
-	float mops, mops_total;
-	float memory = 0;
 	uint32_t i, j, k;
-	uint32_t nr_buf;
 	uint32_t nr_ops;
 	uint32_t offset;
-	int ret = 0;
-
-	nr_buf = align_buffer_count(cfg, &nr_sgsrc, &nr_sgdst);
-
-	if (setup_memory_env(cfg, nr_buf, &srcs, &dsts, &src_sges, &dst_sges, &dma_ops) < 0)
-		goto out;
-
-	if (cfg->test_type == TEST_TYPE_DMA_MEM_COPY)
-		if (config_dmadevs(cfg) < 0)
-			goto out;
-
-	if (global_cfg.cache_flush > 0) {
-		cache_flush_buf(srcs, buf_size, nr_buf);
-		cache_flush_buf(dsts, buf_size, nr_buf);
-		rte_mb();
-	}
-
-	printf("Start testing....\n");
 
 	for (i = 0; i < nb_workers; i++) {
 		lcore_dma_map = &cfg->dma_config[i].lcore_dma_map;
@@ -935,7 +946,7 @@ mem_copy_benchmark(struct test_configure *cfg)
 		lcores[i] = rte_malloc(NULL, sizeof(struct lcore_params), 0);
 		if (lcores[i] == NULL) {
 			printf("lcore parameters malloc failure for lcore %d\n", lcore_id);
-			break;
+			return -1;
 		}
 		if (cfg->test_type == TEST_TYPE_DMA_MEM_COPY) {
 			lcores[i]->dma_name = lcore_dma_map->dma_names;
@@ -963,7 +974,7 @@ mem_copy_benchmark(struct test_configure *cfg)
 		    vchan_dev->tdir == RTE_DMA_DIR_MEM_TO_DEV) {
 			if (attach_ext_buffer(vchan_dev, lcores[i], cfg->is_sg,
 					      (nr_sgsrc/nb_workers), (nr_sgdst/nb_workers)) < 0)
-				goto stop_dmadev;
+				return -1;
 		}
 
 		if (cfg->is_sg && cfg->use_ops) {
@@ -988,6 +999,88 @@ mem_copy_benchmark(struct test_configure *cfg)
 		rte_eal_remote_launch(get_work_function(cfg), (void *)(lcores[i]), lcore_id);
 	}
 
+	return 0;
+}
+
+static void
+teardown_worker_res(struct test_configure *cfg, uint32_t nr_buf,
+		    struct rte_mbuf **srcs, struct rte_mbuf **dsts)
+{
+	uint16_t nb_workers = cfg->num_worker;
+	struct vchan_dev_config *vchan_dev;
+	struct rte_mbuf **m;
+	uint32_t offset;
+	uint32_t i, j;
+
+	for (i = 0; i < nb_workers; i++) {
+		struct rte_mbuf **sbuf = NULL, **dbuf = NULL;
+		vchan_dev = &cfg->dma_config[i].vchan_dev;
+		offset = nr_buf / nb_workers * i;
+		m = NULL;
+		if (vchan_dev->tdir == RTE_DMA_DIR_DEV_TO_MEM) {
+			sbuf = srcs + offset;
+			m = sbuf;
+		} else if (vchan_dev->tdir == RTE_DMA_DIR_MEM_TO_DEV) {
+			dbuf = dsts + offset;
+			m = dbuf;
+		}
+
+		if (m) {
+			for (j = 0; j < (nr_buf / nb_workers); j++)
+				rte_pktmbuf_detach_extbuf(m[j]);
+
+			if (m[0]->shinfo && rte_mbuf_ext_refcnt_read(m[0]->shinfo) == 0)
+				rte_free(m[0]->shinfo);
+		}
+
+		rte_free(lcores[i]);
+		lcores[i] = NULL;
+	}
+}
+
+int
+mem_copy_benchmark(struct test_configure *cfg)
+{
+	struct rte_mbuf **srcs = NULL, **dsts = NULL;
+	struct rte_dma_sge *src_sges = NULL, *dst_sges = NULL;
+	struct vchan_dev_config *vchan_dev = NULL;
+	unsigned int buf_size = cfg->buf_size.cur;
+	uint16_t kick_batch = cfg->kick_batch.cur;
+	uint16_t test_secs = global_cfg.test_secs;
+	uint16_t nb_workers = cfg->num_worker;
+	uint32_t nr_sgsrc = 0, nr_sgdst = 0;
+	struct rte_dma_op **dma_ops = NULL;
+	float bandwidth, bandwidth_total;
+	uint32_t avg_cycles_total;
+	bool dev_stopped = false;
+	uint32_t avg_cycles = 0;
+	float mops, mops_total;
+	float memory = 0;
+	uint32_t nr_buf;
+	int ret = -1;
+	uint32_t i;
+
+	nr_buf = align_buffer_count(cfg, &nr_sgsrc, &nr_sgdst);
+
+	if (setup_memory_env(cfg, nr_buf, &srcs, &dsts, &src_sges, &dst_sges, &dma_ops) < 0)
+		goto out;
+
+	if (config_dmadevs(cfg) < 0)
+		goto out;
+
+	if (global_cfg.cache_flush > 0) {
+		cache_flush_buf(srcs, buf_size, nr_buf);
+		cache_flush_buf(dsts, buf_size, nr_buf);
+		rte_mb();
+	}
+
+	printf("Start testing....\n");
+
+	ret = setup_worker(cfg, nr_buf, srcs, dsts, src_sges, dst_sges, dma_ops,
+			   nr_sgsrc, nr_sgdst);
+	if (ret != 0)
+		goto stop_dmadev;
+
 	while (1) {
 		bool ready = true;
 		for (i = 0; i < nb_workers; i++) {
@@ -1048,58 +1141,8 @@ mem_copy_benchmark(struct test_configure *cfg)
 	stop_dmadev(cfg, &dev_stopped);
 
 out:
-	for (k = 0; k < nb_workers; k++) {
-		struct rte_mbuf **sbuf = NULL, **dbuf = NULL;
-		vchan_dev = &cfg->dma_config[k].vchan_dev;
-		offset = nr_buf / nb_workers * k;
-		m = NULL;
-		if (vchan_dev->tdir == RTE_DMA_DIR_DEV_TO_MEM) {
-			sbuf = srcs + offset;
-			m = sbuf;
-		} else if (vchan_dev->tdir == RTE_DMA_DIR_MEM_TO_DEV) {
-			dbuf = dsts + offset;
-			m = dbuf;
-		}
-
-		if (m) {
-			for (i = 0; i < (nr_buf / nb_workers); i++)
-				rte_pktmbuf_detach_extbuf(m[i]);
-
-			if (m[0]->shinfo && rte_mbuf_ext_refcnt_read(m[0]->shinfo) == 0)
-				rte_free(m[0]->shinfo);
-		}
-	}
-
-	/* free mbufs used in the test */
-	if (srcs != NULL)
-		rte_pktmbuf_free_bulk(srcs, nr_buf);
-	if (dsts != NULL)
-		rte_pktmbuf_free_bulk(dsts, nr_buf);
-
-	/* free the points for the mbufs */
-	rte_free(srcs);
-	srcs = NULL;
-	rte_free(dsts);
-	dsts = NULL;
-
-	rte_mempool_free(src_pool);
-	src_pool = NULL;
-
-	rte_mempool_free(dst_pool);
-	dst_pool = NULL;
-
-	/* free sges for mbufs */
-	rte_free(src_sges);
-	src_sges = NULL;
-
-	rte_free(dst_sges);
-	dst_sges = NULL;
-
-	/* free the worker parameters */
-	for (i = 0; i < nb_workers; i++) {
-		rte_free(lcores[i]);
-		lcores[i] = NULL;
-	}
+	teardown_worker_res(cfg, nr_buf, srcs, dsts);
+	teardown_memory_env(nr_buf, srcs, dsts, src_sges, dst_sges, dma_ops);
 
 	return ret;
 }