@@ -69,16 +69,33 @@ enum nthw_virt_queue_usage {
 
 struct nthw_virt_queue {
 	/* Pointers to virt-queue structs */
-	struct {
-		/* SPLIT virtqueue */
-		struct virtq_avail *p_avail;
-		struct virtq_used *p_used;
-		struct virtq_desc *p_desc;
-		/* Control variables for virt-queue structs */
-		uint16_t am_idx;
-		uint16_t used_idx;
-		uint16_t cached_idx;
-		uint16_t tx_descr_avail_idx;
+	union {
+		struct {
+			/* SPLIT virtqueue */
+			struct virtq_avail *p_avail;
+			struct virtq_used *p_used;
+			struct virtq_desc *p_desc;
+			/* Control variables for virt-queue structs */
+			uint16_t am_idx;
+			uint16_t used_idx;
+			uint16_t cached_idx;
+			uint16_t tx_descr_avail_idx;
+		};
+		struct {
+			/* PACKED virtqueue */
+			struct pvirtq_event_suppress *driver_event;
+			struct pvirtq_event_suppress *device_event;
+			struct pvirtq_desc *desc;
+			/*
+			 * When the FPGA releases used Tx packets in-order they may
+			 * collapse into a batch, so when getting new Tx buffers we
+			 * may only need a partial refill.
+			 */
+			uint16_t next_avail;
+			uint16_t next_used;
+			uint16_t avail_wrap_count;
+			uint16_t used_wrap_count;
+		};
 	};
 
 	/* Array with packet buffers */
@@ -108,6 +125,11 @@ struct nthw_virt_queue {
 	void *desc_struct_phys_addr;
 };
 
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
+};
+
 static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
 static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
 
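The pvirtq_desc and pvirtq_event_suppress types referenced by the new union members and by the layout struct are declared elsewhere in the driver and are not part of this diff. For reference only, a minimal sketch consistent with the VIRTIO 1.1 packed-ring layout; the field names follow the specification and are assumptions here, not this driver's exact definitions:

/* Reference only - not part of this patch: minimal packed-ring types per VIRTIO 1.1 */
#include <stdint.h>

struct pvirtq_desc {
	uint64_t addr;   /* buffer guest-physical address */
	uint32_t len;    /* buffer length in bytes */
	uint16_t id;     /* buffer ID, handed back by the device when the descriptor is used */
	uint16_t flags;  /* VIRTQ_DESC_F_AVAIL / VIRTQ_DESC_F_USED / VIRTQ_DESC_F_WRITE */
};

struct pvirtq_event_suppress {
	uint16_t desc;   /* descriptor ring offset plus wrap bit for the event */
	uint16_t flags;  /* RING_EVENT_FLAGS_ENABLE / _DISABLE / _DESC */
};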
@@ -606,6 +628,143 @@ nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
 	return &txvq[index];
 }
 
+/*
+ * Packed Ring
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+	struct pvirtq_struct_layout_s *pvirtq_layout,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers,
+	uint16_t flags,
+	int rx)
+{
+	/* page aligned */
+	assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+	assert(p_packet_buffers);
+
+	/* clean canvas */
+	memset(p_virt_struct_area->virt_addr, 0,
+	       sizeof(struct pvirtq_desc) * vq->queue_size +
+	       sizeof(struct pvirtq_event_suppress) * 2 + sizeof(int) * vq->queue_size);
+
+	pvirtq_layout->device_event_offset = sizeof(struct pvirtq_desc) * vq->queue_size;
+	pvirtq_layout->driver_event_offset =
+		pvirtq_layout->device_event_offset + sizeof(struct pvirtq_event_suppress);
+
+	vq->desc = p_virt_struct_area->virt_addr;
+	vq->device_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->device_event_offset);
+	vq->driver_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->driver_event_offset);
+
+	vq->next_avail = 0;
+	vq->next_used = 0;
+	vq->avail_wrap_count = 1;
+	vq->used_wrap_count = 1;
+
+	/*
+	 * Only possible because the FPGA always delivers in-order;
+	 * the buffer ID used is the index into the p_packet_buffers array.
+	 */
+	unsigned int i;
+	struct pvirtq_desc *p_desc = vq->desc;
+
+	for (i = 0; i < vq->queue_size; i++) {
+		if (rx) {
+			p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+			p_desc[i].len = p_packet_buffers[i].len;
+		}
+
+		p_desc[i].id = i;
+		p_desc[i].flags = flags;
+	}
+
+	if (rx)
+		vq->avail_wrap_count ^= 1;	/* filled up available buffers for Rx */
+	else
+		vq->used_wrap_count ^= 1;	/* pre-fill free buffer IDs */
+
+	if (vq->queue_size == 0)
+		return -1;	/* don't allocate memory with size of 0 bytes */
+
+	vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+
+	if (vq->p_virtual_addr == NULL)
+		return -1;
+
+	memcpy(vq->p_virtual_addr, p_packet_buffers, vq->queue_size * sizeof(*p_packet_buffers));
+
+	/* Not used by the FPGA yet - make sure event suppression is disabled */
+	vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+	return 0;
+}
+
+static struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+					uint32_t index,
+					uint32_t queue_size,
+					uint32_t host_id,
+					uint32_t header,
+					struct nthw_memory_descriptor *p_virt_struct_area,
+					struct nthw_memory_descriptor *p_packet_buffers,
+					int irq_vector)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &rxvq[index];
+	/* Set size and set up the packed vq ring */
+	vq->queue_size = queue_size;
+
+	/* Use Avail flag bit == 1 because the wrap bit is initially set to 1 - and Used is the inverse */
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
+						 p_packet_buffers,
+						 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+		return NULL;
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000, 0,	/* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
+				 header, PACKED_RING, irq_vector);
+
+	vq->usage = NTHW_VIRTQ_MANAGED;
+	return vq;
+}
+
+static struct nthw_virt_queue *
+nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+					uint32_t index,
+					uint32_t queue_size,
+					uint32_t host_id,
+					uint32_t port,
+					uint32_t virtual_port,
+					uint32_t header,
+					int irq_vector,
+					uint32_t in_order,
+					struct nthw_memory_descriptor *p_virt_struct_area,
+					struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct pvirtq_struct_layout_s pvirtq_layout;
+	struct nthw_virt_queue *vq = &txvq[index];
+	/* Set size and set up the packed vq ring */
+	vq->queue_size = queue_size;
+
+	if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
+						 p_packet_buffers, 0, 0) != 0)
+		return NULL;
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000, 0,	/* start wrap ring counter as 1 */
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.driver_event_offset),
+				 (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+					  pvirtq_layout.device_event_offset),
+				 p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
+				 port, virtual_port, header, PACKED_RING, irq_vector, in_order);
+
+	vq->usage = NTHW_VIRTQ_MANAGED;
+	return vq;
+}
+
 /*
  * Create a Managed Rx Virt Queue
  *
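For context, the wrap counters and descriptor flags initialised above follow the standard VIRTIO 1.1 packed-ring convention: the driver makes a descriptor available by setting its AVAIL bit to its avail wrap counter and its USED bit to the inverse, and the device marks it used by making both bits equal to the wrap counter. This is why the Rx setup above pre-fills descriptors with VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL while the counters start at 1, and why the setup toggles a counter after filling a full ring's worth of entries. A minimal sketch of those checks; the flag values come from the VIRTIO 1.1 specification, and the helper names are illustrative, not part of this driver:

/* Reference only - illustrative VIRTIO 1.1 packed-ring flag handling, not this driver's code */
#include <stdbool.h>
#include <stdint.h>

#define VIRTQ_DESC_F_AVAIL	(1 << 7)	/* per VIRTIO 1.1 */
#define VIRTQ_DESC_F_USED	(1 << 15)	/* per VIRTIO 1.1 */

/* Driver view: the device has consumed this descriptor when both bits equal
 * the ring's current used wrap counter.
 */
static inline bool pvirtq_desc_is_used(uint16_t flags, uint16_t used_wrap_count)
{
	bool avail = !!(flags & VIRTQ_DESC_F_AVAIL);
	bool used = !!(flags & VIRTQ_DESC_F_USED);

	return avail == used && used == (used_wrap_count != 0);
}

/* Driver view: flags written when handing a descriptor to the device -
 * AVAIL equals the avail wrap counter, USED is its inverse.
 */
static inline uint16_t pvirtq_avail_flags(uint16_t base_flags, uint16_t avail_wrap_count)
{
	return base_flags | (avail_wrap_count ? VIRTQ_DESC_F_AVAIL : VIRTQ_DESC_F_USED);
}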
@@ -630,6 +789,11 @@ nthw_setup_mngd_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 							host_id, header, p_virt_struct_area,
 							p_packet_buffers, irq_vector);
 
+	case PACKED_RING:
+		return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs, index, queue_size,
+							       host_id, header, p_virt_struct_area,
+							       p_packet_buffers, irq_vector);
+
 	default:
 		break;
 	}
@@ -666,6 +830,13 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 							       p_virt_struct_area,
 							       p_packet_buffers);
 
+	case PACKED_RING:
+		return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index, queue_size,
+							       host_id, port, virtual_port, header,
+							       irq_vector, in_order,
+							       p_virt_struct_area,
+							       p_packet_buffers);
+
 	default:
 		break;
 	}
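Both new PACKED_RING cases end up in nthw_setup_managed_virt_queue_packed(), so a caller of nthw_setup_mngd_rx_virt_queue() or nthw_setup_mngd_tx_virt_queue() must supply a p_virt_struct_area that is page aligned (checked by the assert) and at least as large as the region that function zeroes. A hypothetical sizing helper restating that computation; the trailing int array is cleared there, but its use is not shown in this diff:

/* Illustrative only - mirrors the memset() length in nthw_setup_managed_virt_queue_packed(),
 * using the pvirtq_desc / pvirtq_event_suppress types sketched earlier.
 */
#include <stddef.h>
#include <stdint.h>

static size_t pvirtq_struct_area_size(uint32_t queue_size)
{
	return sizeof(struct pvirtq_desc) * queue_size +	/* descriptor ring */
	       sizeof(struct pvirtq_event_suppress) * 2 +	/* device + driver event areas */
	       sizeof(int) * queue_size;			/* extra per-descriptor ints cleared by the memset */
}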