 #error DWC2 requires either CFG_TUH_DWC2_SLAVE_ENABLE or CFG_TUH_DWC2_DMA_ENABLE to be enabled
 #endif
 
-// Debug level for DWC2
-#define DWC2_DEBUG 2
-
 #include "host/hcd.h"
 #include "host/usbh.h"
 #include "dwc2_common.h"
 
-// Max number of endpoints application can open, can be larger than DWC2_CHANNEL_COUNT_MAX
-#ifndef CFG_TUH_DWC2_ENDPOINT_MAX
-#define CFG_TUH_DWC2_ENDPOINT_MAX 16
-#endif
+// Debug level for DWC2
+#define DWC2_DEBUG 2
+
+// Max number of endpoints application can open, can be larger than DWC2_CHANNEL_COUNT_MAX
+#ifndef CFG_TUH_DWC2_ENDPOINT_MAX
+#define CFG_TUH_DWC2_ENDPOINT_MAX 16u
+#endif
 
-#define DWC2_CHANNEL_COUNT_MAX 16 // absolute max channel count
+#define DWC2_CHANNEL_COUNT_MAX 16u // absolute max channel count
 TU_VERIFY_STATIC(CFG_TUH_DWC2_ENDPOINT_MAX <= 255, "currently only use 8-bit for index");
 
 enum {
@@ -79,7 +79,8 @@ typedef struct {
     uint32_t speed : 2;
     uint32_t next_pid : 2;     // PID for next transfer
     uint32_t next_do_ping : 1; // Do PING for next transfer if possible (highspeed OUT)
-    // uint32_t : 9;
+    uint32_t closing : 1;      // endpoint is closing
+    // uint32_t : 8;
   };
 
   uint32_t uframe_countdown; // micro-frame count down to transfer for periodic, only need 18-bit
@@ -96,6 +97,7 @@ typedef struct {
     uint8_t err_count : 3;
     uint8_t period_split_nyet_count : 3;
     uint8_t halted_nyet : 1;
+    uint8_t closing : 1; // closing channel
   };
   uint8_t result;
 
@@ -195,16 +197,31 @@ TU_ATTR_ALWAYS_INLINE static inline void channel_dealloc(dwc2_regs_t* dwc2, uint
 }
 
 TU_ATTR_ALWAYS_INLINE static inline bool channel_disable(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
-  // disable also require request queue
-  TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
+  const bool is_period = channel_is_periodic(channel->hcchar);
+  if (dma_host_enabled(dwc2)) {
+    // In buffer DMA or external DMA mode:
+    // - Channel disable must not be programmed for non-split periodic channels. At the end of the next uframe/frame
+    //   (in the worst case), the controller generates a channel halted and disables the channel automatically.
+    // - For split-enabled channels (both non-periodic and periodic), channel disable must not be programmed randomly.
+    //   However, channel disable can be programmed for specific scenarios such as NAK and FrmOvrn.
+    if (is_period && (channel->hcsplt & HCSPLT_SPLITEN)) {
+      return true;
+    }
+  } else {
+    while (0 == req_queue_avail(dwc2, is_period)) {
+      // blocking wait until a request queue entry is available
+    }
+  }
   channel->hcintmsk |= HCINT_HALTED;
   channel->hcchar |= HCCHAR_CHDIS | HCCHAR_CHENA; // must set both CHDIS and CHENA
   return true;
 }
 
 // attempt to send an IN token to receive data
 TU_ATTR_ALWAYS_INLINE static inline bool channel_send_in_token(const dwc2_regs_t* dwc2, dwc2_channel_t* channel) {
-  TU_ASSERT(req_queue_avail(dwc2, channel_is_periodic(channel->hcchar)));
+  while (0 == req_queue_avail(dwc2, channel_is_periodic(channel->hcchar))) {
+    // blocking wait until a request queue entry is available
+  }
   channel->hcchar |= HCCHAR_CHENA;
   return true;
 }
@@ -237,13 +254,37 @@ TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_alloc(void) {
   return TUSB_INDEX_INVALID_8;
 }
 
-// Find a endpoint that is opened previously with hcd_edpt_open()
+TU_ATTR_ALWAYS_INLINE static inline void edpt_dealloc(hcd_endpoint_t* edpt) {
+  edpt->hcchar_bm.enable = 0;
+}
+
+// close an opened endpoint
+static void edpt_close(dwc2_regs_t* dwc2, uint8_t ep_id) {
+  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
+  edpt->closing = 1; // mark endpoint as closing
+
+  // disable the active channel belonging to this endpoint
+  for (uint8_t ch_id = 0; ch_id < DWC2_CHANNEL_COUNT_MAX; ch_id++) {
+    hcd_xfer_t* xfer = &_hcd_data.xfer[ch_id];
+    if (xfer->allocated && xfer->ep_id == ep_id) {
+      dwc2_channel_t* channel = &dwc2->channel[ch_id];
+      xfer->closing = 1;
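+      // the channel-halted ISR will observe xfer->closing and finish de-allocating the endpoint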
+      channel_disable(dwc2, channel);
+      return; // only 1 active channel per endpoint
+    }
+  }
+
+  edpt_dealloc(edpt); // no active channel, safe to de-alloc now
+}
+
+// Find an endpoint that is opened previously with hcd_edpt_open()
 // Note: EP0 is bidirectional
 TU_ATTR_ALWAYS_INLINE static inline uint8_t edpt_find_opened(uint8_t dev_addr, uint8_t ep_num, uint8_t ep_dir) {
   for (uint8_t i = 0; i < (uint8_t)CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
-    const dwc2_channel_char_t* hcchar_bm = &_hcd_data.edpt[i].hcchar_bm;
-    if (hcchar_bm->enable && hcchar_bm->dev_addr == dev_addr &&
-        hcchar_bm->ep_num == ep_num && (ep_num == 0 || hcchar_bm->ep_dir == ep_dir)) {
+    const hcd_endpoint_t* edpt = &_hcd_data.edpt[i];
+    const dwc2_channel_char_t hcchar_bm = edpt->hcchar_bm;
+    if (hcchar_bm.enable && hcchar_bm.dev_addr == dev_addr && hcchar_bm.ep_num == ep_num &&
+        (ep_num == 0 || hcchar_bm.ep_dir == ep_dir)) {
       return i;
     }
   }
@@ -456,11 +497,11 @@ tusb_speed_t hcd_port_speed_get(uint8_t rhport) {
 
 // HCD closes all opened endpoints belonging to this device
 void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
-  (void) rhport;
-  for (uint8_t i = 0; i < (uint8_t) CFG_TUH_DWC2_ENDPOINT_MAX; i++) {
-    hcd_endpoint_t* edpt = &_hcd_data.edpt[i];
+  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
+  for (uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
+    const hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
     if (edpt->hcchar_bm.enable && edpt->hcchar_bm.dev_addr == dev_addr) {
-      tu_memclr(edpt, sizeof(hcd_endpoint_t));
+      edpt_close(dwc2, ep_id);
     }
   }
 }
@@ -520,8 +561,15 @@ bool hcd_edpt_open(uint8_t rhport, uint8_t dev_addr, const tusb_desc_endpoint_t*
 }
 
 bool hcd_edpt_close(uint8_t rhport, uint8_t daddr, uint8_t ep_addr) {
-  (void) rhport; (void) daddr; (void) ep_addr;
-  return false; // TODO not implemented yet
+  dwc2_regs_t* dwc2 = DWC2_REG(rhport);
+  const uint8_t ep_num = tu_edpt_number(ep_addr);
+  const uint8_t ep_dir = tu_edpt_dir(ep_addr);
+  const uint8_t ep_id = edpt_find_opened(daddr, ep_num, ep_dir);
+  TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
+
+  edpt_close(dwc2, ep_id);
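+  // note: if a transfer was in flight, final de-allocation completes later in the channel-halted interrupt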
+
+  return true;
 }
 
 // clean up channel after part of transfer is done but the whole urb is not complete
@@ -590,8 +638,7 @@ static bool channel_xfer_start(dwc2_regs_t* dwc2, uint8_t ch_id) {
   channel->hcint = 0xFFFFFFFFU; // clear all channel interrupts
 
   if (dma_host_enabled(dwc2)) {
-    uint32_t hcintmsk = HCINT_HALTED;
-    channel->hcintmsk = hcintmsk;
+    channel->hcintmsk = HCINT_HALTED;
     dwc2->haintmsk |= TU_BIT(ch_id);
 
     channel->hcdma = (uint32_t) edpt->buffer;
@@ -646,15 +693,15 @@ static bool edpt_xfer_kickoff(dwc2_regs_t* dwc2, uint8_t ep_id) {
   return channel_xfer_start(dwc2, ch_id);
 }
 
-// Submit a transfer, when complete hcd_event_xfer_complete() must be invoked
 bool hcd_edpt_xfer(uint8_t rhport, uint8_t dev_addr, uint8_t ep_addr, uint8_t* buffer, uint16_t buflen) {
   dwc2_regs_t* dwc2 = DWC2_REG(rhport);
   const uint8_t ep_num = tu_edpt_number(ep_addr);
   const uint8_t ep_dir = tu_edpt_dir(ep_addr);
 
   uint8_t ep_id = edpt_find_opened(dev_addr, ep_num, ep_dir);
   TU_ASSERT(ep_id < CFG_TUH_DWC2_ENDPOINT_MAX);
-  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
+  hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
+  TU_VERIFY(edpt->closing == 0); // skip if endpoint is closing
 
   edpt->buffer = buffer;
   edpt->buflen = buflen;
@@ -945,6 +992,8 @@ static bool handle_channel_in_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t h
     is_done = true;
   } else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
     xfer->result = XFER_RESULT_FAILED;
+    is_done = true;
+  } else if (xfer->closing) {
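+    // endpoint is being closed: stop retrying so the channel can be halted and de-allocated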
     is_done = true;
   } else {
     // got here due to NAK or NYET
@@ -1006,6 +1055,8 @@ static bool handle_channel_out_slave(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t
   } else if (xfer->err_count == HCD_XFER_ERROR_MAX) {
     xfer->result = XFER_RESULT_FAILED;
     is_done = true;
+  } else if (xfer->closing) {
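+    // endpoint is being closed: stop retrying so the channel can be halted and de-allocated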
+    is_done = true;
   } else {
     // Got here due to NAK or NYET
     TU_ASSERT(channel_xfer_start(dwc2, ch_id));
@@ -1122,6 +1173,10 @@ static bool handle_channel_in_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hci
       // retry start-split in next binterval
       channel_xfer_in_retry(dwc2, ch_id, hcint);
     }
+
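+    // an endpoint close may have been requested while this channel was active: mark it done so it gets de-allocated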
+    if (xfer->closing) {
+      is_done = true;
+    }
   }
 
   return is_done;
@@ -1182,6 +1237,10 @@ static bool handle_channel_out_dma(dwc2_regs_t* dwc2, uint8_t ch_id, uint32_t hc
         channel->hcchar |= HCCHAR_CHENA;
       }
     }
+
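+    // an endpoint close may have been requested while this channel was active: mark it done so it gets de-allocated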
+    if (xfer->closing) {
+      is_done = true;
+    }
   } else if (hcint & HCINT_ACK) {
     xfer->err_count = 0;
     channel->hcintmsk &= ~HCINT_ACK;
@@ -1226,12 +1285,17 @@ static void handle_channel_irq(uint8_t rhport, bool in_isr) {
     } else {
       is_done = handle_channel_in_slave(dwc2, ch_id, hcint);
     }
-#endif
+    #endif
   }
 
   if (is_done) {
-    const uint8_t ep_addr = tu_edpt_addr(hcchar.ep_num, hcchar.ep_dir);
-    hcd_event_xfer_complete(hcchar.dev_addr, ep_addr, xfer->xferred_bytes, (xfer_result_t)xfer->result, in_isr);
+    if (xfer->closing) {
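+      // channel halted as part of edpt_close(): finish the deferred de-allocation, no transfer event is reported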
+      hcd_endpoint_t* edpt = &_hcd_data.edpt[xfer->ep_id];
+      edpt_dealloc(edpt);
+    } else {
+      const uint8_t ep_addr = tu_edpt_addr(hcchar.ep_num, hcchar.ep_dir);
+      hcd_event_xfer_complete(hcchar.dev_addr, ep_addr, xfer->xferred_bytes, (xfer_result_t)xfer->result, in_isr);
+    }
     channel_dealloc(dwc2, ch_id);
   }
 }
@@ -1250,16 +1314,18 @@ static bool handle_sof_irq(uint8_t rhport, bool in_isr) {
   const uint32_t ucount = (hprt_speed_get(dwc2) == TUSB_SPEED_HIGH ? 1 : 8);
 
   for (uint8_t ep_id = 0; ep_id < CFG_TUH_DWC2_ENDPOINT_MAX; ep_id++) {
-    hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
-    if (edpt->hcchar_bm.enable && channel_is_periodic(edpt->hcchar) && edpt->uframe_countdown > 0) {
-      edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
-      if (edpt->uframe_countdown == 0) {
-        if (!edpt_xfer_kickoff(dwc2, ep_id)) {
-          edpt->uframe_countdown = ucount; // failed to start, try again next frame
+    hcd_endpoint_t* edpt = &_hcd_data.edpt[ep_id];
+    if (edpt->closing == 0) {
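+      // skip endpoints that are being closed: no new periodic transfer should be kicked off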
+      if (edpt->hcchar_bm.enable && channel_is_periodic(edpt->hcchar) && edpt->uframe_countdown > 0) {
+        edpt->uframe_countdown -= tu_min32(ucount, edpt->uframe_countdown);
+        if (edpt->uframe_countdown == 0) {
+          if (!edpt_xfer_kickoff(dwc2, ep_id)) {
+            edpt->uframe_countdown = ucount; // failed to start, try again next frame
+          }
         }
-      }
 
-      more_isr = true;
+        more_isr = true;
+      }
     }
   }
 