? sys/external/bsd/dwc2/cscope.out ? sys/external/bsd/dwc2/dist/cscope.out Index: sys/external/bsd/dwc2/dist/dwc2_core.h =================================================================== RCS file: /cvsroot/src/sys/external/bsd/dwc2/dist/dwc2_core.h,v retrieving revision 1.8 diff -u -p -r1.8 dwc2_core.h --- sys/external/bsd/dwc2/dist/dwc2_core.h 24 Feb 2016 22:17:54 -0000 1.8 +++ sys/external/bsd/dwc2/dist/dwc2_core.h 7 Aug 2018 05:50:00 -0000 @@ -756,6 +756,7 @@ struct dwc2_hsotg { } flags; struct list_head non_periodic_sched_inactive; + struct list_head non_periodic_sched_waiting; struct list_head non_periodic_sched_active; struct list_head *non_periodic_qh_ptr; struct list_head periodic_sched_inactive; Index: sys/external/bsd/dwc2/dist/dwc2_hcd.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/dwc2/dist/dwc2_hcd.c,v retrieving revision 1.20 diff -u -p -r1.20 dwc2_hcd.c --- sys/external/bsd/dwc2/dist/dwc2_hcd.c 1 Aug 2018 16:44:14 -0000 1.20 +++ sys/external/bsd/dwc2/dist/dwc2_hcd.c 7 Aug 2018 05:50:01 -0000 @@ -117,6 +117,10 @@ static void dwc2_dump_channel_info(struc list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive, qh_list_entry) dev_dbg(hsotg->dev, " %p\n", qh); + dev_dbg(hsotg->dev, " NP waiting sched:\n"); + list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting, + qh_list_entry) + dev_dbg(hsotg->dev, " %p\n", qh); dev_dbg(hsotg->dev, " NP active sched:\n"); list_for_each_entry(qh, &hsotg->non_periodic_sched_active, qh_list_entry) @@ -194,6 +198,7 @@ static void dwc2_qh_list_free(struct dwc static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg) { dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive); + dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active); dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive); dwc2_kill_urbs_in_qh_list(hsotg, 
&hsotg->periodic_sched_ready); @@ -852,6 +857,8 @@ static int dwc2_assign_and_init_hc(struc /* Non DWORD-aligned buffer case */ if (bufptr) { dev_vdbg(hsotg->dev, "Non-aligned buffer\n"); +printf("%s: qh %p chan (%s) addr %d:%d max %d\n", __func__, qh, + chan->ep_is_in ? "in" : "out", chan->dev_addr, chan->ep_num, chan->max_packet); if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) { dev_err(hsotg->dev, "%s: Failed to allocate memory to handle non-dword aligned buffer\n", @@ -2215,6 +2222,7 @@ static void dwc2_hcd_free(struct dwc2_hs /* Free memory for QH/QTD lists */ dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive); + dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting); dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active); dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive); dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready); @@ -2337,6 +2345,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hso /* Initialize the non-periodic schedule */ INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive); + INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting); INIT_LIST_HEAD(&hsotg->non_periodic_sched_active); /* Initialize the periodic schedule */ Index: sys/external/bsd/dwc2/dist/dwc2_hcd.h =================================================================== RCS file: /cvsroot/src/sys/external/bsd/dwc2/dist/dwc2_hcd.h,v retrieving revision 1.14 diff -u -p -r1.14 dwc2_hcd.h --- sys/external/bsd/dwc2/dist/dwc2_hcd.h 23 Apr 2016 10:15:30 -0000 1.14 +++ sys/external/bsd/dwc2/dist/dwc2_hcd.h 7 Aug 2018 05:50:01 -0000 @@ -222,6 +222,7 @@ enum dwc2_transaction_type { /** * struct dwc2_qh - Software queue head structure * + * @hsotg: The HCD state structure for the DWC OTG controller * @ep_type: Endpoint type. One of the following values: * - USB_ENDPOINT_XFER_CONTROL * - USB_ENDPOINT_XFER_BULK @@ -264,13 +265,18 @@ enum dwc2_transaction_type { * @n_bytes: Xfer Bytes array. 
Each element corresponds to a transfer * descriptor and indicates original XferSize value for the * descriptor + * @wait_timer: Timer used to wait before re-queuing. * @tt_buffer_dirty True if clear_tt_buffer_complete is pending + * @want_wait: We should wait before re-queuing; only matters for non- + * periodic transfers and is ignored for periodic ones. + * @wait_timer_cancel: Set to true to cancel the wait_timer. * * A Queue Head (QH) holds the static characteristics of an endpoint and * maintains a list of transfers (QTDs) for that endpoint. A QH structure may * be entered in either the non-periodic or periodic schedule. */ struct dwc2_qh { + struct dwc2_hsotg *hsotg; u8 ep_type; u8 ep_is_in; u16 maxp; @@ -299,7 +305,10 @@ struct dwc2_qh { dma_addr_t desc_list_dma; u32 desc_list_sz; u32 *n_bytes; + callout_t wait_timer; unsigned tt_buffer_dirty:1; + unsigned want_wait:1; + unsigned wait_timer_cancel:1; }; /** @@ -330,6 +339,7 @@ struct dwc2_qh { * @n_desc: Number of DMA descriptors for this QTD * @isoc_frame_index_last: Last activated frame (packet) index, used in * descriptor DMA mode only + * @num_naks: Number of NAKs received on this QTD. 
* @urb: URB for this transfer * @qh: Queue head for this QTD * @qtd_list_entry: For linking to the QH's list of QTDs @@ -360,6 +370,7 @@ struct dwc2_qtd { u8 error_count; u8 n_desc; u16 isoc_frame_index_last; + u16 num_naks; struct dwc2_hcd_urb *urb; struct dwc2_qh *qh; struct list_head qtd_list_entry; Index: sys/external/bsd/dwc2/dist/dwc2_hcdintr.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/dwc2/dist/dwc2_hcdintr.c,v retrieving revision 1.13 diff -u -p -r1.13 dwc2_hcdintr.c --- sys/external/bsd/dwc2/dist/dwc2_hcdintr.c 14 Feb 2016 10:53:30 -0000 1.13 +++ sys/external/bsd/dwc2/dist/dwc2_hcdintr.c 7 Aug 2018 05:50:03 -0000 @@ -60,6 +60,32 @@ __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdintr #include "dwc2_core.h" #include "dwc2_hcd.h" +/* XXXsimonb */ +#include <sys/evcnt.h> + +#define DWC2_COUNTERS /* XXX */ +#ifdef DWC2_COUNTERS +#define DWC2_STATS(name) \ +static struct evcnt n##name = \ + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "dwc2hdc", #name); \ +EVCNT_ATTACH_STATIC(n##name) + +DWC2_STATS(hcd_intr); +DWC2_STATS(hc_nak_ping); +DWC2_STATS(hc_nak_out); + +#define DWC2_COUNT(var) n##var.ev_count++ +#else +#define DWC2_COUNT(var) do { /* nothing */ } while (0) +#endif /* DWC2_COUNTERS */ +/* XXXsimonb */ + +/* + * If we get this many NAKs on a split transaction we'll slow down + * retransmission. A 1 here means delay after the first NAK. + */ +#define DWC2_NAKS_BEFORE_DELAY 3 + /* This function is for debug only */ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) { @@ -1256,6 +1282,16 @@ static void dwc2_hc_nak_intr(struct dwc2 /* * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and * interrupt. Re-start the SSPLIT transfer. + * + * Normally for non-periodic transfers we'll retry right away, but to + * avoid interrupt storms we'll wait before retrying if we've got + * several NAKs. 
If we didn't do this we'd retry directly from the + * interrupt handler and could end up quickly getting another + * interrupt (another NAK), which we'd retry. + * + * Note that in DMA mode software only gets involved to re-send NAKed + * transfers for split transactions unless the core is missing OUT NAK + * enhancement. */ if (chan->do_split) { /* @@ -1272,6 +1308,8 @@ if (chan->complete_split) qtd->error_count = 0; qtd->complete_split = 0; + qtd->num_naks++; + qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); goto handle_nak_done; } @@ -1297,7 +1335,12 @@ */ qtd->error_count = 0; - if (!chan->qh->ping_state) { + if (hsotg->core_params->dma_enable > 0 && !chan->ep_is_in) { +DWC2_COUNT(hc_nak_out); + qtd->num_naks++; + qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; + } else if (!chan->qh->ping_state) { +DWC2_COUNT(hc_nak_ping); dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_NAK); dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); @@ -2180,6 +2223,7 @@ irqreturn_t dwc2_handle_hcd_intr(struct dev_warn(hsotg->dev, "Controller is dead\n"); return retval; } +DWC2_COUNT(hcd_intr); KASSERT(mutex_owned(&hsotg->lock)); Index: sys/external/bsd/dwc2/dist/dwc2_hcdqueue.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/dwc2/dist/dwc2_hcdqueue.c,v retrieving revision 1.14 diff -u -p -r1.14 dwc2_hcdqueue.c --- sys/external/bsd/dwc2/dist/dwc2_hcdqueue.c 14 Feb 2016 10:53:30 -0000 1.14 +++ sys/external/bsd/dwc2/dist/dwc2_hcdqueue.c 7 Aug 2018 05:50:03 -0000 @@ -63,7 +63,34 @@ __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueu #include "dwc2_core.h" #include "dwc2_hcd.h" + +/* XXXsimonb */ +#include <sys/evcnt.h> + +#define DWC2_COUNTERS /* XXX */ +#ifdef DWC2_COUNTERS +#define DWC2_STATS(name) \ +static struct evcnt n##name = \ + 
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "dwc2q", #name); \ +EVCNT_ATTACH_STATIC(n##name) + +DWC2_STATS(wait_timer_fn); +DWC2_STATS(wait_timer_fn_run); +DWC2_STATS(qh_add); +DWC2_STATS(qh_add_schedule); + +#define DWC2_COUNT(var) n##var.ev_count++ +#else +#define DWC2_COUNT(var) do { /* nothing */ } while (0) +#endif /* DWC2_COUNTERS */ +/* XXXsimonb */ + + static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int); +static void dwc2_wait_timer_fn(void *); + +/* If we get a NAK, wait this long before retrying */ +#define DWC2_RETRY_WAIT_DELAY 1 /* msec */ /** * dwc2_qh_init() - Initializes a QH structure @@ -82,6 +109,10 @@ static void dwc2_qh_init(struct dwc2_hso dev_vdbg(hsotg->dev, "%s()\n", __func__); /* Initialize QH */ + qh->hsotg = hsotg; + //XXX anything rely on new qh->unreserve_timer ? + callout_init(&qh->wait_timer, 0); + callout_setfunc(&qh->wait_timer, dwc2_wait_timer_fn, qh); qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0; @@ -250,6 +281,16 @@ struct dwc2_qh *dwc2_hcd_qh_create(struc void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) { struct dwc2_softc *sc = hsotg->hsotg_sc; + + /* + * We don't have the lock so we can safely wait until the wait timer + * finishes. Of course, at this point in time we'd better have set + * wait_timer_active to false so if this timer was still pending it + * won't do anything anyway, but we want it to finish before we free + * memory. + */ + callout_destroy(&qh->wait_timer); /* XXX need to callout_halt() first? */ + if (qh->desc_list) { dwc2_hcd_qh_free_ddma(hsotg, qh); } else if (qh->dw_align_buf) { @@ -583,6 +624,57 @@ static void dwc2_deschedule_periodic(str } /** + * dwc2_wait_timer_fn() - Timer function to re-queue after waiting + * + * As per the spec, a NAK indicates that "a function is temporarily unable to + * transmit or receive data, but will eventually be able to do so without need + * of host intervention". 
+ * + * That means that when we encounter a NAK we're supposed to retry. + * + * ...but if we retry right away (from the interrupt handler that saw the NAK) + * then we can end up with an interrupt storm (if the other side keeps NAKing + * us) because on slow enough CPUs it could take us longer to get out of the + * interrupt routine than it takes for the device to send another NAK. That + * leads to a constant stream of NAK interrupts and the CPU locks. + * + * ...so instead of retrying right away in the case of a NAK we'll set a timer + * to retry some time later. This function handles that timer and moves the + * qh back to the "inactive" list, then queues transactions. + * + * @t: Pointer to wait_timer in a qh. + */ +static void dwc2_wait_timer_fn(void *arg) +{ + struct dwc2_qh *qh = arg; + struct dwc2_hsotg *hsotg = qh->hsotg; + unsigned long flags; + +DWC2_COUNT(wait_timer_fn); + spin_lock_irqsave(&hsotg->lock, flags); + + /* + * We'll set wait_timer_cancel to true if we want to cancel this + * operation in dwc2_hcd_qh_unlink(). + */ + if (!qh->wait_timer_cancel) { + enum dwc2_transaction_type tr_type; + +DWC2_COUNT(wait_timer_fn_run); + qh->want_wait = false; + + list_move(&qh->qh_list_entry, + &hsotg->non_periodic_sched_inactive); + + tr_type = dwc2_hcd_select_transactions(hsotg); + if (tr_type != DWC2_TRANSACTION_NONE) + dwc2_hcd_queue_transactions(hsotg, tr_type); + } + + spin_unlock_irqrestore(&hsotg->lock, flags); +} + +/** * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic * schedule if it is not already in the schedule. If the QH is already in * the schedule, no action is taken. 
@@ -597,6 +689,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *h int status; u32 intr_mask; +DWC2_COUNT(qh_add); if (dbg_qh(qh)) dev_vdbg(hsotg->dev, "%s()\n", __func__); @@ -614,9 +707,17 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *h /* Add the new QH to the appropriate schedule */ if (dwc2_qh_is_non_per(qh)) { - /* Always start in inactive schedule */ - list_add_tail(&qh->qh_list_entry, - &hsotg->non_periodic_sched_inactive); + if (qh->want_wait) { + list_add_tail(&qh->qh_list_entry, + &hsotg->non_periodic_sched_waiting); + qh->wait_timer_cancel = false; +DWC2_COUNT(qh_add_schedule); + callout_schedule(&qh->wait_timer, + mstohz(DWC2_RETRY_WAIT_DELAY)); + } else { + list_add_tail(&qh->qh_list_entry, + &hsotg->non_periodic_sched_inactive); + } return 0; } @@ -646,6 +747,9 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsot dev_vdbg(hsotg->dev, "%s()\n", __func__); + /* If the wait_timer is pending, this will stop it from acting */ + qh->wait_timer_cancel = true; + if (list_empty(&qh->qh_list_entry)) /* QH is not in a schedule */ return; @@ -726,7 +830,7 @@ void dwc2_hcd_qh_deactivate(struct dwc2_ if (dwc2_qh_is_non_per(qh)) { dwc2_hcd_qh_unlink(hsotg, qh); if (!list_empty(&qh->qtd_list)) - /* Add back to inactive non-periodic schedule */ + /* Add back to inactive/waiting non-periodic schedule */ dwc2_hcd_qh_add(hsotg, qh); return; }