From c4bc865386205a2d7f27865299bd8363ae7e8360 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 10 Aug 2018 05:51:54 +0000 Subject: [PATCH 1/7] Teach usb_rem_task to return whether removed from queue or not. --- sys/dev/usb/usb.c | 10 +++++++--- sys/dev/usb/usbdi.h | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/sys/dev/usb/usb.c b/sys/dev/usb/usb.c index fe6759f805d4..63ca9c8ad0f2 100644 --- a/sys/dev/usb/usb.c +++ b/sys/dev/usb/usb.c @@ -431,14 +431,16 @@ usb_add_task(struct usbd_device *dev, struct usb_task *task, int queue) /* * usb_rem_task(dev, task) * - * If task is queued to run, remove it from the queue. + * If task is queued to run, remove it from the queue. Return + * true if it successfully removed the task from the queue, false + * if not. * * Caller is _not_ guaranteed that the task is not running when * this is done. * * Never sleeps. */ -void +bool usb_rem_task(struct usbd_device *dev, struct usb_task *task) { unsigned queue; @@ -452,10 +454,12 @@ usb_rem_task(struct usbd_device *dev, struct usb_task *task) TAILQ_REMOVE(&taskq->tasks, task, next); task->queue = USB_NUM_TASKQS; mutex_exit(&taskq->lock); - break; + return true; /* removed from the queue */ } mutex_exit(&taskq->lock); } + + return false; /* was not removed from the queue */ } /* diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h index fe12af92c615..2abd695e601a 100644 --- a/sys/dev/usb/usbdi.h +++ b/sys/dev/usb/usbdi.h @@ -222,7 +222,7 @@ struct usb_task { #define USB_TASKQ_MPSAFE 0x80 void usb_add_task(struct usbd_device *, struct usb_task *, int); -void usb_rem_task(struct usbd_device *, struct usb_task *); +bool usb_rem_task(struct usbd_device *, struct usb_task *); bool usb_rem_task_wait(struct usbd_device *, struct usb_task *, int, kmutex_t *); #define usb_init_task(t, f, a, fl) ((t)->fun = (f), (t)->arg = (a), (t)->queue = USB_NUM_TASKQS, (t)->flags = (fl)) From d95c54ad8f8aec5742f55f5f4c01c5c487475b1e Mon Sep 17 00:00:00 2001 From: Taylor R 
Campbell Date: Fri, 10 Aug 2018 05:56:42 +0000 Subject: [PATCH 2/7] New function usb_task_pending for diagnostic assertions. Usable only for negative diagnostic assertions: KASSERT(!usb_task_pending(dev, task)) If you can think of a better name for this than !usb_task_pending, I'm all ears. --- sys/dev/usb/usb.c | 17 +++++++++++++++++ sys/dev/usb/usbdi.h | 1 + 2 files changed, 18 insertions(+) diff --git a/sys/dev/usb/usb.c b/sys/dev/usb/usb.c index 63ca9c8ad0f2..17d6ec717559 100644 --- a/sys/dev/usb/usb.c +++ b/sys/dev/usb/usb.c @@ -530,6 +530,23 @@ usb_rem_task_wait(struct usbd_device *dev, struct usb_task *task, int queue, return removed; } +/* + * usb_task_pending(dev, task) + * + * True if task is queued, false if not. Note that if task is + * already running, it is not considered queued. + * + * For _negative_ diagnostic assertions only: + * + * KASSERT(!usb_task_pending(dev, task)); + */ +bool +usb_task_pending(struct usbd_device *dev, struct usb_task *task) +{ + + return task->queue != USB_NUM_TASKQS; +} + void usb_event_thread(void *arg) { diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h index 2abd695e601a..62d7fefb8f18 100644 --- a/sys/dev/usb/usbdi.h +++ b/sys/dev/usb/usbdi.h @@ -225,6 +225,7 @@ void usb_add_task(struct usbd_device *, struct usb_task *, int); bool usb_rem_task(struct usbd_device *, struct usb_task *); bool usb_rem_task_wait(struct usbd_device *, struct usb_task *, int, kmutex_t *); +bool usb_task_pending(struct usbd_device *, struct usb_task *); #define usb_init_task(t, f, a, fl) ((t)->fun = (f), (t)->arg = (a), (t)->queue = USB_NUM_TASKQS, (t)->flags = (fl)) struct usb_devno { From c21f0db043c14611ed2e38772d0f0c3c0bb31683 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 10 Aug 2018 05:57:48 +0000 Subject: [PATCH 3/7] Nothing guarantees xfer's timeout has completed. Wait for it when we free the xfer. 
--- sys/dev/usb/usbdi.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sys/dev/usb/usbdi.c b/sys/dev/usb/usbdi.c index 242bc1ebd858..4e232674cbd9 100644 --- a/sys/dev/usb/usbdi.c +++ b/sys/dev/usb/usbdi.c @@ -491,12 +491,14 @@ usbd_free_xfer(struct usbd_xfer *xfer) if (xfer->ux_buf) { usbd_free_buffer(xfer); } -#if defined(DIAGNOSTIC) - if (callout_pending(&xfer->ux_callout)) { - callout_stop(&xfer->ux_callout); - printf("usbd_free_xfer: timeout_handle pending\n"); - } -#endif + + /* Wait for any straggling timeout to complete. */ + mutex_enter(xfer->ux_bus->ub_lock); + callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock); + usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, + USB_TASKQ_HC, xfer->ux_bus->ub_lock); + mutex_exit(xfer->ux_bus->ub_lock); + cv_destroy(&xfer->ux_cv); xfer->ux_bus->ub_methods->ubm_freex(xfer->ux_bus, xfer); return USBD_NORMAL_COMPLETION; From f9c59671b5b7d26ab8a55803ed66305b036f38b0 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 10 Aug 2018 05:59:03 +0000 Subject: [PATCH 4/7] New xfer state variables ux_timeout_set and ux_timeout_reset. These are needed because: - The host controller interrupt cannot wait for the callout or task to finish running. - Nothing in the USBD API as is waits for the callout or task to finish running. - Callers expect to be able to resubmit USB xfers from xfer callbacks without waiting for anything to finish running. The variable ux_timeout_set can be used by a host controller to decide on submission whether to schedule the callout or to ask an already-scheduled callout or already-queued task to reschedule the callout, by setting the variable ux_timeout_reset to true. When the callout or task runs and sees that ux_timeout_reset is true, rather than queue the task or abort the xfer, it can instead just schedule the callout anew. 
--- sys/dev/usb/usbdi.c | 1 + sys/dev/usb/usbdivar.h | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/sys/dev/usb/usbdi.c b/sys/dev/usb/usbdi.c index 4e232674cbd9..3431cee5efc4 100644 --- a/sys/dev/usb/usbdi.c +++ b/sys/dev/usb/usbdi.c @@ -494,6 +494,7 @@ usbd_free_xfer(struct usbd_xfer *xfer) /* Wait for any straggling timeout to complete. */ mutex_enter(xfer->ux_bus->ub_lock); + xfer->ux_timeout_reset = false; /* do not resuscitate */ callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock); usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, USB_TASKQ_HC, xfer->ux_bus->ub_lock); diff --git a/sys/dev/usb/usbdivar.h b/sys/dev/usb/usbdivar.h index 50e3994b7132..3ac1a1cae467 100644 --- a/sys/dev/usb/usbdivar.h +++ b/sys/dev/usb/usbdivar.h @@ -288,6 +288,19 @@ struct usbd_xfer { struct usb_task ux_aborttask; struct callout ux_callout; + + /* + * Protected by bus lock. + * + * - ux_timeout_set: The timeout is scheduled as a callout or + * usb task, and has not yet acquired the bus lock. + * + * - ux_timeout_reset: The xfer completed, and was resubmitted + * before the callout or task was able to acquire the bus + * lock, so one or the other needs to schedule a new callout. + */ + bool ux_timeout_set; + bool ux_timeout_reset; }; void usbd_init(void); From adfd771fb629e6be5cd72fed6d130cc7a27f04aa Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 10 Aug 2018 06:04:07 +0000 Subject: [PATCH 5/7] Fix steady state of timeouts in ehci. This is complicated because: 1. There are three ways that an xfer can be completed: (a) hardware interrupt completes xfer (b) software decision aborts xfer with USBD_CANCELLED (c) timeout aborts xfer with USBD_TIMEOUT 2. The timeout abort can't be done in callout because ehci_sync_hc, called unconditionally by ehci_abort_xfer to wait until the device has finished using any references to the xfer, may sleep. So we have to schedule a callout that, when run, will schedule a usb_task. 3. 
The hardware completion interrupt can't sleep waiting for a callout or task to finish -- can't use callout_halt or usb_rem_task_wait. So the callout and usb_task must be able to run _after_ the hardware completion interrupt, and recognize that they're late to the party. (Note, though, that usbd_free_xfer does wait for the callout and task to complete, so there's no danger they may use themselves after free.) 4. The xfer may be resubmitted -- and the timeout may be rescheduled -- immediately after the hardware completion interrupt, _while_ the callout and/or usb_task may still be scheduled. Specifically, we may have the following sequence of events: (a) hardware completion interrupt (b) callout or usb_task fires (c) driver resubmits xfer (d) callout or usb_task acquires lock and looks around dazed and bewildered at the firehose of events like reading the news in 2019 The mechanism for sorting this out is that we have two bits of state: - xfer->ux_timeout_set informs the driver, when submitting an xfer and setting up its timeout, whether either the callout or usb_task is already scheduled or not. - xfer->ux_timeout_reset informs the callout or usb_task whether it should reschedule the callout, because the xfer got resubmitted, or not.
--- sys/dev/usb/ehci.c | 327 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 285 insertions(+), 42 deletions(-) diff --git a/sys/dev/usb/ehci.c b/sys/dev/usb/ehci.c index eecdad5924b4..20f25c91101e 100644 --- a/sys/dev/usb/ehci.c +++ b/sys/dev/usb/ehci.c @@ -168,6 +168,9 @@ Static void ehci_check_sitd_intr(ehci_softc_t *, struct ehci_xfer *, Static void ehci_idone(struct ehci_xfer *, ex_completeq_t *); Static void ehci_timeout(void *); Static void ehci_timeout_task(void *); +Static bool ehci_probe_timeout(struct usbd_xfer *); +Static void ehci_schedule_timeout(struct usbd_xfer *); +Static void ehci_cancel_timeout_async(struct usbd_xfer *); Static void ehci_intrlist_timeout(void *); Static void ehci_doorbell(void *); Static void ehci_pcd(void *); @@ -1057,13 +1060,11 @@ ehci_idone(struct ehci_xfer *ex, ex_completeq_t *cq) } /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. + * We are completing the xfer. Cancel the timeout if we can, + * but only asynchronously. See ehci_cancel_timeout_async for + * why we need not wait for the callout or task here. */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); + ehci_cancel_timeout_async(xfer); #ifdef DIAGNOSTIC #ifdef EHCI_DEBUG @@ -3235,35 +3236,38 @@ ehci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); + /* + * Nobody else can set this status: only one caller can time + * out, and only one caller can synchronously abort. So the + * status can't be the status we're trying to set this to. + */ + KASSERT(xfer->ux_status != status); + + /* + * If host controller or timer interrupt has completed it, too + * late to abort. Forget about it. 
+ */ + if (xfer->ux_status != USBD_IN_PROGRESS) + return; + if (status == USBD_CANCELLED) { /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. + * We are synchronously aborting. Cancel the timeout + * if we can, but only asynchronously. See + * ehci_cancel_timeout_async for why we need not wait + * for the callout or task here. */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); + ehci_cancel_timeout_async(xfer); } else { - /* Otherwise, we are timing out. */ + /* + * Otherwise, we are timing out. No action needed to + * cancel the timeout because we _are_ the timeout. + * This case should happen only via the timeout task, + * invoked via the callout. + */ KASSERT(status == USBD_TIMEOUT); } - /* - * The xfer cannot have been cancelled already. It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. - */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* Only the timeout, which runs only once, can time it out. */ - KASSERT(xfer->ux_status != USBD_TIMEOUT); - - /* If anyone else beat us, we're done. */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - /* We beat everyone else. Claim the status. */ xfer->ux_status = status; @@ -3473,6 +3477,16 @@ dying: KASSERT(mutex_owned(&sc->sc_lock)); } +/* + * ehci_timeout(xfer) + * + * Called at IPL_SOFTCLOCK when too much time has elapsed waiting + * for xfer to complete. Since we can't abort the xfer at + * IPL_SOFTCLOCK, defer to a usb_task to run it in thread context, + * unless the xfer has completed or aborted concurrently -- and if + * the xfer has also been resubmitted, take care of rescheduling + * the callout. 
+ */ Static void ehci_timeout(void *addr) { @@ -3489,12 +3503,38 @@ ehci_timeout(void *addr) } #endif + /* Acquire the lock so we can transition the timeout state. */ mutex_enter(&sc->sc_lock); - if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS) + + /* + * Use ehci_probe_timeout to check whether the timeout is still + * valid, or to reschedule the callout if necessary. If it is + * still valid, schedule the task. + */ + if (ehci_probe_timeout(xfer)) usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); + + /* + * Notify ehci_cancel_timeout_async that we may have scheduled + * the task. This causes callout_invoking to return false in + * ehci_cancel_timeout_async so that it can tell which stage in + * the callout->task->abort process we're at. + */ + callout_ack(&xfer->ux_callout); + + /* All done -- release the lock. */ mutex_exit(&sc->sc_lock); } +/* + * ehci_timeout_task(xfer) + * + * Called in thread context when too much time has elapsed waiting + * for xfer to complete. Issue ehci_abort_xfer(USBD_TIMEOUT), + * unless the xfer has completed or aborted concurrently -- and if + * the xfer has also been resubmitted, take care of rescheduling + * the callout. + */ Static void ehci_timeout_task(void *addr) { @@ -3505,11 +3545,223 @@ ehci_timeout_task(void *addr) DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); + /* Acquire the lock so we can transition the timeout state. */ mutex_enter(&sc->sc_lock); - ehci_abort_xfer(xfer, USBD_TIMEOUT); + + /* + * Use ehci_probe_timeout to check whether the timeout is still + * valid, or to reschedule the callout if necessary. If it is + * still valid, schedule the task. + */ + if (ehci_probe_timeout(xfer)) + ehci_abort_xfer(xfer, USBD_TIMEOUT); + + /* All done -- release the lock. */ mutex_exit(&sc->sc_lock); } +/* + * ehci_probe_timeout(xfer) + * + * Probe the status of xfer's timeout. Acknowledge and process a + * request to reschedule. 
Return true if the timeout is still + * valid and the caller should take further action (queueing a + * task or aborting the xfer), false if it must stop here. + */ +Static bool +ehci_probe_timeout(struct usbd_xfer *xfer) +{ + EHCIHIST_FUNC(); EHCIHIST_CALLED(); + struct ehci_softc *sc = EHCI_XFER2SC(xfer); + bool valid; + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); + + /* The timeout must be set. */ + KASSERT(xfer->ux_timeout_set); + + /* + * Neither callout nor task may be pending; they execute + * alternately in lock step. + */ + KASSERT(!callout_pending(&xfer->ux_callout)); + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + /* There are a few cases... */ + if (sc->sc_dying) { + /* Host controller dying. Drop it all on the floor. */ + xfer->ux_timeout_set = false; + xfer->ux_timeout_reset = false; + valid = false; + } else if (xfer->ux_timeout_reset) { + /* + * The xfer completed _and_ got resubmitted while we + * waited for the lock. Acknowledge the request to + * reschedule, and reschedule it if there is a timeout + * and the bus is not polling. + */ + xfer->ux_timeout_reset = false; + if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { + KASSERT(xfer->ux_timeout_set); + callout_schedule(&xfer->ux_callout, + mstohz(xfer->ux_timeout)); + } else { + /* No more callout or task scheduled. */ + xfer->ux_timeout_set = false; + } + valid = false; + } else if (xfer->ux_status != USBD_IN_PROGRESS) { + /* + * The xfer has completed by hardware completion or by + * software abort, and has not been resubmitted, so the + * timeout must be unset, and is no longer valid for + * the caller. + */ + xfer->ux_timeout_set = false; + valid = false; + } else { + /* + * The xfer has not yet completed, so the timeout is + * valid. + */ + valid = true; + } + + /* Any reset must have been processed. */ + KASSERT(!xfer->ux_timeout_reset); + + /* + * Either we claim the timeout is set, or the callout is idle. 
+ * If the timeout is still set, we may be handing off to the + * task instead, so this is an if but not an iff. + */ + KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout)); + + /* + * The task must be idle now. + * + * - If the caller is the callout, _and_ the timeout is still + * valid, the caller will schedule it, but it hasn't been + * scheduled yet. (If the timeout is not valid, the task + * should not be scheduled.) + * + * - If the caller is the task, it cannot be scheduled again + * until the callout runs again, which won't happen until we + * next release the lock. + */ + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); + + return valid; +} + +/* + * ehci_schedule_timeout(xfer) + * + * Ensure that xfer has a timeout. If the callout is already + * queued or the task is already running, request that they + * reschedule the callout. If not, and if we're not polling, + * schedule the callout anew. + */ +Static void +ehci_schedule_timeout(struct usbd_xfer *xfer) +{ + EHCIHIST_FUNC(); EHCIHIST_CALLED(); + ehci_softc_t *sc = EHCI_XFER2SC(xfer); + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); + + if (xfer->ux_timeout_set) { + /* + * Callout or task has fired from a prior completed + * xfer but has not yet noticed that the xfer is done. + * Ask it to reschedule itself to ux_timeout. + */ + xfer->ux_timeout_reset = true; + } else if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { + /* Callout is not scheduled. Schedule it. 
*/ + KASSERT(!callout_pending(&xfer->ux_callout)); + callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), + ehci_timeout, xfer); + xfer->ux_timeout_set = true; + } + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); +} + +/* + * ehci_cancel_timeout_async(xfer) + * + * Cancel the callout and the task of xfer, which have not yet run + * to completion, but don't wait for the callout or task to finish + * running. + * + * If they have already fired, at worst they are waiting for the + * bus lock. They will see that the xfer is no longer in progress + * and give up, or they will see that the xfer has been + * resubmitted with a new timeout and reschedule the callout. + * + * If a resubmitted request completed so fast that the callout + * didn't have time to process a timer reset, just cancel the + * timer reset. + */ +Static void +ehci_cancel_timeout_async(struct usbd_xfer *xfer) +{ + EHCIHIST_FUNC(); EHCIHIST_CALLED(); + struct ehci_softc *sc = EHCI_XFER2SC(xfer); + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); + + KASSERT(xfer->ux_timeout_set); + xfer->ux_timeout_reset = false; + if (!callout_stop(&xfer->ux_callout)) { + /* + * We stopped the callout before it ran. The timeout + * is no longer set. + */ + xfer->ux_timeout_set = false; + } else if (callout_invoking(&xfer->ux_callout)) { + /* + * The callout has begun to run but it has not yet + * acquired the lock and called callout_ack. The task + * cannot be queued yet, and the callout cannot have + * been rescheduled yet. + * + * By the time the callout acquires the lock, we will + * have transitioned from USBD_IN_PROGRESS to a + * completed status, and possibly also resubmitted the + * xfer and set xfer->ux_timeout_reset = true. In both + * cases, the callout will DTRT, so no further action + * is needed here. 
+ */ + } else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) { + /* + * The callout had fired and scheduled the task, but we + * stopped the task before it could run. The timeout + * is therefore no longer set -- the next resubmission + * of the xfer must schedule a new timeout. + * + * The callout should not be be pending at this point: + * it is scheduled only under the lock, and only when + * xfer->ux_timeout_set is false, or by the callout or + * task itself when xfer->ux_timeout_reset is true. + */ + xfer->ux_timeout_set = false; + } + + /* + * The callout cannot be scheduled and the task cannot be + * queued at this point. Either we cancelled them, or they are + * already running and waiting for the bus lock. + */ + KASSERT(!callout_pending(&xfer->ux_callout)); + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); +} + /************************/ Static int @@ -3751,10 +4003,7 @@ ehci_device_ctrl_start(struct usbd_xfer *xfer) /* Insert qTD in QH list - also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, setup); - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ehci_timeout, xfer); - } + ehci_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) @@ -3952,10 +4201,7 @@ ehci_device_bulk_start(struct usbd_xfer *xfer) /* also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart); - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ehci_timeout, xfer); - } + ehci_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) @@ -4171,10 +4417,7 @@ ehci_device_intr_start(struct usbd_xfer *xfer) /* also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart); - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - 
callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ehci_timeout, xfer); - } + ehci_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) From bc1c0f266cf085b226df491ef4f12a6e5bdc2455 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Sat, 14 Dec 2019 23:18:05 +0000 Subject: [PATCH 6/7] Factor out HCI-independent xfer completion logic. New API for HCI drivers to synchronize hardware completion interrupts, synchronous aborts, and asynchronous timeouts: - When submitting an xfer to hardware, call usbd_xfer_schedule_timeout(xfer). - On HCI completion interrupt for xfer completion: if (!usbd_xfer_trycomplete(xfer)) return; /* timed out or aborted, ignore it */ - In upm_abort methods, call usbd_xfer_abort(xfer). For HCI drivers that use this API (not needed in drivers that don't, or for xfers like root intr xfers that don't use it): - New ubm_abortx method serves role of former *hci_abort_xfer, but without any logic for wrangling timeouts/callouts/tasks -- caller in usbd_xfer_abort has already handled them. - New ubm_dying method, returns true if the device is in the process of detaching, used by the timeout logic. Converted and tested: - ehci - ohci Converted and compile-tested: - ahci (XXX did this ever work?) - dwc2 - motg (XXX missing usbd_xfer_schedule_timeout in motg_*_start?) - uhci - xhci Not changed: - slhci (sys/dev/ic/sl811hs.c) -- doesn't use a separate per-xfer callout for timeouts (XXX but maybe should?) 
- ugenhc (sys/rump/dev/lib/libugenhc/ugenhc.c) -- doesn't manage its own transfer timeouts - vhci -- times transfers out only on detach; could be adapted easily if we wanted to use the xfer->ux_callout --- sys/arch/mips/adm5120/dev/ahci.c | 21 +- sys/dev/usb/ehci.c | 389 +++---------------------------- sys/dev/usb/motg.c | 66 +++--- sys/dev/usb/ohci.c | 156 +++---------- sys/dev/usb/uhci.c | 142 +++-------- sys/dev/usb/usbdi.c | 385 ++++++++++++++++++++++++++++++ sys/dev/usb/usbdi.h | 5 + sys/dev/usb/usbdivar.h | 2 + sys/dev/usb/xhci.c | 163 +++---------- sys/external/bsd/dwc2/dwc2.c | 235 ++++++++----------- sys/external/bsd/dwc2/dwc2var.h | 1 + 11 files changed, 653 insertions(+), 912 deletions(-) diff --git a/sys/arch/mips/adm5120/dev/ahci.c b/sys/arch/mips/adm5120/dev/ahci.c index 234779c33531..7a7d21ee28d7 100644 --- a/sys/arch/mips/adm5120/dev/ahci.c +++ b/sys/arch/mips/adm5120/dev/ahci.c @@ -98,6 +98,7 @@ static void ahci_poll_device(void *arg); static struct usbd_xfer * ahci_allocx(struct usbd_bus *, unsigned int); static void ahci_freex(struct usbd_bus *, struct usbd_xfer *); +static void ahci_abortx(struct usbd_xfer *); static void ahci_get_lock(struct usbd_bus *, kmutex_t **); static int ahci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, @@ -136,7 +137,6 @@ static void ahci_device_bulk_done(struct usbd_xfer *); static int ahci_transaction(struct ahci_softc *, struct usbd_pipe *, uint8_t, int, u_char *, uint8_t); static void ahci_noop(struct usbd_pipe *); -static void ahci_abort_xfer(struct usbd_xfer *, usbd_status); static void ahci_device_clear_toggle(struct usbd_pipe *); extern int usbdebug; @@ -169,6 +169,7 @@ struct usbd_bus_methods ahci_bus_methods = { .ubm_dopoll = ahci_poll, .ubm_allocx = ahci_allocx, .ubm_freex = ahci_freex, + .ubm_abortx = ahci_abortx, .ubm_getlock = ahci_get_lock, .ubm_rhctrl = ahci_roothub_ctrl, }; @@ -919,7 +920,7 @@ static void ahci_device_ctrl_abort(struct usbd_xfer *xfer) { DPRINTF(D_TRACE, ("Cab ")); - 
ahci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -1031,7 +1032,7 @@ ahci_device_intr_abort(struct usbd_xfer *xfer) } else { printf("%s: sx == NULL!\n", __func__); } - ahci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -1247,7 +1248,7 @@ static void ahci_device_bulk_abort(struct usbd_xfer *xfer) { DPRINTF(D_TRACE, ("Bab ")); - ahci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -1377,11 +1378,15 @@ ahci_transaction(struct ahci_softc *sc, struct usbd_pipe *pipe, #endif } -void -ahci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +static void +ahci_abortx(struct usbd_xfer *xfer) { - xfer->ux_status = status; - usb_transfer_complete(xfer); + /* + * XXX This is totally busted; there's no way it can possibly + * work! All transfers are busy-waited, it seems, so there is + * no opportunity to abort. + */ + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); } void diff --git a/sys/dev/usb/ehci.c b/sys/dev/usb/ehci.c index 20f25c91101e..ce4be68a83cd 100644 --- a/sys/dev/usb/ehci.c +++ b/sys/dev/usb/ehci.c @@ -166,11 +166,6 @@ Static void ehci_check_itd_intr(ehci_softc_t *, struct ehci_xfer *, Static void ehci_check_sitd_intr(ehci_softc_t *, struct ehci_xfer *, ex_completeq_t *); Static void ehci_idone(struct ehci_xfer *, ex_completeq_t *); -Static void ehci_timeout(void *); -Static void ehci_timeout_task(void *); -Static bool ehci_probe_timeout(struct usbd_xfer *); -Static void ehci_schedule_timeout(struct usbd_xfer *); -Static void ehci_cancel_timeout_async(struct usbd_xfer *); Static void ehci_intrlist_timeout(void *); Static void ehci_doorbell(void *); Static void ehci_pcd(void *); @@ -180,6 +175,7 @@ Static struct usbd_xfer * Static void ehci_freex(struct usbd_bus *, struct usbd_xfer *); Static void ehci_get_lock(struct usbd_bus *, kmutex_t **); +Static bool ehci_dying(struct usbd_bus *); Static int ehci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, void *, int); @@ 
-281,7 +277,7 @@ Static void ehci_set_qh_qtd(ehci_soft_qh_t *, ehci_soft_qtd_t *); Static void ehci_sync_hc(ehci_softc_t *); Static void ehci_close_pipe(struct usbd_pipe *, ehci_soft_qh_t *); -Static void ehci_abort_xfer(struct usbd_xfer *, usbd_status); +Static void ehci_abortx(struct usbd_xfer *); #ifdef EHCI_DEBUG Static ehci_softc_t *theehci; @@ -322,6 +318,8 @@ Static const struct usbd_bus_methods ehci_bus_methods = { .ubm_dopoll = ehci_poll, .ubm_allocx = ehci_allocx, .ubm_freex = ehci_freex, + .ubm_abortx = ehci_abortx, + .ubm_dying = ehci_dying, .ubm_getlock = ehci_get_lock, .ubm_rhctrl = ehci_roothub_ctrl, }; @@ -1049,22 +1047,11 @@ ehci_idone(struct ehci_xfer *ex, ex_completeq_t *cq) DPRINTF("ex=%#jx", (uintptr_t)ex, 0, 0, 0); /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. */ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERT(xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT); - DPRINTF("aborted xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); + if (!usbd_xfer_trycomplete(xfer)) return; - } - - /* - * We are completing the xfer. Cancel the timeout if we can, - * but only asynchronously. See ehci_cancel_timeout_async for - * why we need not wait for the callout or task here. - */ - ehci_cancel_timeout_async(xfer); #ifdef DIAGNOSTIC #ifdef EHCI_DEBUG @@ -1539,9 +1526,6 @@ ehci_allocx(struct usbd_bus *bus, unsigned int nframes) if (xfer != NULL) { memset(xfer, 0, sizeof(struct ehci_xfer)); - /* Initialise this always so we can call remove on it. 
*/ - usb_init_task(&xfer->ux_aborttask, ehci_timeout_task, xfer, - USB_TASKQ_MPSAFE); #ifdef DIAGNOSTIC struct ehci_xfer *ex = EHCI_XFER2EXFER(xfer); ex->ex_isdone = true; @@ -1569,6 +1553,14 @@ ehci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +Static bool +ehci_dying(struct usbd_bus *bus) +{ + struct ehci_softc *sc = EHCI_BUS2SC(bus); + + return sc->sc_dying; +} + Static void ehci_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -3201,22 +3193,12 @@ ehci_close_pipe(struct usbd_pipe *pipe, ehci_soft_qh_t *head) } /* - * Cancel or timeout a device request. We have two cases to deal with - * - * 1) A driver wants to stop scheduled or inflight transfers - * 2) A transfer has timed out - * - * have (partially) happened since the hardware runs concurrently. - * - * Transfer state is protected by the bus lock and we set the transfer status - * as soon as either of the above happens (with bus lock held). - * - * Then we arrange for the hardware to tells us that it is not still + * Arrrange for the hardware to tells us that it is not still * processing the TDs by setting the QH halted bit and wait for the ehci * door bell */ Static void -ehci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +ehci_abortx(struct usbd_xfer *xfer) { EHCIHIST_FUNC(); EHCIHIST_CALLED(); struct ehci_pipe *epipe = EHCI_XFER2EPIPE(xfer); @@ -3228,48 +3210,14 @@ ehci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) uint32_t qhstatus; int hit; - KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT), - "invalid status for abort: %d", (int)status); - DPRINTF("xfer=%#jx pipe=%#jx", (uintptr_t)xfer, (uintptr_t)epipe, 0, 0); KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - /* - * Nobody else can set this status: only one caller can time - * out, and only one caller can synchronously abort. So the - * status can't be the status we're trying to set this to. 
- */ - KASSERT(xfer->ux_status != status); - - /* - * If host controller or timer interrupt has completed it, too - * late to abort. Forget about it. - */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - if (status == USBD_CANCELLED) { - /* - * We are synchronously aborting. Cancel the timeout - * if we can, but only asynchronously. See - * ehci_cancel_timeout_async for why we need not wait - * for the callout or task here. - */ - ehci_cancel_timeout_async(xfer); - } else { - /* - * Otherwise, we are timing out. No action needed to - * cancel the timeout because we _are_ the timeout. - * This case should happen only via the timeout task, - * invoked via the callout. - */ - KASSERT(status == USBD_TIMEOUT); - } - - /* We beat everyone else. Claim the status. */ - xfer->ux_status = status; + KASSERTMSG((xfer->ux_status == USBD_CANCELLED || + xfer->ux_status == USBD_TIMEOUT), + "bad abort status: %d", xfer->ux_status); /* * If we're dying, skip the hardware action and just notify the @@ -3477,291 +3425,6 @@ dying: KASSERT(mutex_owned(&sc->sc_lock)); } -/* - * ehci_timeout(xfer) - * - * Called at IPL_SOFTCLOCK when too much time has elapsed waiting - * for xfer to complete. Since we can't abort the xfer at - * IPL_SOFTCLOCK, defer to a usb_task to run it in thread context, - * unless the xfer has completed or aborted concurrently -- and if - * the xfer has also been resubmitted, take care of rescheduling - * the callout. - */ -Static void -ehci_timeout(void *addr) -{ - EHCIHIST_FUNC(); EHCIHIST_CALLED(); - struct usbd_xfer *xfer = addr; - ehci_softc_t *sc = EHCI_XFER2SC(xfer); - struct usbd_device *dev = xfer->ux_pipe->up_dev; - - DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); -#ifdef EHCI_DEBUG - if (ehcidebug >= 2) { - struct usbd_pipe *pipe = xfer->ux_pipe; - usbd_dump_pipe(pipe); - } -#endif - - /* Acquire the lock so we can transition the timeout state. 
*/ - mutex_enter(&sc->sc_lock); - - /* - * Use ehci_probe_timeout to check whether the timeout is still - * valid, or to reschedule the callout if necessary. If it is - * still valid, schedule the task. - */ - if (ehci_probe_timeout(xfer)) - usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); - - /* - * Notify ehci_cancel_timeout_async that we may have scheduled - * the task. This causes callout_invoking to return false in - * ehci_cancel_timeout_async so that it can tell which stage in - * the callout->task->abort process we're at. - */ - callout_ack(&xfer->ux_callout); - - /* All done -- release the lock. */ - mutex_exit(&sc->sc_lock); -} - -/* - * ehci_timeout_task(xfer) - * - * Called in thread context when too much time has elapsed waiting - * for xfer to complete. Issue ehci_abort_xfer(USBD_TIMEOUT), - * unless the xfer has completed or aborted concurrently -- and if - * the xfer has also been resubmitted, take care of rescheduling - * the callout. - */ -Static void -ehci_timeout_task(void *addr) -{ - struct usbd_xfer *xfer = addr; - ehci_softc_t *sc = EHCI_XFER2SC(xfer); - - EHCIHIST_FUNC(); EHCIHIST_CALLED(); - - DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - - /* Acquire the lock so we can transition the timeout state. */ - mutex_enter(&sc->sc_lock); - - /* - * Use ehci_probe_timeout to check whether the timeout is still - * valid, or to reschedule the callout if necessary. If it is - * still valid, schedule the task. - */ - if (ehci_probe_timeout(xfer)) - ehci_abort_xfer(xfer, USBD_TIMEOUT); - - /* All done -- release the lock. */ - mutex_exit(&sc->sc_lock); -} - -/* - * ehci_probe_timeout(xfer) - * - * Probe the status of xfer's timeout. Acknowledge and process a - * request to reschedule. Return true if the timeout is still - * valid and the caller should take further action (queueing a - * task or aborting the xfer), false if it must stop here. 
- */ -Static bool -ehci_probe_timeout(struct usbd_xfer *xfer) -{ - EHCIHIST_FUNC(); EHCIHIST_CALLED(); - struct ehci_softc *sc = EHCI_XFER2SC(xfer); - bool valid; - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); - - /* The timeout must be set. */ - KASSERT(xfer->ux_timeout_set); - - /* - * Neither callout nor task may be pending; they execute - * alternately in lock step. - */ - KASSERT(!callout_pending(&xfer->ux_callout)); - KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); - - /* There are a few cases... */ - if (sc->sc_dying) { - /* Host controller dying. Drop it all on the floor. */ - xfer->ux_timeout_set = false; - xfer->ux_timeout_reset = false; - valid = false; - } else if (xfer->ux_timeout_reset) { - /* - * The xfer completed _and_ got resubmitted while we - * waited for the lock. Acknowledge the request to - * reschedule, and reschedule it if there is a timeout - * and the bus is not polling. - */ - xfer->ux_timeout_reset = false; - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - KASSERT(xfer->ux_timeout_set); - callout_schedule(&xfer->ux_callout, - mstohz(xfer->ux_timeout)); - } else { - /* No more callout or task scheduled. */ - xfer->ux_timeout_set = false; - } - valid = false; - } else if (xfer->ux_status != USBD_IN_PROGRESS) { - /* - * The xfer has completed by hardware completion or by - * software abort, and has not been resubmitted, so the - * timeout must be unset, and is no longer valid for - * the caller. - */ - xfer->ux_timeout_set = false; - valid = false; - } else { - /* - * The xfer has not yet completed, so the timeout is - * valid. - */ - valid = true; - } - - /* Any reset must have been processed. */ - KASSERT(!xfer->ux_timeout_reset); - - /* - * Either we claim the timeout is set, or the callout is idle. - * If the timeout is still set, we may be handing off to the - * task instead, so this is an if but not an iff. 
- */ - KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout)); - - /* - * The task must be idle now. - * - * - If the caller is the callout, _and_ the timeout is still - * valid, the caller will schedule it, but it hasn't been - * scheduled yet. (If the timeout is not valid, the task - * should not be scheduled.) - * - * - If the caller is the task, it cannot be scheduled again - * until the callout runs again, which won't happen until we - * next release the lock. - */ - KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); - - return valid; -} - -/* - * ehci_schedule_timeout(xfer) - * - * Ensure that xfer has a timeout. If the callout is already - * queued or the task is already running, request that they - * reschedule the callout. If not, and if we're not polling, - * schedule the callout anew. - */ -Static void -ehci_schedule_timeout(struct usbd_xfer *xfer) -{ - EHCIHIST_FUNC(); EHCIHIST_CALLED(); - ehci_softc_t *sc = EHCI_XFER2SC(xfer); - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); - - if (xfer->ux_timeout_set) { - /* - * Callout or task has fired from a prior completed - * xfer but has not yet noticed that the xfer is done. - * Ask it to reschedule itself to ux_timeout. - */ - xfer->ux_timeout_reset = true; - } else if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - /* Callout is not scheduled. Schedule it. */ - KASSERT(!callout_pending(&xfer->ux_callout)); - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ehci_timeout, xfer); - xfer->ux_timeout_set = true; - } - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); -} - -/* - * ehci_cancel_timeout_async(xfer) - * - * Cancel the callout and the task of xfer, which have not yet run - * to completion, but don't wait for the callout or task to finish - * running. - * - * If they have already fired, at worst they are waiting for the - * bus lock. 
They will see that the xfer is no longer in progress - * and give up, or they will see that the xfer has been - * resubmitted with a new timeout and reschedule the callout. - * - * If a resubmitted request completed so fast that the callout - * didn't have time to process a timer reset, just cancel the - * timer reset. - */ -Static void -ehci_cancel_timeout_async(struct usbd_xfer *xfer) -{ - EHCIHIST_FUNC(); EHCIHIST_CALLED(); - struct ehci_softc *sc = EHCI_XFER2SC(xfer); - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); - - KASSERT(xfer->ux_timeout_set); - xfer->ux_timeout_reset = false; - if (!callout_stop(&xfer->ux_callout)) { - /* - * We stopped the callout before it ran. The timeout - * is no longer set. - */ - xfer->ux_timeout_set = false; - } else if (callout_invoking(&xfer->ux_callout)) { - /* - * The callout has begun to run but it has not yet - * acquired the lock and called callout_ack. The task - * cannot be queued yet, and the callout cannot have - * been rescheduled yet. - * - * By the time the callout acquires the lock, we will - * have transitioned from USBD_IN_PROGRESS to a - * completed status, and possibly also resubmitted the - * xfer and set xfer->ux_timeout_reset = true. In both - * cases, the callout will DTRT, so no further action - * is needed here. - */ - } else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) { - /* - * The callout had fired and scheduled the task, but we - * stopped the task before it could run. The timeout - * is therefore no longer set -- the next resubmission - * of the xfer must schedule a new timeout. - * - * The callout should not be be pending at this point: - * it is scheduled only under the lock, and only when - * xfer->ux_timeout_set is false, or by the callout or - * task itself when xfer->ux_timeout_reset is true. - */ - xfer->ux_timeout_set = false; - } - - /* - * The callout cannot be scheduled and the task cannot be - * queued at this point. 
Either we cancelled them, or they are - * already running and waiting for the bus lock. - */ - KASSERT(!callout_pending(&xfer->ux_callout)); - KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); - - KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); -} - /************************/ Static int @@ -4003,7 +3666,7 @@ ehci_device_ctrl_start(struct usbd_xfer *xfer) /* Insert qTD in QH list - also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, setup); - ehci_schedule_timeout(xfer); + usbd_xfer_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) @@ -4054,7 +3717,7 @@ ehci_device_ctrl_abort(struct usbd_xfer *xfer) EHCIHIST_FUNC(); EHCIHIST_CALLED(); DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - ehci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* Close a device control pipe. */ @@ -4201,7 +3864,7 @@ ehci_device_bulk_start(struct usbd_xfer *xfer) /* also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart); - ehci_schedule_timeout(xfer); + usbd_xfer_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) @@ -4232,7 +3895,7 @@ ehci_device_bulk_abort(struct usbd_xfer *xfer) EHCIHIST_FUNC(); EHCIHIST_CALLED(); DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); - ehci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* @@ -4417,7 +4080,7 @@ ehci_device_intr_start(struct usbd_xfer *xfer) /* also does usb_syncmem(sqh) */ ehci_set_qh_qtd(sqh, exfer->ex_sqtdstart); - ehci_schedule_timeout(xfer); + usbd_xfer_schedule_timeout(xfer); ehci_add_intr_list(sc, exfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) @@ -4451,7 +4114,7 @@ ehci_device_intr_abort(struct usbd_xfer *xfer) * async doorbell. That's dependent on the async list, wheras * intr xfers are periodic, should not use this? 
*/ - ehci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } Static void diff --git a/sys/dev/usb/motg.c b/sys/dev/usb/motg.c index f36b92cc0a53..255f0858440e 100644 --- a/sys/dev/usb/motg.c +++ b/sys/dev/usb/motg.c @@ -144,6 +144,7 @@ static void motg_softintr(void *); static struct usbd_xfer * motg_allocx(struct usbd_bus *, unsigned int); static void motg_freex(struct usbd_bus *, struct usbd_xfer *); +static bool motg_dying(struct usbd_bus *); static void motg_get_lock(struct usbd_bus *, kmutex_t **); static int motg_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, void *, int); @@ -174,7 +175,7 @@ static void motg_device_data_read(struct usbd_xfer *); static void motg_device_data_write(struct usbd_xfer *); static void motg_device_clear_toggle(struct usbd_pipe *); -static void motg_device_xfer_abort(struct usbd_xfer *); +static void motg_abortx(struct usbd_xfer *); #define UBARR(sc) bus_space_barrier((sc)->sc_iot, (sc)->sc_ioh, 0, (sc)->sc_size, \ BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE) @@ -233,6 +234,8 @@ const struct usbd_bus_methods motg_bus_methods = { .ubm_dopoll = motg_poll, .ubm_allocx = motg_allocx, .ubm_freex = motg_freex, + .ubm_abortx = motg_abortx, + .ubm_dying = motg_dying, .ubm_getlock = motg_get_lock, .ubm_rhctrl = motg_roothub_ctrl, }; @@ -770,6 +773,14 @@ motg_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +static bool +motg_dying(struct usbd_bus *bus) +{ + struct motg_softc *sc = MOTG_BUS2SC(bus); + + return sc->sc_dying; +} + static void motg_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -1387,6 +1398,13 @@ motg_device_ctrl_intr_rx(struct motg_softc *sc) KASSERT(mutex_owned(&sc->sc_lock)); + /* + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. 
+ */ + if (!usbd_xfer_trycomplete(xfer)) + return; + KASSERT(ep->phase == DATA_IN || ep->phase == STATUS_IN); /* select endpoint 0 */ UWRITE1(sc, MUSB2_REG_EPINDEX, 0); @@ -1501,6 +1519,14 @@ motg_device_ctrl_intr_tx(struct motg_softc *sc) MOTGHIST_FUNC(); MOTGHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); + + /* + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. + */ + if (!usbd_xfer_trycomplete(xfer)) + return; + if (ep->phase == DATA_IN || ep->phase == STATUS_IN) { motg_device_ctrl_intr_rx(sc); return; @@ -1633,7 +1659,7 @@ motg_device_ctrl_abort(struct usbd_xfer *xfer) { MOTGHIST_FUNC(); MOTGHIST_CALLED(); - motg_device_xfer_abort(xfer); + usbd_xfer_abort(xfer); } /* Close a device control pipe */ @@ -2019,6 +2045,14 @@ motg_device_intr_tx(struct motg_softc *sc, int epnumber) MOTGHIST_FUNC(); MOTGHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); + + /* + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. + */ + if (!usbd_xfer_trycomplete(xfer)) + return; + KASSERT(ep->ep_number == epnumber); DPRINTFN(MD_BULK, " on ep %jd", epnumber, 0, 0, 0); @@ -2102,7 +2136,7 @@ motg_device_data_abort(struct usbd_xfer *xfer) MOTGHIST_FUNC(); MOTGHIST_CALLED(); - motg_device_xfer_abort(xfer); + usbd_xfer_abort(xfer); } /* Close a device control pipe */ @@ -2151,7 +2185,7 @@ motg_device_clear_toggle(struct usbd_pipe *pipe) /* Abort a device control request. */ static void -motg_device_xfer_abort(struct usbd_xfer *xfer) +motg_abortx(struct usbd_xfer *xfer) { MOTGHIST_FUNC(); MOTGHIST_CALLED(); uint8_t csr; @@ -2161,30 +2195,6 @@ motg_device_xfer_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. 
- */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); - - /* - * The xfer cannot have been cancelled already. It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. - */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* If anyone else beat us, we're done. */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - /* We beat everyone else. Claim the status. */ - xfer->ux_status = USBD_CANCELLED; - /* * If we're dying, skip the hardware action and just notify the * software that we're done. diff --git a/sys/dev/usb/ohci.c b/sys/dev/usb/ohci.c index adf396becfd8..8425fc5c28f5 100644 --- a/sys/dev/usb/ohci.c +++ b/sys/dev/usb/ohci.c @@ -169,6 +169,7 @@ Static void ohci_device_isoc_enter(struct usbd_xfer *); Static struct usbd_xfer * ohci_allocx(struct usbd_bus *, unsigned int); Static void ohci_freex(struct usbd_bus *, struct usbd_xfer *); +Static bool ohci_dying(struct usbd_bus *); Static void ohci_get_lock(struct usbd_bus *, kmutex_t **); Static int ohci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, void *, int); @@ -213,12 +214,10 @@ Static void ohci_device_isoc_done(struct usbd_xfer *); Static usbd_status ohci_device_setintr(ohci_softc_t *, struct ohci_pipe *, int); -Static void ohci_timeout(void *); -Static void ohci_timeout_task(void *); Static void ohci_rhsc_enable(void *); Static void ohci_close_pipe(struct usbd_pipe *, ohci_soft_ed_t *); -Static void ohci_abort_xfer(struct usbd_xfer *, usbd_status); +Static void ohci_abortx(struct usbd_xfer *); Static void ohci_device_clear_toggle(struct usbd_pipe *); Static void ohci_noop(struct usbd_pipe *); @@ -287,6 +286,8 @@ Static const struct usbd_bus_methods ohci_bus_methods = { .ubm_dopoll = ohci_poll, .ubm_allocx = ohci_allocx, .ubm_freex = ohci_freex, + .ubm_abortx = ohci_abortx, + .ubm_dying = ohci_dying, 
.ubm_getlock = ohci_get_lock, .ubm_rhctrl = ohci_roothub_ctrl, }; @@ -1068,9 +1069,6 @@ ohci_allocx(struct usbd_bus *bus, unsigned int nframes) if (xfer != NULL) { memset(xfer, 0, sizeof(struct ohci_xfer)); - /* Initialise this always so we can call remove on it. */ - usb_init_task(&xfer->ux_aborttask, ohci_timeout_task, xfer, - USB_TASKQ_MPSAFE); #ifdef DIAGNOSTIC xfer->ux_state = XFER_BUSY; #endif @@ -1092,6 +1090,14 @@ ohci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +Static bool +ohci_dying(struct usbd_bus *bus) +{ + ohci_softc_t *sc = OHCI_BUS2SC(bus); + + return sc->sc_dying; +} + Static void ohci_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -1463,23 +1469,11 @@ ohci_softintr(void *v) } /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. + * Try to claim this xfer for completion. If it has + * already completed or aborted, drop it on the floor. */ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERT(xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT); + if (!usbd_xfer_trycomplete(xfer)) continue; - } - - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. - */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); len = std->len; if (std->td.td_cbp != 0) @@ -1553,23 +1547,11 @@ ohci_softintr(void *v) continue; /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. + * Try to claim this xfer for completion. If it has + * already completed or aborted, drop it on the floor. 
*/ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERT(xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT); + if (!usbd_xfer_trycomplete(xfer)) continue; - } - - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. - */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); KASSERT(!sitd->isdone); #ifdef DIAGNOSTIC @@ -1909,37 +1891,6 @@ ohci_hash_find_itd(ohci_softc_t *sc, ohci_physaddr_t a) return NULL; } -void -ohci_timeout(void *addr) -{ - OHCIHIST_FUNC(); OHCIHIST_CALLED(); - struct usbd_xfer *xfer = addr; - ohci_softc_t *sc = OHCI_XFER2SC(xfer); - struct usbd_device *dev = xfer->ux_pipe->up_dev; - - DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - - mutex_enter(&sc->sc_lock); - if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS) - usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); - mutex_exit(&sc->sc_lock); -} - -void -ohci_timeout_task(void *addr) -{ - struct usbd_xfer *xfer = addr; - ohci_softc_t *sc = OHCI_XFER2SC(xfer); - - OHCIHIST_FUNC(); OHCIHIST_CALLED(); - - DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - - mutex_enter(&sc->sc_lock); - ohci_abort_xfer(xfer, USBD_TIMEOUT); - mutex_exit(&sc->sc_lock); -} - #ifdef OHCI_DEBUG void ohci_dump_tds(ohci_softc_t *sc, ohci_soft_td_t *std) @@ -2212,19 +2163,8 @@ ohci_close_pipe(struct usbd_pipe *pipe, ohci_soft_ed_t *head) } /* - * Cancel or timeout a device request. We have two cases to deal with - * - * 1) A driver wants to stop scheduled or inflight transfers - * 2) A transfer has timed out - * - * It's impossible to guarantee that the requested transfer will not - * have (partially) happened since the hardware runs concurrently. - * - * Transfer state is protected by the bus lock and we set the transfer status - * as soon as either of the above happens (with bus lock held). 
- * - * Then we arrange for the hardware to tells us that it is not still - * processing the TDs by setting the sKip bit and requesting a SOF interrupt + * Arrange for the hardware to tell us that it is not still processing + * the TDs by setting the sKip bit and requesting a SOF interrupt * * Once we see the SOF interrupt we can check the transfer TDs/iTDs to see if * they've been processed and either @@ -2233,7 +2173,7 @@ ohci_close_pipe(struct usbd_pipe *pipe, ohci_soft_ed_t *head) * used. The softint handler will free the old ones. */ void -ohci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +ohci_abortx(struct usbd_xfer *xfer) { OHCIHIST_FUNC(); OHCIHIST_CALLED(); struct ohci_pipe *opipe = OHCI_PIPE2OPIPE(xfer->ux_pipe); @@ -2243,46 +2183,15 @@ ohci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) ohci_physaddr_t headp; int hit; - KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT), - "invalid status for abort: %d", (int)status); - DPRINTF("xfer=%#jx pipe=%#jx sed=%#jx", (uintptr_t)xfer, (uintptr_t)opipe, (uintptr_t)sed, 0); KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - if (status == USBD_CANCELLED) { - /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. - */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); - } else { - /* Otherwise, we are timing out. */ - KASSERT(status == USBD_TIMEOUT); - } - - /* - * The xfer cannot have been cancelled already. It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. - */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* Only the timeout, which runs only once, can time it out. */ - KASSERT(xfer->ux_status != USBD_TIMEOUT); - - /* If anyone else beat us, we're done. 
*/ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - /* We beat everyone else. Claim the status. */ - xfer->ux_status = status; + KASSERTMSG((xfer->ux_status == USBD_CANCELLED || + xfer->ux_status == USBD_TIMEOUT), + "bad abort status: %d", xfer->ux_status); /* * If we're dying, skip the hardware action and just notify the @@ -2875,10 +2784,7 @@ ohci_device_ctrl_start(struct usbd_xfer *xfer) sizeof(sed->ed.ed_tailp), BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_CLF); - if (xfer->ux_timeout && !polling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ohci_timeout, xfer); - } + usbd_xfer_schedule_timeout(xfer); DPRINTF("done", 0, 0, 0, 0); @@ -2899,7 +2805,7 @@ ohci_device_ctrl_abort(struct usbd_xfer *xfer) OHCIHIST_FUNC(); OHCIHIST_CALLED(); DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - ohci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* Close a device control pipe. */ @@ -3090,11 +2996,7 @@ ohci_device_bulk_start(struct usbd_xfer *xfer) usb_syncmem(&sed->dma, sed->offs, sizeof(sed->ed), BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); OWRITE4(sc, OHCI_COMMAND_STATUS, OHCI_BLF); - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - ohci_timeout, xfer); - } - + usbd_xfer_schedule_timeout(xfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) mutex_exit(&sc->sc_lock); @@ -3112,7 +3014,7 @@ ohci_device_bulk_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - ohci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* @@ -3300,7 +3202,7 @@ ohci_device_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); - ohci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* Close a device interrupt pipe. 
*/ diff --git a/sys/dev/usb/uhci.c b/sys/dev/usb/uhci.c index 70d8549cf6a2..5bcdaf96245e 100644 --- a/sys/dev/usb/uhci.c +++ b/sys/dev/usb/uhci.c @@ -194,10 +194,8 @@ Static void uhci_check_intr(uhci_softc_t *, struct uhci_xfer *, ux_completeq_t *); Static void uhci_idone(struct uhci_xfer *, ux_completeq_t *); -Static void uhci_abort_xfer(struct usbd_xfer *, usbd_status); +Static void uhci_abortx(struct usbd_xfer *); -Static void uhci_timeout(void *); -Static void uhci_timeout_task(void *); Static void uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *); Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *); Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *); @@ -212,6 +210,7 @@ Static usbd_status uhci_setup_isoc(struct usbd_pipe *); Static struct usbd_xfer * uhci_allocx(struct usbd_bus *, unsigned int); Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *); +Static bool uhci_dying(struct usbd_bus *); Static void uhci_get_lock(struct usbd_bus *, kmutex_t **); Static int uhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, void *, int); @@ -330,6 +329,8 @@ const struct usbd_bus_methods uhci_bus_methods = { .ubm_dopoll = uhci_poll, .ubm_allocx = uhci_allocx, .ubm_freex = uhci_freex, + .ubm_abortx = uhci_abortx, + .ubm_dying = uhci_dying, .ubm_getlock = uhci_get_lock, .ubm_rhctrl = uhci_roothub_ctrl, }; @@ -657,9 +658,6 @@ uhci_allocx(struct usbd_bus *bus, unsigned int nframes) if (xfer != NULL) { memset(xfer, 0, sizeof(struct uhci_xfer)); - /* Initialise this always so we can call remove on it. 
*/ - usb_init_task(&xfer->ux_aborttask, uhci_timeout_task, xfer, - USB_TASKQ_MPSAFE); #ifdef DIAGNOSTIC struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); uxfer->ux_isdone = true; @@ -686,6 +684,14 @@ uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +Static bool +uhci_dying(struct usbd_bus *bus) +{ + struct uhci_softc *sc = UHCI_BUS2SC(bus); + + return sc->sc_dying; +} + Static void uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -1565,24 +1571,11 @@ uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp) DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0); /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. */ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERT(xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT); - DPRINTF("aborted xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); + if (!usbd_xfer_trycomplete(xfer)) return; - } - - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. - */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); #ifdef DIAGNOSTIC #ifdef UHCI_DEBUG @@ -1712,40 +1705,6 @@ uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp) DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0); } -/* - * Called when a request does not complete. 
- */ -void -uhci_timeout(void *addr) -{ - UHCIHIST_FUNC(); UHCIHIST_CALLED(); - struct usbd_xfer *xfer = addr; - uhci_softc_t *sc = UHCI_XFER2SC(xfer); - struct usbd_device *dev = xfer->ux_pipe->up_dev; - - DPRINTF("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); - - mutex_enter(&sc->sc_lock); - if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS) - usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); - mutex_exit(&sc->sc_lock); -} - -void -uhci_timeout_task(void *addr) -{ - struct usbd_xfer *xfer = addr; - uhci_softc_t *sc = UHCI_XFER2SC(xfer); - - UHCIHIST_FUNC(); UHCIHIST_CALLED(); - - DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - - mutex_enter(&sc->sc_lock); - uhci_abort_xfer(xfer, USBD_TIMEOUT); - mutex_exit(&sc->sc_lock); -} - void uhci_poll(struct usbd_bus *bus) { @@ -2314,11 +2273,7 @@ uhci_device_bulk_start(struct usbd_xfer *xfer) uhci_add_bulk(sc, sqh); uhci_add_intr_list(sc, ux); - - if (xfer->ux_timeout && !polling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - uhci_timeout, xfer); - } + usbd_xfer_schedule_timeout(xfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) mutex_exit(&sc->sc_lock); @@ -2336,25 +2291,14 @@ uhci_device_bulk_abort(struct usbd_xfer *xfer) UHCIHIST_FUNC(); UHCIHIST_CALLED(); - uhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* - * Cancel or timeout a device request. We have two cases to deal with - * - * 1) A driver wants to stop scheduled or inflight transfers - * 2) A transfer has timed out - * - * It's impossible to guarantee that the requested transfer will not - * have (partially) happened since the hardware runs concurrently. - * - * Transfer state is protected by the bus lock and we set the transfer status - * as soon as either of the above happens (with bus lock held). - * * To allow the hardware time to notice we simply wait. 
*/ -void -uhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +Static void +uhci_abortx(struct usbd_xfer *xfer) { UHCIHIST_FUNC(); UHCIHIST_CALLED(); struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); @@ -2362,45 +2306,14 @@ uhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) uhci_softc_t *sc = UHCI_XFER2SC(xfer); uhci_soft_td_t *std; - KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT), - "invalid status for abort: %d", (int)status); - - DPRINTFN(1,"xfer=%#jx, status=%jd", (uintptr_t)xfer, status, 0, 0); + DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - if (status == USBD_CANCELLED) { - /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. - */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); - } else { - /* Otherwise, we are timing out. */ - KASSERT(status == USBD_TIMEOUT); - } - - /* - * The xfer cannot have been cancelled already. It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. - */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* Only the timeout, which runs only once, can time it out. */ - KASSERT(xfer->ux_status != USBD_TIMEOUT); - - /* If anyone else beat us, we're done. */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - /* We beat everyone else. Claim the status. 
*/ - xfer->ux_status = status; + KASSERTMSG((xfer->ux_status == USBD_CANCELLED || + xfer->ux_status == USBD_TIMEOUT), + "bad abort status: %d", xfer->ux_status); /* * If we're dying, skip the hardware action and just notify the @@ -2672,10 +2585,7 @@ uhci_device_ctrl_start(struct usbd_xfer *xfer) DPRINTF("--- dump end ---", 0, 0, 0, 0); } #endif - if (xfer->ux_timeout && !polling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - uhci_timeout, xfer); - } + usbd_xfer_schedule_timeout(xfer); xfer->ux_status = USBD_IN_PROGRESS; if (!polling) mutex_exit(&sc->sc_lock); @@ -2834,7 +2744,7 @@ uhci_device_ctrl_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); UHCIHIST_FUNC(); UHCIHIST_CALLED(); - uhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* Close a device control pipe. */ @@ -2862,7 +2772,7 @@ uhci_device_intr_abort(struct usbd_xfer *xfer) UHCIHIST_FUNC(); UHCIHIST_CALLED(); DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); - uhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } /* Close a device interrupt pipe. 
*/ diff --git a/sys/dev/usb/usbdi.c b/sys/dev/usb/usbdi.c index 3431cee5efc4..db9d046ad58c 100644 --- a/sys/dev/usb/usbdi.c +++ b/sys/dev/usb/usbdi.c @@ -71,6 +71,10 @@ static void usbd_free_buffer(struct usbd_xfer *); static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int); static usbd_status usbd_free_xfer(struct usbd_xfer *); static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status); +static void usbd_xfer_timeout(void *); +static void usbd_xfer_timeout_task(void *); +static bool usbd_xfer_probe_timeout(struct usbd_xfer *); +static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *); #if defined(USB_DEBUG) void @@ -474,7 +478,10 @@ usbd_alloc_xfer(struct usbd_device *dev, unsigned int nframes) goto out; xfer->ux_bus = dev->ud_bus; callout_init(&xfer->ux_callout, CALLOUT_MPSAFE); + callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer); cv_init(&xfer->ux_cv, "usbxfer"); + usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer, + USB_TASKQ_MPSAFE); out: USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0); @@ -1402,3 +1409,381 @@ usbd_get_string0(struct usbd_device *dev, int si, char *buf, int unicode) #endif return USBD_NORMAL_COMPLETION; } + +/* + * usbd_xfer_trycomplete(xfer) + * + * Try to claim xfer for completion. Return true if successful, + * false if the xfer has been synchronously aborted or has timed + * out. + * + * If this returns true, caller is responsible for setting + * xfer->ux_status and calling usb_transfer_complete. To be used + * in a host controller interrupt handler. + * + * Caller must either hold the bus lock or have the bus in polling + * mode. + */ +bool +usbd_xfer_trycomplete(struct usbd_xfer *xfer) +{ + struct usbd_bus *bus __diagused = xfer->ux_bus; + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); + + /* + * If software has completed it, either by synchronous abort or + * by timeout, too late. 
+ */ + if (xfer->ux_status != USBD_IN_PROGRESS) + return false; + + /* + * We are completing the xfer. Cancel the timeout if we can, + * but only asynchronously. See usbd_xfer_cancel_timeout_async + * for why we need not wait for the callout or task here. + */ + usbd_xfer_cancel_timeout_async(xfer); + + /* Success! Note: Caller must set xfer->ux_status afterward. */ + return true; +} + +/* + * usbd_xfer_abort(xfer) + * + * Try to claim xfer to abort. If successful, mark it completed + * with USBD_CANCELLED and call the bus-specific method to abort + * at the hardware level. + * + * To be called in thread context from struct + * usbd_pipe_methods::upm_abort. + * + * Caller must hold the bus lock. + */ +void +usbd_xfer_abort(struct usbd_xfer *xfer) +{ + struct usbd_bus *bus = xfer->ux_bus; + + KASSERT(mutex_owned(bus->ub_lock)); + + /* + * If host controller interrupt or timer interrupt has + * completed it, too late. But the xfer cannot be + * cancelled already -- only one caller can synchronously + * abort. + */ + KASSERT(xfer->ux_status != USBD_CANCELLED); + if (xfer->ux_status != USBD_IN_PROGRESS) + return; + + /* + * Cancel the timeout if we can, but only asynchronously; see + * usbd_xfer_cancel_timeout_async for why we need not wait for + * the callout or task here. + */ + usbd_xfer_cancel_timeout_async(xfer); + + /* + * We beat everyone else. Claim the status as cancelled and do + * the bus-specific dance to abort the hardware. + */ + xfer->ux_status = USBD_CANCELLED; + bus->ub_methods->ubm_abortx(xfer); +} + +/* + * usbd_xfer_timeout(xfer) + * + * Called at IPL_SOFTCLOCK when too much time has elapsed waiting + * for xfer to complete. Since we can't abort the xfer at + * IPL_SOFTCLOCK, defer to a usb_task to run it in thread context, + * unless the xfer has completed or aborted concurrently -- and if + * the xfer has also been resubmitted, take care of rescheduling + * the callout. 
+ */ +static void +usbd_xfer_timeout(void *cookie) +{ + struct usbd_xfer *xfer = cookie; + struct usbd_bus *bus = xfer->ux_bus; + struct usbd_device *dev = xfer->ux_pipe->up_dev; + + /* Acquire the lock so we can transition the timeout state. */ + mutex_enter(bus->ub_lock); + + /* + * Use usbd_xfer_probe_timeout to check whether the timeout is + * still valid, or to reschedule the callout if necessary. If + * it is still valid, schedule the task. + */ + if (usbd_xfer_probe_timeout(xfer)) + usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); + + /* + * Notify usbd_xfer_cancel_timeout_async that we may have + * scheduled the task. This causes callout_invoking to return + * false in usbd_xfer_cancel_timeout_async so that it can tell + * which stage in the callout->task->abort process we're at. + */ + callout_ack(&xfer->ux_callout); + + /* All done -- release the lock. */ + mutex_exit(bus->ub_lock); +} + +/* + * usbd_xfer_timeout_task(xfer) + * + * Called in thread context when too much time has elapsed waiting + * for xfer to complete. Abort the xfer with USBD_TIMEOUT, unless + * it has completed or aborted concurrently -- and if the xfer has + * also been resubmitted, take care of rescheduling the callout. + */ +static void +usbd_xfer_timeout_task(void *cookie) +{ + struct usbd_xfer *xfer = cookie; + struct usbd_bus *bus = xfer->ux_bus; + + /* Acquire the lock so we can transition the timeout state. */ + mutex_enter(bus->ub_lock); + + /* + * Use usbd_xfer_probe_timeout to check whether the timeout is + * still valid, or to reschedule the callout if necessary. If + * it is not valid -- the timeout has been asynchronously + * cancelled, or the xfer has already been resubmitted -- then + * we're done here. + */ + if (!usbd_xfer_probe_timeout(xfer)) + goto out; + + /* + * May have completed or been aborted, but we're the only one + * who can time it out. If it has completed or been aborted, + * no need to timeout. 
+ */ + KASSERT(xfer->ux_status != USBD_TIMEOUT); + if (xfer->ux_status != USBD_IN_PROGRESS) + goto out; + + /* + * We beat everyone else. Claim the status as timed out and do + * the bus-specific dance to abort the hardware. + */ + xfer->ux_status = USBD_TIMEOUT; + bus->ub_methods->ubm_abortx(xfer); + +out: /* All done -- release the lock. */ + mutex_exit(bus->ub_lock); +} + +/* + * usbd_xfer_probe_timeout(xfer) + * + * Probe the status of xfer's timeout. Acknowledge and process a + * request to reschedule. Return true if the timeout is still + * valid and the caller should take further action (queueing a + * task or aborting the xfer), false if it must stop here. + */ +static bool +usbd_xfer_probe_timeout(struct usbd_xfer *xfer) +{ + struct usbd_bus *bus = xfer->ux_bus; + bool valid; + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); + + /* The timeout must be set. */ + KASSERT(xfer->ux_timeout_set); + + /* + * Neither callout nor task may be pending; they execute + * alternately in lock step. + */ + KASSERT(!callout_pending(&xfer->ux_callout)); + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + /* There are a few cases... */ + if (bus->ub_methods->ubm_dying(bus)) { + /* Host controller dying. Drop it all on the floor. */ + xfer->ux_timeout_set = false; + xfer->ux_timeout_reset = false; + valid = false; + } else if (xfer->ux_timeout_reset) { + /* + * The xfer completed _and_ got resubmitted while we + * waited for the lock. Acknowledge the request to + * reschedule, and reschedule it if there is a timeout + * and the bus is not polling. + */ + xfer->ux_timeout_reset = false; + if (xfer->ux_timeout && !bus->ub_usepolling) { + KASSERT(xfer->ux_timeout_set); + callout_schedule(&xfer->ux_callout, + mstohz(xfer->ux_timeout)); + } else { + /* No more callout or task scheduled. 
*/ + xfer->ux_timeout_set = false; + } + valid = false; + } else if (xfer->ux_status != USBD_IN_PROGRESS) { + /* + * The xfer has completed by hardware completion or by + * software abort, and has not been resubmitted, so the + * timeout must be unset, and is no longer valid for + * the caller. + */ + xfer->ux_timeout_set = false; + valid = false; + } else { + /* + * The xfer has not yet completed, so the timeout is + * valid. + */ + valid = true; + } + + /* Any reset must have been processed. */ + KASSERT(!xfer->ux_timeout_reset); + + /* + * Either we claim the timeout is set, or the callout is idle. + * If the timeout is still set, we may be handing off to the + * task instead, so this is an if but not an iff. + */ + KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout)); + + /* + * The task must be idle now. + * + * - If the caller is the callout, _and_ the timeout is still + * valid, the caller will schedule it, but it hasn't been + * scheduled yet. (If the timeout is not valid, the task + * should not be scheduled.) + * + * - If the caller is the task, it cannot be scheduled again + * until the callout runs again, which won't happen until we + * next release the lock. + */ + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); + + return valid; +} + +/* + * usbd_xfer_schedule_timeout(xfer) + * + * Ensure that xfer has a timeout. If the callout is already + * queued or the task is already running, request that they + * reschedule the callout. If not, and if we're not polling, + * schedule the callout anew. + * + * To be called in thread context from struct + * usbd_pipe_methods::upm_start. 
+ */ +void +usbd_xfer_schedule_timeout(struct usbd_xfer *xfer) +{ + struct usbd_bus *bus = xfer->ux_bus; + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); + + if (xfer->ux_timeout_set) { + /* + * Callout or task has fired from a prior completed + * xfer but has not yet noticed that the xfer is done. + * Ask it to reschedule itself to ux_timeout. + */ + xfer->ux_timeout_reset = true; + } else if (xfer->ux_timeout && !bus->ub_usepolling) { + /* Callout is not scheduled. Schedule it. */ + KASSERT(!callout_pending(&xfer->ux_callout)); + callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout)); + xfer->ux_timeout_set = true; + } + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); +} + +/* + * usbd_xfer_cancel_timeout_async(xfer) + * + * Cancel the callout and the task of xfer, which have not yet run + * to completion, but don't wait for the callout or task to finish + * running. + * + * If they have already fired, at worst they are waiting for the + * bus lock. They will see that the xfer is no longer in progress + * and give up, or they will see that the xfer has been + * resubmitted with a new timeout and reschedule the callout. + * + * If a resubmitted request completed so fast that the callout + * didn't have time to process a timer reset, just cancel the + * timer reset. + */ +static void +usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer) +{ + struct usbd_bus *bus __diagused = xfer->ux_bus; + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); + + /* + * If the timer wasn't running anyway, forget about it. This + * can happen if we are completing an isochronous transfer + * which doesn't use the same timeout logic. + */ + if (!xfer->ux_timeout_set) + return; + + xfer->ux_timeout_reset = false; + if (!callout_stop(&xfer->ux_callout)) { + /* + * We stopped the callout before it ran. The timeout + * is no longer set. 
+ */ + xfer->ux_timeout_set = false; + } else if (callout_invoking(&xfer->ux_callout)) { + /* + * The callout has begun to run but it has not yet + * acquired the lock and called callout_ack. The task + * cannot be queued yet, and the callout cannot have + * been rescheduled yet. + * + * By the time the callout acquires the lock, we will + * have transitioned from USBD_IN_PROGRESS to a + * completed status, and possibly also resubmitted the + * xfer and set xfer->ux_timeout_reset = true. In both + * cases, the callout will DTRT, so no further action + * is needed here. + */ + } else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) { + /* + * The callout had fired and scheduled the task, but we + * stopped the task before it could run. The timeout + * is therefore no longer set -- the next resubmission + * of the xfer must schedule a new timeout. + * + * The callout should not be be pending at this point: + * it is scheduled only under the lock, and only when + * xfer->ux_timeout_set is false, or by the callout or + * task itself when xfer->ux_timeout_reset is true. + */ + xfer->ux_timeout_set = false; + } + + /* + * The callout cannot be scheduled and the task cannot be + * queued at this point. Either we cancelled them, or they are + * already running and waiting for the bus lock. 
+ */ + KASSERT(!callout_pending(&xfer->ux_callout)); + KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)); + + KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock)); +} diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h index 62d7fefb8f18..a364778798d9 100644 --- a/sys/dev/usb/usbdi.h +++ b/sys/dev/usb/usbdi.h @@ -188,6 +188,11 @@ int usbd_ratecheck(struct timeval *); usbd_status usbd_get_string(struct usbd_device *, int, char *); usbd_status usbd_get_string0(struct usbd_device *, int, char *, int); +/* For use by HCI drivers, not USB device drivers */ +void usbd_xfer_schedule_timeout(struct usbd_xfer *); +bool usbd_xfer_trycomplete(struct usbd_xfer *); +void usbd_xfer_abort(struct usbd_xfer *); + /* An iterator for descriptors. */ typedef struct { const uByte *cur; diff --git a/sys/dev/usb/usbdivar.h b/sys/dev/usb/usbdivar.h index 3ac1a1cae467..a629abf53c7b 100644 --- a/sys/dev/usb/usbdivar.h +++ b/sys/dev/usb/usbdivar.h @@ -96,6 +96,8 @@ struct usbd_bus_methods { void (*ubm_dopoll)(struct usbd_bus *); struct usbd_xfer *(*ubm_allocx)(struct usbd_bus *, unsigned int); void (*ubm_freex)(struct usbd_bus *, struct usbd_xfer *); + void (*ubm_abortx)(struct usbd_xfer *); + bool (*ubm_dying)(struct usbd_bus *); void (*ubm_getlock)(struct usbd_bus *, kmutex_t **); usbd_status (*ubm_newdev)(device_t, struct usbd_bus *, int, int, int, struct usbd_port *); diff --git a/sys/dev/usb/xhci.c b/sys/dev/usb/xhci.c index b22253ef38e7..a6abb51f2a67 100644 --- a/sys/dev/usb/xhci.c +++ b/sys/dev/usb/xhci.c @@ -143,6 +143,8 @@ static void xhci_softintr(void *); static void xhci_poll(struct usbd_bus *); static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); +static void xhci_abortx(struct usbd_xfer *); +static bool xhci_dying(struct usbd_bus *); static void xhci_get_lock(struct usbd_bus *, kmutex_t **); static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 
struct usbd_port *); @@ -208,15 +210,14 @@ static void xhci_device_bulk_abort(struct usbd_xfer *); static void xhci_device_bulk_close(struct usbd_pipe *); static void xhci_device_bulk_done(struct usbd_xfer *); -static void xhci_timeout(void *); -static void xhci_timeout_task(void *); - static const struct usbd_bus_methods xhci_bus_methods = { .ubm_open = xhci_open, .ubm_softint = xhci_softintr, .ubm_dopoll = xhci_poll, .ubm_allocx = xhci_allocx, .ubm_freex = xhci_freex, + .ubm_abortx = xhci_abortx, + .ubm_dying = xhci_dying, .ubm_getlock = xhci_get_lock, .ubm_newdev = xhci_new_device, .ubm_rhctrl = xhci_roothub_ctrl, @@ -1720,53 +1721,22 @@ xhci_close_pipe(struct usbd_pipe *pipe) * Should be called with sc_lock held. */ static void -xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +xhci_abortx(struct usbd_xfer *xfer) { XHCIHIST_FUNC(); struct xhci_softc * const sc = XHCI_XFER2SC(xfer); struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); - KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT), - "invalid status for abort: %d", (int)status); - - XHCIHIST_CALLARGS("xfer %#jx pipe %#jx status %jd", - (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, status, 0); + XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", + (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - if (status == USBD_CANCELLED) { - /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. - */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); - } else { - /* Otherwise, we are timing out. */ - KASSERT(status == USBD_TIMEOUT); - } - - /* - * The xfer cannot have been cancelled already. 
It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. - */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* Only the timeout, which runs only once, can time it out. */ - KASSERT(xfer->ux_status != USBD_TIMEOUT); - - /* If anyone else beat us, we're done. */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - /* We beat everyone else. Claim the status. */ - xfer->ux_status = status; + KASSERTMSG((xfer->ux_status == USBD_CANCELLED || + xfer->ux_status == USBD_TIMEOUT), + "bad abort status: %d", xfer->ux_status); /* * If we're dying, skip the hardware action and just notify the @@ -1998,6 +1968,13 @@ xhci_event_transfer(struct xhci_softc * const sc, return; } + /* + * Try to claim this xfer for completion. If it has already + * completed or aborted, drop it on the floor. + */ + if (!usbd_xfer_trycomplete(xfer)) + return; + /* 4.11.5.2 Event Data TRB */ if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) { DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx" @@ -2046,17 +2023,7 @@ xhci_event_transfer(struct xhci_softc * const sc, case XHCI_TRB_ERROR_STOPPED: case XHCI_TRB_ERROR_LENGTH: case XHCI_TRB_ERROR_STOPPED_SHORT: - /* - * don't complete the transfer being aborted - * as abort_xfer does instead. - */ - if (xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT) { - DPRINTFN(14, "ignore aborting xfer %#jx", - (uintptr_t)xfer, 0, 0, 0); - return; - } - err = USBD_CANCELLED; + err = USBD_IOERROR; break; case XHCI_TRB_ERROR_STALL: case XHCI_TRB_ERROR_BABBLE: @@ -2079,15 +2046,6 @@ xhci_event_transfer(struct xhci_softc * const sc, /* Override the status. */ xfer->ux_status = USBD_STALLED; - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. 
- */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); - xhci_clear_endpoint_stall_async(xfer); return; default: @@ -2096,29 +2054,9 @@ xhci_event_transfer(struct xhci_softc * const sc, break; } - /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. - */ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERTMSG((xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT), - "xfer %p status %x", xfer, xfer->ux_status); - return; - } - - /* Otherwise, set the status. */ + /* Set the status. */ xfer->ux_status = err; - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. - */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); - if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || (trb_0 & 0x3) == 0x0) { usb_transfer_complete(xfer); @@ -2285,8 +2223,6 @@ xhci_allocx(struct usbd_bus *bus, unsigned int nframes) xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK); if (xfer != NULL) { memset(xfer, 0, sizeof(struct xhci_xfer)); - usb_init_task(&xfer->ux_aborttask, xhci_timeout_task, xfer, - USB_TASKQ_MPSAFE); #ifdef DIAGNOSTIC xfer->ux_state = XFER_BUSY; #endif @@ -2313,6 +2249,14 @@ xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +static bool +xhci_dying(struct usbd_bus *bus) +{ + struct xhci_softc * const sc = XHCI_BUS2SC(bus); + + return sc->sc_dying; +} + static void xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -3908,11 +3852,7 @@ xhci_device_ctrl_start(struct usbd_xfer *xfer) mutex_enter(&sc->sc_lock); xfer->ux_status = USBD_IN_PROGRESS; xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); - - if (xfer->ux_timeout && !xhci_polling_p(sc)) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - xhci_timeout, xfer); - 
} + usbd_xfer_schedule_timeout(xfer); if (!polling) mutex_exit(&sc->sc_lock); @@ -3937,7 +3877,7 @@ xhci_device_ctrl_abort(struct usbd_xfer *xfer) { XHCIHIST_FUNC(); XHCIHIST_CALLED(); - xhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -4032,11 +3972,7 @@ xhci_device_bulk_start(struct usbd_xfer *xfer) mutex_enter(&sc->sc_lock); xfer->ux_status = USBD_IN_PROGRESS; xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); - - if (xfer->ux_timeout && !xhci_polling_p(sc)) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - xhci_timeout, xfer); - } + usbd_xfer_schedule_timeout(xfer); if (!polling) mutex_exit(&sc->sc_lock); @@ -4065,7 +4001,7 @@ xhci_device_bulk_abort(struct usbd_xfer *xfer) { XHCIHIST_FUNC(); XHCIHIST_CALLED(); - xhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -4146,11 +4082,7 @@ xhci_device_intr_start(struct usbd_xfer *xfer) mutex_enter(&sc->sc_lock); xfer->ux_status = USBD_IN_PROGRESS; xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); - - if (xfer->ux_timeout && !polling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - xhci_timeout, xfer); - } + usbd_xfer_schedule_timeout(xfer); if (!polling) mutex_exit(&sc->sc_lock); @@ -4187,7 +4119,7 @@ xhci_device_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); - xhci_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } static void @@ -4200,32 +4132,3 @@ xhci_device_intr_close(struct usbd_pipe *pipe) xhci_close_pipe(pipe); } - -/* ------------ */ - -static void -xhci_timeout(void *addr) -{ - XHCIHIST_FUNC(); XHCIHIST_CALLED(); - struct xhci_xfer * const xx = addr; - struct usbd_xfer * const xfer = &xx->xx_xfer; - struct xhci_softc * const sc = XHCI_XFER2SC(xfer); - struct usbd_device *dev = xfer->ux_pipe->up_dev; - - mutex_enter(&sc->sc_lock); - if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS) - usb_add_task(dev, &xfer->ux_aborttask, 
USB_TASKQ_HC); - mutex_exit(&sc->sc_lock); -} - -static void -xhci_timeout_task(void *addr) -{ - XHCIHIST_FUNC(); XHCIHIST_CALLED(); - struct usbd_xfer * const xfer = addr; - struct xhci_softc * const sc = XHCI_XFER2SC(xfer); - - mutex_enter(&sc->sc_lock); - xhci_abort_xfer(xfer, USBD_TIMEOUT); - mutex_exit(&sc->sc_lock); -} diff --git a/sys/external/bsd/dwc2/dwc2.c b/sys/external/bsd/dwc2/dwc2.c index 13b5f47dd9d7..28b94a5f147d 100644 --- a/sys/external/bsd/dwc2/dwc2.c +++ b/sys/external/bsd/dwc2/dwc2.c @@ -116,6 +116,7 @@ Static struct usbd_xfer * dwc2_allocx(struct usbd_bus *, unsigned int); Static void dwc2_freex(struct usbd_bus *, struct usbd_xfer *); Static void dwc2_get_lock(struct usbd_bus *, kmutex_t **); +Static bool dwc2_dying(struct usbd_bus *); Static int dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, void *, int); @@ -150,7 +151,7 @@ Static void dwc2_device_isoc_done(struct usbd_xfer *); Static usbd_status dwc2_device_start(struct usbd_xfer *); Static void dwc2_close_pipe(struct usbd_pipe *); -Static void dwc2_abort_xfer(struct usbd_xfer *, usbd_status); +Static void dwc2_abortx(struct usbd_xfer *); Static void dwc2_device_clear_toggle(struct usbd_pipe *); Static void dwc2_noop(struct usbd_pipe *pipe); @@ -158,9 +159,6 @@ Static void dwc2_noop(struct usbd_pipe *pipe); Static int dwc2_interrupt(struct dwc2_softc *); Static void dwc2_rhc(void *); -Static void dwc2_timeout(void *); -Static void dwc2_timeout_task(void *); - static inline void dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw, @@ -180,6 +178,8 @@ Static const struct usbd_bus_methods dwc2_bus_methods = { .ubm_dopoll = dwc2_poll, .ubm_allocx = dwc2_allocx, .ubm_freex = dwc2_freex, + .ubm_abortx = dwc2_abortx, + .ubm_dying = dwc2_dying, .ubm_getlock = dwc2_get_lock, .ubm_rhctrl = dwc2_roothub_ctrl, }; @@ -232,22 +232,15 @@ dwc2_allocx(struct usbd_bus *bus, unsigned int nframes) { struct dwc2_softc *sc = DWC2_BUS2SC(bus); struct dwc2_xfer *dxfer; - struct usbd_xfer 
*xfer; DPRINTFN(10, "\n"); DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget); dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK); - xfer = (struct usbd_xfer *)dxfer; if (dxfer != NULL) { memset(dxfer, 0, sizeof(*dxfer)); - dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg, nframes, GFP_KERNEL); - - /* Initialise this always so we can call remove on it. */ - usb_init_task(&xfer->ux_aborttask, dwc2_timeout_task, xfer, - USB_TASKQ_MPSAFE); #ifdef DIAGNOSTIC dxfer->xfer.ux_state = XFER_BUSY; #endif @@ -275,6 +268,14 @@ dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) pool_cache_put(sc->sc_xferpool, xfer); } +Static bool +dwc2_dying(struct usbd_bus *bus) +{ + struct dwc2_softc *sc = DWC2_BUS2SC(bus); + + return sc->sc_dying; +} + Static void dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock) { @@ -318,61 +319,49 @@ dwc2_softintr(void *v) struct usbd_bus *bus = v; struct dwc2_softc *sc = DWC2_BUS2SC(bus); struct dwc2_hsotg *hsotg = sc->sc_hsotg; - struct dwc2_xfer *dxfer; + struct dwc2_xfer *dxfer, *next; + TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed); KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); + /* + * Grab all the xfers that have not been aborted or timed out. + * Do so under a single lock -- without dropping it to run + * usb_transfer_complete as we go -- so that dwc2_abortx won't + * remove next out from under us during iteration when we've + * dropped the lock. 
+ */ mutex_spin_enter(&hsotg->lock); - while ((dxfer = TAILQ_FIRST(&sc->sc_complete)) != NULL) { - - KASSERTMSG(!callout_pending(&dxfer->xfer.ux_callout), - "xfer %p pipe %p\n", dxfer, dxfer->xfer.ux_pipe); - - /* - * dwc2_abort_xfer will remove this transfer from the - * sc_complete queue - */ - /*XXXNH not tested */ - if (dxfer->xfer.ux_status == USBD_CANCELLED || - dxfer->xfer.ux_status == USBD_TIMEOUT) { + TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) { + if (!usbd_xfer_trycomplete(&dxfer->xfer)) + /* + * The hard interrupt handler decided to + * complete the xfer, and put it on sc_complete + * to pass it to us in the soft interrupt + * handler, but in the time between hard + * interrupt and soft interrupt, the xfer was + * aborted or timed out and we lost the race. + */ continue; - } - + KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS); + KASSERT(dxfer->intr_status != USBD_CANCELLED); + KASSERT(dxfer->intr_status != USBD_TIMEOUT); TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); - - mutex_spin_exit(&hsotg->lock); - usb_transfer_complete(&dxfer->xfer); - mutex_spin_enter(&hsotg->lock); + TAILQ_INSERT_TAIL(&claimed, dxfer, xnext); } mutex_spin_exit(&hsotg->lock); -} - -Static void -dwc2_timeout(void *addr) -{ - struct usbd_xfer *xfer = addr; - struct dwc2_softc *sc = DWC2_XFER2SC(xfer); - struct usbd_device *dev = xfer->ux_pipe->up_dev; - - DPRINTF("xfer=%p\n", xfer); - - mutex_enter(&sc->sc_lock); - if (!sc->sc_dying && xfer->ux_status == USBD_IN_PROGRESS) - usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC); - mutex_exit(&sc->sc_lock); -} - -Static void -dwc2_timeout_task(void *addr) -{ - struct usbd_xfer *xfer = addr; - struct dwc2_softc *sc = DWC2_XFER2SC(xfer); - DPRINTF("xfer=%p\n", xfer); + /* Now complete them. 
*/ + while (!TAILQ_EMPTY(&claimed)) { + dxfer = TAILQ_FIRST(&claimed); + KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS); + KASSERT(dxfer->intr_status != USBD_CANCELLED); + KASSERT(dxfer->intr_status != USBD_TIMEOUT); + TAILQ_REMOVE(&claimed, dxfer, xnext); - mutex_enter(&sc->sc_lock); - dwc2_abort_xfer(xfer, USBD_TIMEOUT); - mutex_exit(&sc->sc_lock); + dxfer->xfer.ux_status = dxfer->intr_status; + usb_transfer_complete(&dxfer->xfer); + } } usbd_status @@ -469,53 +458,53 @@ dwc2_close_pipe(struct usbd_pipe *pipe) * Abort a device request. */ Static void -dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status) +dwc2_abortx(struct usbd_xfer *xfer) { struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); struct dwc2_softc *sc = DWC2_XFER2SC(xfer); struct dwc2_hsotg *hsotg = sc->sc_hsotg; - struct dwc2_xfer *d, *tmp; + struct dwc2_xfer *d; int err; - KASSERTMSG((status == USBD_CANCELLED || status == USBD_TIMEOUT), - "invalid status for abort: %d", (int)status); - - DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe, status); + DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe, + xfer->ux_status); KASSERT(mutex_owned(&sc->sc_lock)); ASSERT_SLEEPABLE(); - if (status == USBD_CANCELLED) { - /* - * We are synchronously aborting. Try to stop the - * callout and task, but if we can't, wait for them to - * complete. - */ - callout_halt(&xfer->ux_callout, &sc->sc_lock); - usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask, - USB_TASKQ_HC, &sc->sc_lock); - } else { - /* Otherwise, we are timing out. */ - KASSERT(status == USBD_TIMEOUT); - } + KASSERTMSG((xfer->ux_status == USBD_CANCELLED || + xfer->ux_status == USBD_TIMEOUT), + "bad abort status: %d", xfer->ux_status); + + mutex_spin_enter(&hsotg->lock); /* - * The xfer cannot have been cancelled already. It is the - * responsibility of the caller of usbd_abort_pipe not to try - * to abort a pipe multiple times, whether concurrently or - * sequentially. 
+ * Check whether we aborted or timed out after the hardware + * completion interrupt determined that it's done but before + * the soft interrupt could actually complete it. If so, it's + * too late for the soft interrupt -- at this point we've + * already committed to abort it or time it out, so we need to + * take it off the softint's list of work in case the caller, + * say, frees the xfer before the softint runs. + * + * This logic is unusual among host controller drivers, and + * happens because dwc2 decides to complete xfers in the hard + * interrupt handler rather than in the soft interrupt handler, + * but usb_transfer_complete must be deferred to softint -- and + * we happened to swoop in between the hard interrupt and the + * soft interrupt. Other host controller drivers do almost all + * processing in the softint so there's no intermediate stage. + * + * Fortunately, this linear search to discern the intermediate + * stage is not likely to be a serious performance impact + * because it happens only on abort or timeout. */ - KASSERT(xfer->ux_status != USBD_CANCELLED); - - /* Only the timeout, which runs only once, can time it out. */ - KASSERT(xfer->ux_status != USBD_TIMEOUT); - - /* If anyone else beat us, we're done. */ - if (xfer->ux_status != USBD_IN_PROGRESS) - return; - - /* We beat everyone else. Claim the status. */ - xfer->ux_status = status; + TAILQ_FOREACH(d, &sc->sc_complete, xnext) { + if (d == dxfer) { + TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); + break; + } + } /* * If we're dying, skip the hardware action and just notify the @@ -529,26 +518,17 @@ dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status) /* * HC Step 1: Handle the hardware. 
*/ - mutex_spin_enter(&hsotg->lock); - /* XXXNH suboptimal */ - TAILQ_FOREACH_SAFE(d, &sc->sc_complete, xnext, tmp) { - if (d == dxfer) { - TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); - break; - } - } - err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb); if (err) { DPRINTF("dwc2_hcd_urb_dequeue failed\n"); } +dying: mutex_spin_exit(&hsotg->lock); /* * Final Step: Notify completion to waiting xfers. */ -dying: usb_transfer_complete(xfer); KASSERT(mutex_owned(&sc->sc_lock)); } @@ -761,7 +741,7 @@ dwc2_device_ctrl_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); DPRINTF("xfer=%p\n", xfer); - dwc2_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } Static void @@ -811,7 +791,7 @@ dwc2_device_bulk_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); DPRINTF("xfer=%p\n", xfer); - dwc2_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } Static void @@ -885,8 +865,7 @@ dwc2_device_intr_abort(struct usbd_xfer *xfer) KASSERT(xfer->ux_pipe->up_intrxfer == xfer); DPRINTF("xfer=%p\n", xfer); - - dwc2_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } Static void @@ -935,7 +914,7 @@ dwc2_device_isoc_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); DPRINTF("xfer=%p\n", xfer); - dwc2_abort_xfer(xfer, USBD_CANCELLED); + usbd_xfer_abort(xfer); } void @@ -1150,14 +1129,11 @@ dwc2_device_start(struct usbd_xfer *xfer) /* might need to check cpu_intr_p */ mutex_spin_enter(&hsotg->lock); - - if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) { - callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout), - dwc2_timeout, xfer); - } retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); if (retval) goto fail2; + usbd_xfer_schedule_timeout(xfer); + xfer->ux_status = USBD_IN_PROGRESS; if (alloc_bandwidth) { dwc2_allocate_bus_bandwidth(hsotg, @@ -1171,7 +1147,6 @@ dwc2_device_start(struct usbd_xfer *xfer) return USBD_IN_PROGRESS; fail2: - callout_halt(&xfer->ux_callout, &hsotg->lock); dwc2_urb->priv = NULL; 
mutex_spin_exit(&hsotg->lock); pool_cache_put(sc->sc_qtdpool, qtd); @@ -1461,6 +1436,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, usb_endpoint_descriptor_t *ed; uint8_t xfertype; + KASSERT(mutex_owned(&hsotg->lock)); + if (!qtd) { dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); return; @@ -1477,25 +1454,6 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, return; } - /* - * If software has completed it, either by cancellation - * or timeout, drop it on the floor. - */ - if (xfer->ux_status != USBD_IN_PROGRESS) { - KASSERT(xfer->ux_status == USBD_CANCELLED || - xfer->ux_status == USBD_TIMEOUT); - return; - } - - /* - * Cancel the timeout and the task, which have not yet - * run. If they have already fired, at worst they are - * waiting for the lock. They will see that the xfer - * is no longer in progress and give up. - */ - callout_stop(&xfer->ux_callout); - usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask); - dxfer = DWC2_XFER2DXFER(xfer); sc = DWC2_XFER2SC(xfer); ed = xfer->ux_pipe->up_endpoint->ue_edesc; @@ -1533,29 +1491,26 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, switch (status) { case 0: - xfer->ux_status = USBD_NORMAL_COMPLETION; + dxfer->intr_status = USBD_NORMAL_COMPLETION; break; case -EPIPE: - xfer->ux_status = USBD_STALLED; - break; - case -ETIMEDOUT: - xfer->ux_status = USBD_TIMEOUT; + dxfer->intr_status = USBD_STALLED; break; case -EPROTO: - xfer->ux_status = USBD_INVAL; + dxfer->intr_status = USBD_INVAL; break; case -EIO: - xfer->ux_status = USBD_IOERROR; + dxfer->intr_status = USBD_IOERROR; break; case -EOVERFLOW: - xfer->ux_status = USBD_IOERROR; + dxfer->intr_status = USBD_IOERROR; break; default: - xfer->ux_status = USBD_IOERROR; + dxfer->intr_status = USBD_IOERROR; printf("%s: unknown error status %d\n", __func__, status); } - if (xfer->ux_status == USBD_NORMAL_COMPLETION) { + if (dxfer->intr_status == USBD_NORMAL_COMPLETION) { /* * control 
transfers with no data phase don't touch dmabuf, but * everything else does. diff --git a/sys/external/bsd/dwc2/dwc2var.h b/sys/external/bsd/dwc2/dwc2var.h index 0b57a368b898..53c817e7abdf 100644 --- a/sys/external/bsd/dwc2/dwc2var.h +++ b/sys/external/bsd/dwc2/dwc2var.h @@ -43,6 +43,7 @@ struct dwc2_xfer { struct dwc2_hcd_urb *urb; TAILQ_ENTRY(dwc2_xfer) xnext; /* list of complete xfers */ + usbd_status intr_status; }; struct dwc2_pipe { From 5a794d8149f8d75d0dcc48ffd59fc20feefebca6 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Sun, 15 Dec 2019 17:01:19 +0000 Subject: [PATCH 7/7] Fix steady state of root intr xfers. Why? - Avoid completing a root intr xfer multiple times in races. - Avoid potential use-after-free in poll_hub callouts (uhci, ahci). How? - Use sc->sc_intr_xfer or equivalent to store only a pending xfer that has not yet completed -- whether successfully, by timeout, or by synchronous abort. When any of those happens, set it to null under the lock, so the xfer is completed only once. - For hci drivers that use a callout to poll the root hub (uhci, ahci): . Pass the softc pointer, not the xfer, to the callout, so the callout is not even tempted to use xfer after free -- if the callout fires, but the xfer is synchronously aborted before the callout can do anything, the xfer might be freed by the time the callout starts to examine it. . Teach the callout to do nothing if it is callout_pending after it has fired. This way: 1. completion or synchronous abort can just callout_stop 2. start can just callout_schedule If the callout had already fired before (1), and doesn't acquire the bus lock until after (2), it may be tempted to abort the new root intr xfer just after submission, which would be wrong -- so instead we just have the callout do nothing if it notices it has been rescheduled, since it will fire again after the appropriate time has elapsed. 
--- sys/arch/mips/adm5120/dev/ahci.c | 96 ++++++++++++++++++++++---- sys/dev/usb/ehci.c | 30 +++++++-- sys/dev/usb/ohci.c | 26 +++++-- sys/dev/usb/uhci.c | 112 +++++++++++++++++++++++++------ sys/dev/usb/vhci.c | 26 ++++++- sys/dev/usb/xhci.c | 34 ++++++++-- sys/external/bsd/dwc2/dwc2.c | 26 +++++-- 7 files changed, 297 insertions(+), 53 deletions(-) diff --git a/sys/arch/mips/adm5120/dev/ahci.c b/sys/arch/mips/adm5120/dev/ahci.c index 7a7d21ee28d7..4c8c469f4062 100644 --- a/sys/arch/mips/adm5120/dev/ahci.c +++ b/sys/arch/mips/adm5120/dev/ahci.c @@ -283,6 +283,7 @@ ahci_attach(device_t parent, device_t self, void *aux) SIMPLEQ_INIT(&sc->sc_free_xfers); callout_init(&sc->sc_poll_handle, 0); + callout_setfunc(&sc->sc_poll_handle, ahci_poll_hub, sc); mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_SCHED /* XXXNH */); @@ -422,13 +423,39 @@ ahci_poll(struct usbd_bus *bus) void ahci_poll_hub(void *arg) { - struct usbd_xfer *xfer = arg; - struct ahci_softc *sc = AHCI_XFER2SC(xfer); + struct ahci_softc *sc = arg; + struct usbd_xfer *xfer; u_char *p; static int p0_state=0; static int p1_state=0; - callout_reset(&sc->sc_poll_handle, sc->sc_interval, ahci_poll_hub, xfer); + mutex_enter(&sc->sc_lock); + + /* + * If the intr xfer has completed or been synchronously + * aborted, we have nothing to do. + */ + xfer = sc->sc_intr_xfer; + if (xfer == NULL) + goto out; + + /* + * If the intr xfer for which we were scheduled is done, and + * another intr xfer has been submitted, let that one be dealt + * with when the callout fires again. + * + * The call to callout_pending is racy, but the transition + * from pending to invoking happens atomically. 
The + * callout_ack ensures callout_invoking does not return true + * due to this invocation of the callout; the lock ensures the + * next invocation of the callout cannot callout_ack (unless it + * had already run to completion and nulled sc->sc_intr_xfer, + * in which case it would have bailed out already). + */ + callout_ack(&sc->sc_poll_handle); + if (callout_pending(&sc->sc_poll_handle) || + callout_invoking(&sc->sc_poll_handle)) + goto out; /* USB spec 11.13.3 (p.260) */ p = KERNADDR(&xfer->ux_dmabuf, 0); @@ -444,15 +471,23 @@ ahci_poll_hub(void *arg) p1_state=(REG_READ(ADMHCD_REG_PORTSTATUS1) & ADMHCD_CCS); }; - /* no change, return NAK */ - if (p[0] == 0) - return; + /* no change, return NAK and try again later */ + if (p[0] == 0) { + callout_schedule(&sc->sc_poll_handle, sc->sc_interval); + goto out; + } + /* + * Interrupt completed, and the xfer has not been completed or + * synchronously aborted. Complete the xfer now. + * + * XXX Set ux_isdone if DIAGNOSTIC? + */ xfer->ux_actlen = 1; xfer->ux_status = USBD_NORMAL_COMPLETION; - mutex_enter(&sc->sc_lock); usb_transfer_complete(xfer); - mutex_exit(&sc->sc_lock); + +out: mutex_exit(&sc->sc_lock); } struct usbd_xfer * @@ -719,8 +754,10 @@ ahci_root_intr_start(struct usbd_xfer *xfer) DPRINTF(D_TRACE, ("SLRIstart ")); + KASSERT(sc->sc_intr_xfer == NULL); + sc->sc_interval = MS_TO_TICKS(xfer->ux_pipe->up_endpoint->ue_edesc->bInterval); - callout_reset(&sc->sc_poll_handle, sc->sc_interval, ahci_poll_hub, xfer); + callout_schedule(&sc->sc_poll_handle, sc->sc_interval); sc->sc_intr_xfer = xfer; return USBD_IN_PROGRESS; } @@ -728,24 +765,59 @@ ahci_root_intr_start(struct usbd_xfer *xfer) static void ahci_root_intr_abort(struct usbd_xfer *xfer) { + struct ahci_softc *sc = AHCI_XFER2SC(xfer); + DPRINTF(D_TRACE, ("SLRIabort ")); + + KASSERT(mutex_owned(&sc->sc_lock)); + KASSERT(xfer->ux_pipe->up_intrxfer == xfer); + + /* + * Try to stop the callout before it starts. 
If we got in too + * late, too bad; but if the callout had yet to run and time + * out the xfer, cancel it ourselves. + */ + callout_stop(&sc->sc_poll_handle); + if (sc->sc_intr_xfer == NULL) + return; + + KASSERT(sc->sc_intr_xfer == xfer); + xfer->ux_status = USBD_CANCELLED; + usb_transfer_complete(xfer); } static void ahci_root_intr_close(struct usbd_pipe *pipe) { - struct ahci_softc *sc = AHCI_PIPE2SC(pipe); + struct ahci_softc *sc __diagused = AHCI_PIPE2SC(pipe); DPRINTF(D_TRACE, ("SLRIclose ")); - callout_stop(&sc->sc_poll_handle); - sc->sc_intr_xfer = NULL; + KASSERT(mutex_owned(&sc->sc_lock)); + + /* + * The caller must arrange to have aborted the pipe already, so + * there can be no intr xfer in progress. The callout may + * still be pending from a prior intr xfer -- if it has already + * fired, it will see there is nothing to do, and do nothing. + */ + KASSERT(sc->sc_intr_xfer == NULL); + KASSERT(!callout_pending(&sc->sc_poll_handle)); } static void ahci_root_intr_done(struct usbd_xfer *xfer) { + struct ahci_softc *sc = AHCI_XFER2SC(xfer); + //DPRINTF(D_XFER, ("RIdn ")); + + KASSERT(mutex_owned(&sc->sc_lock)); + + /* Claim the xfer so it doesn't get completed again. */ + KASSERT(sc->sc_intr_xfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intr_xfer = NULL; } static usbd_status diff --git a/sys/dev/usb/ehci.c b/sys/dev/usb/ehci.c index ce4be68a83cd..f72c67c33c1c 100644 --- a/sys/dev/usb/ehci.c +++ b/sys/dev/usb/ehci.c @@ -2722,11 +2722,13 @@ ehci_root_intr_start(struct usbd_xfer *xfer) if (!polling) mutex_enter(&sc->sc_lock); + KASSERT(sc->sc_intrxfer == NULL); sc->sc_intrxfer = xfer; if (!polling) mutex_exit(&sc->sc_lock); - return USBD_IN_PROGRESS; + xfer->ux_status = USBD_IN_PROGRESS; + return xfer->ux_status; } /* Abort a root interrupt request. 
*/ @@ -2738,8 +2740,16 @@ ehci_root_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); - sc->sc_intrxfer = NULL; + /* If xfer has already completed, nothing to do here. */ + if (sc->sc_intrxfer == NULL) + return; + /* + * Otherwise, sc->sc_intrxfer had better be this transfer. + * Cancel it. + */ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status == USBD_IN_PROGRESS); xfer->ux_status = USBD_CANCELLED; usb_transfer_complete(xfer); } @@ -2748,18 +2758,30 @@ ehci_root_intr_abort(struct usbd_xfer *xfer) Static void ehci_root_intr_close(struct usbd_pipe *pipe) { - ehci_softc_t *sc = EHCI_PIPE2SC(pipe); + ehci_softc_t *sc __diagused = EHCI_PIPE2SC(pipe); EHCIHIST_FUNC(); EHCIHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); - sc->sc_intrxfer = NULL; + /* + * Caller must guarantee the xfer has completed first, by + * closing the pipe only after normal completion or an abort. + */ + KASSERT(sc->sc_intrxfer == NULL); } Static void ehci_root_intr_done(struct usbd_xfer *xfer) { + struct ehci_softc *sc = EHCI_XFER2SC(xfer); + + KASSERT(mutex_owned(&sc->sc_lock)); + + /* Claim the xfer so it doesn't get completed again. */ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intrxfer = NULL; } /************************/ diff --git a/sys/dev/usb/ohci.c b/sys/dev/usb/ohci.c index 8425fc5c28f5..9161a40c70a9 100644 --- a/sys/dev/usb/ohci.c +++ b/sys/dev/usb/ohci.c @@ -1708,6 +1708,7 @@ ohci_rhsc(ohci_softc_t *sc, struct usbd_xfer *xfer) p[i/8] |= 1 << (i%8); } DPRINTF("change=0x%02jx", *p, 0, 0, 0); + KASSERT(xfer->ux_status == USBD_IN_PROGRESS); xfer->ux_actlen = xfer->ux_length; xfer->ux_status = USBD_NORMAL_COMPLETION; @@ -1721,7 +1722,9 @@ ohci_root_intr_done(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); + /* Claim the xfer so it doesn't get completed again. 
*/ KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); sc->sc_intrxfer = NULL; } @@ -2516,18 +2519,29 @@ ohci_root_intr_start(struct usbd_xfer *xfer) if (!polling) mutex_exit(&sc->sc_lock); - return USBD_IN_PROGRESS; + xfer->ux_status = USBD_IN_PROGRESS; + return xfer->ux_status; } /* Abort a root interrupt request. */ Static void ohci_root_intr_abort(struct usbd_xfer *xfer) { - ohci_softc_t *sc __diagused = OHCI_XFER2SC(xfer); + ohci_softc_t *sc = OHCI_XFER2SC(xfer); KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); + /* If xfer has already completed, nothing to do here. */ + if (sc->sc_intrxfer == NULL) + return; + + /* + * Otherwise, sc->sc_intrxfer had better be this transfer. + * Cancel it. + */ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status == USBD_IN_PROGRESS); xfer->ux_status = USBD_CANCELLED; usb_transfer_complete(xfer); } @@ -2536,13 +2550,17 @@ ohci_root_intr_abort(struct usbd_xfer *xfer) Static void ohci_root_intr_close(struct usbd_pipe *pipe) { - ohci_softc_t *sc = OHCI_PIPE2SC(pipe); + ohci_softc_t *sc __diagused = OHCI_PIPE2SC(pipe); KASSERT(mutex_owned(&sc->sc_lock)); OHCIHIST_FUNC(); OHCIHIST_CALLED(); - sc->sc_intrxfer = NULL; + /* + * Caller must guarantee the xfer has completed first, by + * closing the pipe only after normal completion or an abort. + */ + KASSERT(sc->sc_intrxfer == NULL); } /************************/ diff --git a/sys/dev/usb/uhci.c b/sys/dev/usb/uhci.c index 5bcdaf96245e..37f78b875ac2 100644 --- a/sys/dev/usb/uhci.c +++ b/sys/dev/usb/uhci.c @@ -574,6 +574,7 @@ uhci_init(uhci_softc_t *sc) "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL); callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE); + callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc); /* Set up the bus struct. 
*/ sc->sc_bus.ub_methods = &uhci_bus_methods; @@ -739,8 +740,7 @@ uhci_resume(device_t dv, const pmf_qual_t *qual) usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock); sc->sc_bus.ub_usepolling--; if (sc->sc_intr_xfer != NULL) - callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub, - sc->sc_intr_xfer); + callout_schedule(&sc->sc_poll_handle, sc->sc_ival); #ifdef UHCI_DEBUG if (uhcidebug >= 2) uhci_dumpregs(sc); @@ -766,9 +766,9 @@ uhci_suspend(device_t dv, const pmf_qual_t *qual) if (uhcidebug >= 2) uhci_dumpregs(sc); #endif - if (sc->sc_intr_xfer != NULL) - callout_stop(&sc->sc_poll_handle); sc->sc_suspend = PWR_SUSPEND; + if (sc->sc_intr_xfer != NULL) + callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock); sc->sc_bus.ub_usepolling++; uhci_run(sc, 0, 1); /* stop the controller */ @@ -998,38 +998,85 @@ void iidump(void) { uhci_dump_iis(thesc); } void uhci_poll_hub(void *addr) { - struct usbd_xfer *xfer = addr; - struct usbd_pipe *pipe = xfer->ux_pipe; - uhci_softc_t *sc; + struct uhci_softc *sc = addr; + struct usbd_xfer *xfer; u_char *p; UHCIHIST_FUNC(); UHCIHIST_CALLED(); - if (__predict_false(pipe->up_dev == NULL || pipe->up_dev->ud_bus == NULL)) - return; /* device has detached */ - sc = UHCI_PIPE2SC(pipe); - callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub, xfer); + mutex_enter(&sc->sc_lock); + + /* + * If the intr xfer has completed or been synchronously + * aborted, we have nothing to do. + */ + xfer = sc->sc_intr_xfer; + if (xfer == NULL) + goto out; + + /* + * If the intr xfer for which we were scheduled is done, and + * another intr xfer has been submitted, let that one be dealt + * with when the callout fires again. + * + * The call to callout_pending is racy, but the transition + * from pending to invoking happens atomically. 
The + * callout_ack ensures callout_invoking does not return true + * due to this invocation of the callout; the lock ensures the + * next invocation of the callout cannot callout_ack (unless it + * had already run to completion and nulled sc->sc_intr_xfer, + * in which case it would have bailed out already). + */ + callout_ack(&sc->sc_poll_handle); + if (callout_pending(&sc->sc_poll_handle) || + callout_invoking(&sc->sc_poll_handle)) + goto out; + /* + * Check flags for the two interrupt ports, and set them in the + * buffer if an interrupt arrived; otherwise arrange to retry later. + */ p = xfer->ux_buf; p[0] = 0; if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC)) p[0] |= 1<<1; if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC)) p[0] |= 1<<2; - if (p[0] == 0) - /* No change, try again in a while */ - return; + if (p[0] == 0) { + /* + * No change -- try again in a while, unless we're + * suspending, in which case we'll try again after + * resume. + */ + if (sc->sc_suspend != PWR_SUSPEND) + callout_schedule(&sc->sc_poll_handle, sc->sc_ival); + goto out; + } + /* + * Interrupt completed, and the xfer has not been completed or + * synchronously aborted. Complete the xfer now. + * + * XXX Set ux_isdone if DIAGNOSTIC? + */ xfer->ux_actlen = 1; xfer->ux_status = USBD_NORMAL_COMPLETION; - mutex_enter(&sc->sc_lock); usb_transfer_complete(xfer); - mutex_exit(&sc->sc_lock); + +out: mutex_exit(&sc->sc_lock); } void uhci_root_intr_done(struct usbd_xfer *xfer) { + struct uhci_softc *sc = UHCI_XFER2SC(xfer); + + KASSERT(mutex_owned(&sc->sc_lock)); + + /* Claim the xfer so it doesn't get completed again. */ + KASSERT(sc->sc_intr_xfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intr_xfer = NULL; } /* @@ -3793,9 +3840,16 @@ uhci_root_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); + /* + * Try to stop the callout before it starts. 
If we got in too + * late, too bad; but if the callout had yet to run and time + * out the xfer, cancel it ourselves. + */ callout_stop(&sc->sc_poll_handle); - sc->sc_intr_xfer = NULL; + if (sc->sc_intr_xfer == NULL) + return; + KASSERT(sc->sc_intr_xfer == xfer); xfer->ux_status = USBD_CANCELLED; #ifdef DIAGNOSTIC UHCI_XFER2UXFER(xfer)->ux_isdone = true; @@ -3830,6 +3884,7 @@ uhci_root_intr_start(struct usbd_xfer *xfer) struct usbd_pipe *pipe = xfer->ux_pipe; uhci_softc_t *sc = UHCI_PIPE2SC(pipe); unsigned int ival; + const bool polling = sc->sc_bus.ub_usepolling; UHCIHIST_FUNC(); UHCIHIST_CALLED(); DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length, @@ -3838,11 +3893,20 @@ uhci_root_intr_start(struct usbd_xfer *xfer) if (sc->sc_dying) return USBD_IOERROR; + if (!polling) + mutex_enter(&sc->sc_lock); + + KASSERT(sc->sc_intr_xfer == NULL); + /* XXX temporary variable needed to avoid gcc3 warning */ ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval; sc->sc_ival = mstohz(ival); - callout_reset(&sc->sc_poll_handle, sc->sc_ival, uhci_poll_hub, xfer); + callout_schedule(&sc->sc_poll_handle, sc->sc_ival); sc->sc_intr_xfer = xfer; + + if (!polling) + mutex_exit(&sc->sc_lock); + return USBD_IN_PROGRESS; } @@ -3850,11 +3914,17 @@ uhci_root_intr_start(struct usbd_xfer *xfer) void uhci_root_intr_close(struct usbd_pipe *pipe) { - uhci_softc_t *sc = UHCI_PIPE2SC(pipe); + uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe); UHCIHIST_FUNC(); UHCIHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); - callout_stop(&sc->sc_poll_handle); - sc->sc_intr_xfer = NULL; + /* + * The caller must arrange to have aborted the pipe already, so + * there can be no intr xfer in progress. The callout may + * still be pending from a prior intr xfer -- if it has already + * fired, it will see there is nothing to do, and do nothing. 
+ */ + KASSERT(sc->sc_intr_xfer == NULL); + KASSERT(!callout_pending(&sc->sc_poll_handle)); } diff --git a/sys/dev/usb/vhci.c b/sys/dev/usb/vhci.c index 0061779b981f..3ac164f2a711 100644 --- a/sys/dev/usb/vhci.c +++ b/sys/dev/usb/vhci.c @@ -627,6 +627,7 @@ vhci_root_intr_start(struct usbd_xfer *xfer) if (!polling) mutex_enter(&sc->sc_lock); + KASSERT(sc->sc_intrxfer == NULL); sc->sc_intrxfer = xfer; if (!polling) mutex_exit(&sc->sc_lock); @@ -644,8 +645,15 @@ vhci_root_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); - sc->sc_intrxfer = NULL; + /* If xfer has already completed, nothing to do here. */ + if (sc->sc_intrxfer == NULL) + return; + /* + * Otherwise, sc->sc_intrxfer had better be this transfer. + * Cancel it. + */ + KASSERT(sc->sc_intrxfer == xfer); xfer->ux_status = USBD_CANCELLED; usb_transfer_complete(xfer); } @@ -653,13 +661,17 @@ vhci_root_intr_abort(struct usbd_xfer *xfer) static void vhci_root_intr_close(struct usbd_pipe *pipe) { - vhci_softc_t *sc = pipe->up_dev->ud_bus->ub_hcpriv; + vhci_softc_t *sc __diagused = pipe->up_dev->ud_bus->ub_hcpriv; DPRINTF("%s: called\n", __func__); KASSERT(mutex_owned(&sc->sc_lock)); - sc->sc_intrxfer = NULL; + /* + * Caller must guarantee the xfer has completed first, by + * closing the pipe only after normal completion or an abort. + */ + KASSERT(sc->sc_intrxfer == NULL); } static void @@ -671,6 +683,14 @@ vhci_root_intr_cleartoggle(struct usbd_pipe *pipe) static void vhci_root_intr_done(struct usbd_xfer *xfer) { + vhci_softc_t *sc = xfer->ux_bus->ub_hcpriv; + + KASSERT(mutex_owned(&sc->sc_lock)); + + /* Claim the xfer so it doesn't get completed again. 
*/ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intrxfer = NULL; } /* -------------------------------------------------------------------------- */ diff --git a/sys/dev/usb/xhci.c b/sys/dev/usb/xhci.c index a6abb51f2a67..ec3ed0e9f280 100644 --- a/sys/dev/usb/xhci.c +++ b/sys/dev/usb/xhci.c @@ -3715,6 +3715,7 @@ xhci_root_intr_start(struct usbd_xfer *xfer) if (!polling) mutex_enter(&sc->sc_lock); + KASSERT(sc->sc_intrxfer[bn] == NULL); sc->sc_intrxfer[bn] = xfer; if (!polling) mutex_exit(&sc->sc_lock); @@ -3725,13 +3726,23 @@ xhci_root_intr_start(struct usbd_xfer *xfer) static void xhci_root_intr_abort(struct usbd_xfer *xfer) { - struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); + struct xhci_softc * const sc = XHCI_XFER2SC(xfer); + const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; XHCIHIST_FUNC(); XHCIHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); + /* If xfer has already completed, nothing to do here. */ + if (sc->sc_intrxfer[bn] == NULL) + return; + + /* + * Otherwise, sc->sc_intrxfer[bn] had better be this transfer. + * Cancel it. + */ + KASSERT(sc->sc_intrxfer[bn] == xfer); xfer->ux_status = USBD_CANCELLED; usb_transfer_complete(xfer); } @@ -3739,22 +3750,35 @@ xhci_root_intr_abort(struct usbd_xfer *xfer) static void xhci_root_intr_close(struct usbd_pipe *pipe) { - struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); - const struct usbd_xfer *xfer = pipe->up_intrxfer; - const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; + struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe); + const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer; + const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 
0 : 1; XHCIHIST_FUNC(); XHCIHIST_CALLED(); KASSERT(mutex_owned(&sc->sc_lock)); - sc->sc_intrxfer[bn] = NULL; + /* + * Caller must guarantee the xfer has completed first, by + * closing the pipe only after normal completion or an abort. + */ + KASSERT(sc->sc_intrxfer[bn] == NULL); } static void xhci_root_intr_done(struct usbd_xfer *xfer) { + struct xhci_softc * const sc = XHCI_XFER2SC(xfer); + const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; + XHCIHIST_FUNC(); XHCIHIST_CALLED(); + KASSERT(mutex_owned(&sc->sc_lock)); + + /* Claim the xfer so it doesn't get completed again. */ + KASSERT(sc->sc_intrxfer[bn] == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intrxfer[bn] = NULL; } /* -------------- */ diff --git a/sys/external/bsd/dwc2/dwc2.c b/sys/external/bsd/dwc2/dwc2.c index 28b94a5f147d..d9c00000abb0 100644 --- a/sys/external/bsd/dwc2/dwc2.c +++ b/sys/external/bsd/dwc2/dwc2.c @@ -649,7 +649,8 @@ dwc2_root_intr_start(struct usbd_xfer *xfer) if (!polling) mutex_exit(&sc->sc_lock); - return USBD_IN_PROGRESS; + xfer->ux_status = USBD_IN_PROGRESS; + return xfer->ux_status; } /* Abort a root interrupt request. */ @@ -663,6 +664,16 @@ dwc2_root_intr_abort(struct usbd_xfer *xfer) KASSERT(mutex_owned(&sc->sc_lock)); KASSERT(xfer->ux_pipe->up_intrxfer == xfer); + /* If xfer has already completed, nothing to do here. */ + if (sc->sc_intrxfer == NULL) + return; + + /* + * Otherwise, sc->sc_intrxfer had better be this transfer. + * Cancel it. + */ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status == USBD_IN_PROGRESS); xfer->ux_status = USBD_CANCELLED; usb_transfer_complete(xfer); } @@ -676,7 +687,11 @@ dwc2_root_intr_close(struct usbd_pipe *pipe) KASSERT(mutex_owned(&sc->sc_lock)); - sc->sc_intrxfer = NULL; + /* + * Caller must guarantee the xfer has completed first, by + * closing the pipe only after normal completion or an abort. 
+ */ + KASSERT(sc->sc_intrxfer == NULL); } Static void @@ -684,9 +699,12 @@ dwc2_root_intr_done(struct usbd_xfer *xfer) { struct dwc2_softc *sc = DWC2_XFER2SC(xfer); - KASSERT(sc->sc_intrxfer != NULL); - sc->sc_intrxfer = NULL; DPRINTF("\n"); + + /* Claim the xfer so it doesn't get completed again. */ + KASSERT(sc->sc_intrxfer == xfer); + KASSERT(xfer->ux_status != USBD_IN_PROGRESS); + sc->sc_intrxfer = NULL; } /***********************************************************************/