diff --git a/sys/dev/pci/if_vioif.c b/sys/dev/pci/if_vioif.c
index ed89412..ec8d93d 100644
--- a/sys/dev/pci/if_vioif.c
+++ b/sys/dev/pci/if_vioif.c
@@ -1130,7 +1130,7 @@ vioif_tx_vq_done(struct virtqueue *vq)
 out:
 	VIOIF_TX_UNLOCK(sc);
 	if (r)
-		vioif_start(ifp);
+		if_schedule_deferred_start(ifp);
 	return r;
 }
 
diff --git a/sys/dev/pci/if_wm.c b/sys/dev/pci/if_wm.c
index 7eb6708..9f7c8af 100644
--- a/sys/dev/pci/if_wm.c
+++ b/sys/dev/pci/if_wm.c
@@ -696,6 +696,7 @@ static void wm_nq_start_locked(struct ifnet *);
 static int wm_nq_transmit(struct ifnet *, struct mbuf *);
 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
+static void wm_deferred_start(struct ifnet *);
 /* Interrupt */
 static int wm_txeof(struct wm_softc *, struct wm_txqueue *);
 static void wm_rxeof(struct wm_rxqueue *);
@@ -2514,12 +2515,16 @@ alloc_retry:
 	ifp->if_ioctl = wm_ioctl;
 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
 		ifp->if_start = wm_nq_start;
-		if (sc->sc_nqueues > 1)
+		if (sc->sc_nqueues > 1) {
 			ifp->if_transmit = wm_nq_transmit;
+			ifp->if_deferred_start = wm_deferred_start;
+		}
 	} else {
 		ifp->if_start = wm_start;
-		if (sc->sc_nqueues > 1)
+		if (sc->sc_nqueues > 1) {
 			ifp->if_transmit = wm_transmit;
+			ifp->if_deferred_start = wm_deferred_start;
+		}
 	}
 	ifp->if_watchdog = wm_watchdog;
 	ifp->if_init = wm_init;
@@ -7233,6 +7238,53 @@ wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
 	}
 }
 
+static void
+wm_deferred_start(struct ifnet *ifp)
+{
+	struct wm_softc *sc = ifp->if_softc;
+	int qid = 0;
+
+	/*
+	 * Try to transmit on all Tx queues. Passing a txq somehow and
+	 * transmitting only on the txq may be better.
+	 */
+restart:
+	WM_CORE_LOCK(sc);
+	if (sc->sc_core_stopping)
+		goto out;
+
+	for (; qid < sc->sc_nqueues; qid++) {
+		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
+
+		if (!mutex_tryenter(txq->txq_lock))
+			continue;
+
+		if (txq->txq_stopping) {
+			mutex_exit(txq->txq_lock);
+			continue;
+		}
+		WM_CORE_UNLOCK(sc);
+
+		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
+			/* XXX need for ALTQ */
+			if (qid == 0)
+				wm_nq_start_locked(ifp);
+			wm_nq_transmit_locked(ifp, txq);
+		} else {
+			/* XXX need for ALTQ */
+			if (qid == 0)
+				wm_start_locked(ifp);
+			wm_transmit_locked(ifp, txq);
+		}
+		mutex_exit(txq->txq_lock);
+
+		qid++;
+		goto restart;
+	}
+out:
+	WM_CORE_UNLOCK(sc);
+}
+
 /* Interrupt */
 
 /*
@@ -7914,7 +7966,8 @@ wm_intr_legacy(void *arg)
 
 	if (handled) {
 		/* Try to get more packets going. */
-		ifp->if_start(ifp);
+		if (!IFQ_IS_EMPTY(&ifp->if_snd))
+			if_schedule_deferred_start_percpuq(ifp, sc->sc_ipq);
 	}
 
 	return handled;
@@ -7953,14 +8006,14 @@ wm_txrxintr_msix(void *arg)
 	/* Try to get more packets going. */
 	if (pcq_peek(txq->txq_interq) != NULL)
-		wm_nq_transmit_locked(ifp, txq);
+		if_schedule_deferred_start_percpuq(ifp, sc->sc_ipq);
 
 	/*
 	 * There are still some upper layer processing which call
 	 * ifp->if_start(). e.g. ALTQ
 	 */
 	if (wmq->wmq_id == 0) {
 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
-			wm_nq_start_locked(ifp);
+			if_schedule_deferred_start_percpuq(ifp, sc->sc_ipq);
 	}
 
 	mutex_exit(txq->txq_lock);
diff --git a/sys/net/if.c b/sys/net/if.c
index ebef3a1..28864ac 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -223,6 +223,8 @@ static int sysctl_percpuq_drops_handler(SYSCTLFN_PROTO);
 static void sysctl_percpuq_setup(struct sysctllog **, const char *,
     struct if_percpuq *);
 
+static void if_deferred_start_softint(struct ifnet *);
+
 #if defined(INET) || defined(INET6)
 static void sysctl_net_pktq_setup(struct sysctllog **, int);
 #endif
@@ -696,6 +698,8 @@ if_initialize(ifnet_t *ifp)
 	psref_target_init(&ifp->if_psref, ifnet_psref_class);
 	ifp->if_ioctl_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 
+	ifp->if_kick_deferred_start_percpu = percpu_alloc(sizeof(bool));
+
 	IFNET_LOCK();
 	if_getindex(ifp);
 	IFNET_UNLOCK();
@@ -765,6 +769,8 @@ if_percpuq_softint(void *arg)
 	struct ifnet *ifp = ipq->ipq_ifp;
 	struct mbuf *m;
 
+	if_deferred_start_softint(ifp);
+
 	while ((m = if_percpuq_dequeue(ipq)) != NULL)
 		ifp->_if_input(ifp, m);
 }
@@ -952,6 +958,68 @@ bad:
 	return;
 }
 
+/*
+ * The common APIs to defer if_start to softint when if_start is requested
+ * from a device driver running in hardware interrupt context.
+ * We reuse the softint of the if_percpuq framework.
+ */
+/*
+ * Call ifp->if_start (or something equivalent). It is called from the softint
+ * of the if_percpuq framework.
+ */
+static void
+if_deferred_start_softint(struct ifnet *ifp)
+{
+	int s;
+	bool *kick_start, do_it;
+
+	s = splnet();
+	kick_start = percpu_getref(ifp->if_kick_deferred_start_percpu);
+	do_it = *kick_start;
+	*kick_start = false;
+	percpu_putref(ifp->if_kick_deferred_start_percpu);
+	splx(s);
+
+	if (!do_it)
+		return;
+
+	if (ifp->if_deferred_start != NULL) {
+#ifdef DEBUG
+		log(LOG_DEBUG, "%s: if_deferred_start on %s\n", __func__,
+		    ifp->if_xname);
+#endif
+		ifp->if_deferred_start(ifp);
+	} else {
+#ifdef DEBUG
+		log(LOG_DEBUG, "%s: if_start_lock on %s\n", __func__,
+		    ifp->if_xname);
+#endif
+		if_start_lock(ifp);
+	}
+}
+
+/*
+ * Device drivers use either of the APIs to schedule deferred if_start.
+ */
+void
+if_schedule_deferred_start_percpuq(struct ifnet *ifp, struct if_percpuq *ipq)
+{
+	bool *kick_start;
+
+	kick_start = percpu_getref(ifp->if_kick_deferred_start_percpu);
+	*kick_start = true;
+	percpu_putref(ifp->if_kick_deferred_start_percpu);
+
+	softint_schedule(ipq->ipq_si);
+}
+
+void
+if_schedule_deferred_start(struct ifnet *ifp)
+{
+
+	KASSERT(ifp->if_percpuq != NULL);
+	if_schedule_deferred_start_percpuq(ifp, ifp->if_percpuq);
+}
 
 /*
  * The common interface input routine that is called by device drivers,
diff --git a/sys/net/if.h b/sys/net/if.h
index 0599d1e..a276afa 100644
--- a/sys/net/if.h
+++ b/sys/net/if.h
@@ -342,6 +342,8 @@ typedef struct ifnet {
 	struct pslist_entry if_pslist_entry;
 	struct psref_target if_psref;
 	struct pslist_head if_addr_pslist;
+	void (*if_deferred_start)(struct ifnet *);
+	percpu_t *if_kick_deferred_start_percpu; /* bool */
 #endif
 } ifnet_t;
 
@@ -989,6 +991,9 @@ void if_percpuq_destroy(struct if_percpuq *);
 void if_percpuq_enqueue(struct if_percpuq *, struct mbuf *);
 
+void if_schedule_deferred_start(struct ifnet *);
+void if_schedule_deferred_start_percpuq(struct ifnet *, struct if_percpuq *);
+
 void ifa_insert(struct ifnet *, struct ifaddr *);
 void ifa_remove(struct ifnet *, struct ifaddr *);
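
Usage sketch (illustration only, not part of the patch above): how a driver's hardware interrupt handler hands Tx restarting to the if_percpuq softint with the APIs this change adds. The driver name "xy", its softc layout and xy_intr() are hypothetical; the if_* calls and macros are the ones introduced or used by the diff.

#include <sys/param.h>
#include <net/if.h>

struct xy_softc {
	struct ifnet		*sc_ifp;	/* driver's ifnet */
	struct if_percpuq	*sc_ipq;	/* from if_percpuq_create() */
};

static int
xy_intr(void *arg)
{
	struct xy_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	/* ... acknowledge the interrupt, reclaim completed Tx descriptors ... */

	/*
	 * Do not call ifp->if_start() from hard interrupt context.
	 * Mark the per-CPU kick flag and let the if_percpuq softint
	 * invoke ifp->if_deferred_start() (or if_start_lock()) later,
	 * as the wm(4) changes above do.
	 */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		if_schedule_deferred_start_percpuq(ifp, sc->sc_ipq);

	/*
	 * A driver relying on the interface's own if_percpuq (as the
	 * vioif(4) change above does) can use the shorter form instead:
	 *	if_schedule_deferred_start(ifp);
	 */
	return 1;
}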