diff --git a/sys/arch/xen/xen/xennetback_xenbus.c b/sys/arch/xen/xen/xennetback_xenbus.c
index dac32a00d8db..559230017ff5 100644
--- a/sys/arch/xen/xen/xennetback_xenbus.c
+++ b/sys/arch/xen/xen/xennetback_xenbus.c
@@ -497,9 +497,7 @@ xennetback_connect(struct xnetback_instance *xneti)
 		goto err2;
 	}
 	xneti->xni_evtchn = evop.u.bind_interdomain.local_port;
-	xen_wmb();
 	xneti->xni_status = CONNECTED;
-	xen_wmb();
 	xneti->xni_ih = xen_intr_establish_xname(-1, &xen_pic,
 	    xneti->xni_evtchn, IST_LEVEL, IPL_NET,
 	    xennetback_evthandler,
@@ -805,28 +803,27 @@ xennetback_evthandler(void *arg)
 	netif_tx_request_t txreq;
 	struct mbuf *m, *m0 = NULL, *mlast = NULL;
 	int receive_pending;
-	RING_IDX req_cons;
 	int queued = 0, m0_len = 0;
 	struct xnetback_xstate *xst;
 	const bool discard = ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
 	    (IFF_UP | IFF_RUNNING));
 
 	XENPRINTF(("xennetback_evthandler "));
-	req_cons = xneti->xni_txring.req_cons;
-	while (1) {
-		xen_rmb(); /* be sure to read the request before updating */
-		xneti->xni_txring.req_cons = req_cons;
-		xen_wmb();
-		RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring,
-		    receive_pending);
-		if (receive_pending == 0)
-			break;
-		RING_COPY_REQUEST(&xneti->xni_txring, req_cons,
-		    &txreq);
+again:
+	while (RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_txring)) {
+		/*
+		 * Ensure we have read the producer's queue index in
+		 * RING_FINAL_CHECK_FOR_REQUESTS before we read the
+		 * content of the producer's next request in
+		 * RING_COPY_REQUEST.
+		 */
 		xen_rmb();
+		RING_COPY_REQUEST(&xneti->xni_txring,
+		    xneti->xni_txring.req_cons,
+		    &txreq);
 		XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname,
 		    txreq.size));
-		req_cons++;
+		xneti->xni_txring.req_cons++;
 		if (__predict_false(discard)) {
 			/* interface not up, drop all requests */
 			if_statinc(ifp, if_iqdrops);
@@ -875,7 +872,7 @@ mbuf_fail:
 			 */
 			int cnt;
 			m0_len = xennetback_tx_m0len_fragment(xneti,
-			    txreq.size, req_cons, &cnt);
+			    txreq.size, xneti->xni_txring.req_cons, &cnt);
 			m->m_len = m0_len;
 			KASSERT(cnt <= XEN_NETIF_NR_SLOTS_MIN);
 
@@ -941,7 +938,8 @@ mbuf_fail:
 
 		XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n",
 		    xneti->xni_if.if_xname, txreq.offset,
-		    txreq.size, txreq.id, req_cons & (RING_SIZE(&xneti->xni_txring) - 1)));
+		    txreq.size, txreq.id,
+		    xneti->xni_txring.req_cons & (RING_SIZE(&xneti->xni_txring) - 1)));
 
 		xst = &xneti->xni_xstate[queued];
 		xst->xs_m = (m0 == NULL || m == m0) ? m : NULL;
@@ -962,6 +960,9 @@ mbuf_fail:
 			queued = 0;
 		}
 	}
+	RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, receive_pending);
+	if (receive_pending)
+		goto again;
 	if (m0) {
 		/* Queue empty, and still unfinished multi-fragment request */
 		printf("%s: dropped unfinished multi-fragment\n",
@@ -1021,14 +1022,12 @@ xennetback_rx_copy_process(struct ifnet *ifp, struct xnetback_instance *xneti,
 	}
 
 	/* update pointer */
-	xen_rmb();
 	xneti->xni_rxring.req_cons += queued;
 	xneti->xni_rxring.rsp_prod_pvt += queued;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_rxring, notify);
 
 	/* send event */
 	if (notify) {
-		xen_rmb();
 		XENPRINTF(("%s receive event\n", xneti->xni_if.if_xname));
 		hypervisor_notify_via_evtchn(xneti->xni_evtchn);
 	}
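
For readers unfamiliar with the Xen ring discipline this patch restores, the consumer-side ordering can be sketched in plain C11. This is a minimal illustration under simplified assumptions (single producer, power-of-two ring), not the driver's code: struct ring, process(), RING_NR, and the acquire load are hypothetical stand-ins for the netif ring types, the per-request handling, and xen_rmb().

#include <stdatomic.h>
#include <stdio.h>

#define RING_NR 256	/* hypothetical ring size, power of two */

struct request {
	int id;
	int size;
};

struct ring {
	_Atomic unsigned int req_prod;	/* advanced by the producer */
	unsigned int req_cons;		/* private to this consumer */
	struct request slot[RING_NR];	/* shared request slots */
};

static void
process(const struct request *req)
{
	printf("request id %d size %d\n", req->id, req->size);
}

static void
consume_all(struct ring *r)
{
	for (;;) {
		/*
		 * Read the producer index first; the acquire load plays
		 * the role of xen_rmb(), ordering this read before the
		 * reads of the slot contents it covers.
		 */
		unsigned int prod = atomic_load_explicit(&r->req_prod,
		    memory_order_acquire);

		if (r->req_cons == prod)
			break;	/* final check: nothing new arrived */

		while (r->req_cons != prod) {
			/*
			 * Copy the request out of the shared ring before
			 * using it (the RING_COPY_REQUEST idea), so the
			 * producer cannot change it after validation.
			 */
			struct request req = r->slot[r->req_cons % RING_NR];
			r->req_cons++;
			process(&req);
		}
		/*
		 * Loop to re-read req_prod, mirroring the patch's
		 * RING_FINAL_CHECK_FOR_REQUESTS + "goto again", so a
		 * request posted while we were draining is not lost.
		 */
	}
}

The key point mirrors the relocated xen_rmb() in xennetback_evthandler: the producer index must be read before any slot it covers, and a final re-check after consuming closes the lost-wakeup window against a producer that posts a request just as the consumer finishes draining.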