diff --git a/sys/altq/if_altq.h b/sys/altq/if_altq.h
index c441651..a65c06f 100644
--- a/sys/altq/if_altq.h
+++ b/sys/altq/if_altq.h
@@ -45,6 +45,9 @@ struct	ifaltq {
 	int	ifq_len;
 	int	ifq_maxlen;
 	int	ifq_drops;
+#ifdef NET_MPSAFE  /* may be defined in sys/net/if.h */
+	kmutex_t	ifq_lock;
+#endif
 
 	/* alternate queueing related fields */
 	int	altq_type;		/* discipline type */
diff --git a/sys/dev/pci/if_vioif.c b/sys/dev/pci/if_vioif.c
index 6a03a05..9413bbd 100644
--- a/sys/dev/pci/if_vioif.c
+++ b/sys/dev/pci/if_vioif.c
@@ -186,6 +186,9 @@ struct vioif_softc {
 	}			sc_ctrl_inuse;
 	kcondvar_t		sc_ctrl_wait;
 	kmutex_t		sc_ctrl_wait_lock;
+	kmutex_t		sc_core_lock;
+	kmutex_t		sc_tx_lock;
+	kmutex_t		sc_rx_lock;
 };
 #define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
@@ -198,7 +201,9 @@ static void	vioif_deferred_init(device_t);
 /* ifnet interface functions */
 static int	vioif_init(struct ifnet *);
 static void	vioif_stop(struct ifnet *, int);
+static void	vioif_stop_locked(struct ifnet *, int);
 static void	vioif_start(struct ifnet *);
+static void	vioif_start_locked(struct ifnet *);
 static int	vioif_ioctl(struct ifnet *, u_long, void *);
 static void	vioif_watchdog(struct ifnet *);
 
@@ -207,12 +212,15 @@ static int	vioif_add_rx_mbuf(struct vioif_softc *, int);
 static void	vioif_free_rx_mbuf(struct vioif_softc *, int);
 static void	vioif_populate_rx_mbufs(struct vioif_softc *);
 static int	vioif_rx_deq(struct vioif_softc *);
+static int	vioif_rx_deq_locked(struct vioif_softc *);
 static int	vioif_rx_vq_done(struct virtqueue *);
+static int	vioif_rx_vq_done_locked(struct virtqueue *);
 static void	vioif_rx_softint(void *);
 static void	vioif_rx_drain(struct vioif_softc *);
 
 /* tx */
 static int	vioif_tx_vq_done(struct virtqueue *);
+static int	vioif_tx_vq_done_locked(struct virtqueue *);
 static void	vioif_tx_drain(struct vioif_softc *);
 
 /* other control */
@@ -223,6 +231,7 @@ static int	vioif_set_allmulti(struct vioif_softc *, bool);
 static int	vioif_set_rx_filter(struct vioif_softc *);
 static int	vioif_rx_filter(struct vioif_softc *);
 static int	vioif_ctrl_vq_done(struct virtqueue *);
+static int	vioif_ctrl_vq_done_locked(struct virtqueue *);
 
 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
 		  vioif_match, vioif_attach, NULL, NULL);
@@ -556,6 +565,9 @@ vioif_attach(device_t parent, device_t self, void *aux)
 			vsc->sc_nvqs = 3;
 		}
 	}
+	mutex_init(&sc->sc_core_lock, MUTEX_DEFAULT, IPL_NET);
+	mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
+	mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
 
 	sc->sc_rx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
 					      vioif_rx_softint, sc);
@@ -585,6 +597,10 @@ vioif_attach(device_t parent, device_t self, void *aux)
 	return;
 
 err:
+	mutex_destroy(&sc->sc_core_lock);
+	mutex_destroy(&sc->sc_tx_lock);
+	mutex_destroy(&sc->sc_rx_lock);
+
 	if (vsc->sc_nvqs == 3) {
 		virtio_free_vq(vsc, &sc->sc_vq[2]);
 		cv_destroy(&sc->sc_ctrl_wait);
@@ -627,8 +643,14 @@ vioif_init(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
 
-	vioif_stop(ifp, 0);
+	mutex_enter(&sc->sc_core_lock);
+	vioif_stop_locked(ifp, 0);
+	mutex_exit(&sc->sc_core_lock);
+
+	mutex_enter(&sc->sc_rx_lock);
 	vioif_populate_rx_mbufs(sc);
+	mutex_exit(&sc->sc_rx_lock);
+
 	vioif_updown(sc, true);
 	ifp->if_flags |= IFF_RUNNING;
 	ifp->if_flags &= ~IFF_OACTIVE;
@@ -641,6 +663,16 @@ static void
 vioif_stop(struct ifnet *ifp, int disable)
 {
 	struct vioif_softc *sc = ifp->if_softc;
+
+	mutex_enter(&sc->sc_core_lock);
+	vioif_stop_locked(ifp, disable);
+	mutex_exit(&sc->sc_core_lock);
+}
+
+static void
+vioif_stop_locked(struct ifnet *ifp, int disable)
+{
+	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
 
 	/* only way to stop I/O and DMA is resetting... */
@@ -666,6 +698,16 @@ static void
 vioif_start(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
+
+	mutex_enter(&sc->sc_tx_lock);
+	vioif_start_locked(ifp);
+	mutex_exit(&sc->sc_tx_lock);
+}
+
+static void
+vioif_start_locked(struct ifnet *ifp)
+{
+	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
 	struct virtqueue *vq = &sc->sc_vq[1]; /* tx vq */
 	struct mbuf *m;
@@ -684,7 +726,7 @@ vioif_start(struct ifnet *ifp)
 		r = virtio_enqueue_prep(vsc, vq, &slot);
 		if (r == EAGAIN) {
 			ifp->if_flags |= IFF_OACTIVE;
-			vioif_tx_vq_done(vq);
+			vioif_tx_vq_done_locked(vq);
 			if (retry++ == 0)
 				continue;
 			else
@@ -707,7 +749,7 @@ vioif_start(struct ifnet *ifp)
 			bus_dmamap_unload(vsc->sc_dmat,
 					  sc->sc_tx_dmamaps[slot]);
 			ifp->if_flags |= IFF_OACTIVE;
-			vioif_tx_vq_done(vq);
+			vioif_tx_vq_done_locked(vq);
 			if (retry++ == 0)
 				continue;
 			else
@@ -855,6 +897,19 @@ vioif_populate_rx_mbufs(struct vioif_softc *sc)
 static int
 vioif_rx_deq(struct vioif_softc *sc)
 {
+	int r;
+
+	mutex_enter(&sc->sc_rx_lock);
+	r = vioif_rx_deq_locked(sc);
+	mutex_exit(&sc->sc_rx_lock);
+
+	return r;
+}
+
+/* dequeue received packets */
+static int
+vioif_rx_deq_locked(struct vioif_softc *sc)
+{
 	struct virtio_softc *vsc = sc->sc_virtio;
 	struct virtqueue *vq = &sc->sc_vq[0];
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
@@ -880,7 +935,10 @@ vioif_rx_deq(struct vioif_softc *sc)
 		m->m_len = m->m_pkthdr.len = len;
 		ifp->if_ipackets++;
 		bpf_mtap(ifp, m);
+
+		mutex_exit(&sc->sc_rx_lock);
 		(*ifp->if_input)(ifp, m);
+		mutex_enter(&sc->sc_rx_lock);
 	}
 
 	return r;
@@ -894,7 +952,21 @@ vioif_rx_vq_done(struct virtqueue *vq)
 	struct vioif_softc *sc = device_private(vsc->sc_child);
 	int r;
 
-	r = vioif_rx_deq(sc);
+	mutex_enter(&sc->sc_rx_lock);
+	r = vioif_rx_vq_done_locked(vq);
+	mutex_exit(&sc->sc_rx_lock);
+
+	return r;
+}
+
+static int
+vioif_rx_vq_done_locked(struct virtqueue *vq)
+{
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(vsc->sc_child);
+	int r;
+
+	r = vioif_rx_deq_locked(sc);
 	if (r)
 		softint_schedule(sc->sc_rx_softint);
 
@@ -907,7 +979,9 @@ vioif_rx_softint(void *arg)
 {
 	struct vioif_softc *sc = arg;
 
+	mutex_enter(&sc->sc_rx_lock);
 	vioif_populate_rx_mbufs(sc);
+	mutex_exit(&sc->sc_rx_lock);
 }
 
 /* free all the mbufs; called from if_stop(disable) */
@@ -939,6 +1013,20 @@ vioif_tx_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(vsc->sc_child);
+	int r;
+
+	mutex_enter(&sc->sc_tx_lock);
+	r = vioif_tx_vq_done_locked(vq);
+	mutex_exit(&sc->sc_tx_lock);
+
+	return r;
+}
+
+static int
+vioif_tx_vq_done_locked(struct virtqueue *vq)
+{
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(vsc->sc_child);
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct mbuf *m;
 	int r = 0;
@@ -1197,6 +1285,20 @@ vioif_ctrl_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(vsc->sc_child);
+	int r;
+
+	mutex_enter(&sc->sc_core_lock);
+	r = vioif_ctrl_vq_done_locked(vq);
+	mutex_exit(&sc->sc_core_lock);
+
+	return r;
+}
+
+static int
+vioif_ctrl_vq_done_locked(struct virtqueue *vq)
+{
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(vsc->sc_child);
 	int r, slot;
 
 	r = virtio_dequeue(vsc, vq, &slot, NULL);
diff --git a/sys/dev/pci/if_wm.c b/sys/dev/pci/if_wm.c
index 1c41f32..abbe6b8 100644
--- a/sys/dev/pci/if_wm.c
+++ b/sys/dev/pci/if_wm.c
@@ -141,6 +141,10 @@ int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
 #define	DPRINTF(x, y)	/* nothing */
 #endif /* WM_DEBUG */
 
+#ifdef NET_MPSAFE
+#define WM_MPSAFE	1
+#endif
+
 /*
  * Transmit descriptor list size.  Due to errata, we can only have
  * 256 hardware descriptors in the ring on < 82544, but we use 4096
@@ -380,8 +384,22 @@ struct wm_softc {
 	int sc_mchash_type;		/* multicast filter offset */
 
 	krndsource_t rnd_source;	/* random source */
+
+#ifdef WM_MPSAFE
+	kmutex_t sc_txrx_lock;		/* lock for tx/rx operations */
+					/* XXX need separation? */
+#endif
 };
 
+#ifdef WM_MPSAFE
+#define WM_LOCK(_sc)	mutex_enter(&(_sc)->sc_txrx_lock)
+#define WM_UNLOCK(_sc)	mutex_exit(&(_sc)->sc_txrx_lock)
+#else
+/* Suppress "unused variable 'sc'" warning */
+#define WM_LOCK(_sc)	do {(void)(_sc);} while (0)
+#define WM_UNLOCK(_sc)	do {} while (0)
+#endif
+
 #define	WM_RXCHAIN_RESET(sc)						\
 do {									\
 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
@@ -495,7 +513,9 @@ do {									\
 } while (/*CONSTCOND*/0)
 
 static void	wm_start(struct ifnet *);
+static void	wm_start_locked(struct ifnet *);
 static void	wm_nq_start(struct ifnet *);
+static void	wm_nq_start_locked(struct ifnet *);
 static void	wm_watchdog(struct ifnet *);
 static int	wm_ifflags_cb(struct ethercom *);
 static int	wm_ioctl(struct ifnet *, u_long, void *);
@@ -1315,6 +1335,9 @@ wm_attach(device_t parent, device_t self, void *aux)
 		return;
 	}
 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
+#ifdef WM_MPSAFE
+	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
+#endif
 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
 	if (sc->sc_ih == NULL) {
 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
@@ -2051,6 +2074,10 @@ wm_attach(device_t parent, device_t self, void *aux)
 		ifp->if_capabilities |= IFCAP_TSOv6;
 	}
 
+#ifdef WM_MPSAFE
+	mutex_init(&sc->sc_txrx_lock, MUTEX_DEFAULT, IPL_NET);
+#endif
+
 	/*
 	 * Attach the interface.
 	 */
@@ -2218,6 +2245,10 @@ wm_detach(device_t self, int flags __unused)
 		sc->sc_ios = 0;
 	}
 
+#ifdef WM_MPSAFE
+	mutex_destroy(&sc->sc_txrx_lock);
+#endif
+
 	return 0;
 }
 
@@ -2546,6 +2577,16 @@ static void
 wm_start(struct ifnet *ifp)
 {
 	struct wm_softc *sc = ifp->if_softc;
+
+	WM_LOCK(sc);
+	wm_start_locked(ifp);
+	WM_UNLOCK(sc);
+}
+
+static void
+wm_start_locked(struct ifnet *ifp)
+{
+	struct wm_softc *sc = ifp->if_softc;
 	struct mbuf *m0;
 	struct m_tag *mtag;
 	struct wm_txsoft *txs;
@@ -3053,6 +3094,16 @@ static void
 wm_nq_start(struct ifnet *ifp)
 {
 	struct wm_softc *sc = ifp->if_softc;
+
+	WM_LOCK(sc);
+	wm_nq_start_locked(ifp);
+	WM_UNLOCK(sc);
+}
+
+static void
+wm_nq_start_locked(struct ifnet *ifp)
+{
+	struct wm_softc *sc = ifp->if_softc;
 	struct mbuf *m0;
 	struct m_tag *mtag;
 	struct wm_txsoft *txs;
@@ -3335,7 +3386,9 @@ wm_watchdog(struct ifnet *ifp)
 	 * Since we're using delayed interrupts, sweep up
 	 * before we report an error.
 	 */
+	WM_LOCK(sc);
 	wm_txintr(sc);
+	WM_UNLOCK(sc);
 
 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
 #ifdef WM_DEBUG
@@ -3486,6 +3539,8 @@ wm_intr(void *arg)
 
 		handled = 1;
 
+		WM_LOCK(sc);
+
 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
 			DPRINTF(WM_DEBUG_RX,
@@ -3512,6 +3567,8 @@ wm_intr(void *arg)
 			wm_linkintr(sc, icr);
 		}
 
+		WM_UNLOCK(sc);
+
 		if (icr & ICR_RXO) {
 #if defined(WM_DEBUG)
 			log(LOG_WARNING, "%s: Receive overrun\n",
@@ -3818,7 +3875,9 @@ wm_rxintr(struct wm_softc *sc)
 		bpf_mtap(ifp, m);
 
 		/* Pass it on. */
+		WM_UNLOCK(sc);
 		(*ifp->if_input)(ifp, m);
+		WM_LOCK(sc);
 	}
 
 	/* Update the receive pointer. */
diff --git a/sys/dev/pci/virtio.c b/sys/dev/pci/virtio.c
index cdcc7ab..9c67765 100644
--- a/sys/dev/pci/virtio.c
+++ b/sys/dev/pci/virtio.c
@@ -170,8 +170,12 @@ virtio_attach(device_t parent, device_t self, void *aux)
 		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
 		return;
 	}
+
 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
+	/* XXX: we want to enable MPSAFE only for vioif */
+	pci_intr_setattr(pc, &ih, PCI_INTR_MPSAFE, true);
 	sc->sc_ih = pci_intr_establish(pc, ih, sc->sc_ipl, virtio_intr, sc);
+
 	if (sc->sc_ih == NULL) {
 		aprint_error_dev(self, "couldn't establish interrupt");
 		if (intrstr != NULL)
diff --git a/sys/net/bridgestp.c b/sys/net/bridgestp.c
index 93d8f85..9cd4bb7 100644
--- a/sys/net/bridgestp.c
+++ b/sys/net/bridgestp.c
@@ -219,7 +219,8 @@ bstp_send_config_bpdu(struct bridge_softc *sc, struct bridge_iflist *bif,
 	struct mbuf *m;
 	struct ether_header *eh;
 	struct bstp_cbpdu bpdu;
-	int s;
+
+	KASSERT(mutex_owned(&sc->sc_iflist_lock));
 
 	ifp = bif->bif_ifp;
 
@@ -274,9 +275,9 @@ bstp_send_config_bpdu(struct bridge_softc *sc, struct bridge_iflist *bif,
 
 	memcpy(mtod(m, char *) + sizeof(*eh), &bpdu, sizeof(bpdu));
 
-	s = splnet();
+	mutex_exit(&sc->sc_iflist_lock);
 	bridge_enqueue(sc, ifp, m, 0);
-	splx(s);
+	mutex_enter(&sc->sc_iflist_lock);
 }
 
 static int
@@ -361,7 +362,8 @@ bstp_transmit_tcn(struct bridge_softc *sc)
 	struct ifnet *ifp;
 	struct ether_header *eh;
 	struct mbuf *m;
-	int s;
+
+	KASSERT(mutex_owned(&sc->sc_iflist_lock));
 
 	KASSERT(bif != NULL);
 	ifp = bif->bif_ifp;
@@ -390,9 +392,9 @@ bstp_transmit_tcn(struct bridge_softc *sc)
 
 	memcpy(mtod(m, char *) + sizeof(*eh), &bpdu, sizeof(bpdu));
 
-	s = splnet();
+	mutex_exit(&sc->sc_iflist_lock);
 	bridge_enqueue(sc, ifp, m, 0);
-	splx(s);
+	mutex_enter(&sc->sc_iflist_lock);
 }
 
 static void
@@ -592,6 +594,8 @@ bstp_input(struct bridge_softc *sc, struct bridge_iflist *bif, struct mbuf *m)
 	struct bstp_tcn_unit tu;
 	uint16_t len;
 
+	KASSERT(bif->bif_refs > 0);
+
 	eh = mtod(m, struct ether_header *);
 
 	if ((bif->bif_flags & IFBIF_STP) == 0)
@@ -621,7 +625,11 @@ bstp_input(struct bridge_softc *sc, struct bridge_iflist *bif, struct mbuf *m)
 	switch (tpdu.tbu_bpdutype) {
 	case BSTP_MSGTYPE_TCN:
 		tu.tu_message_type = tpdu.tbu_bpdutype;
+
+		mutex_enter(&sc->sc_iflist_lock);
 		bstp_received_tcn_bpdu(sc, bif, &tu);
+		mutex_exit(&sc->sc_iflist_lock);
+
 		break;
 	case BSTP_MSGTYPE_CFG:
 		if (m->m_len < sizeof(cpdu) &&
@@ -658,7 +666,11 @@ bstp_input(struct bridge_softc *sc, struct bridge_iflist *bif, struct mbuf *m)
 		    (cpdu.cbu_flags & BSTP_FLAG_TCA) ? 1 : 0;
 		cu.cu_topology_change =
 		    (cpdu.cbu_flags & BSTP_FLAG_TC) ? 1 : 0;
+
+		mutex_enter(&sc->sc_iflist_lock);
 		bstp_received_config_bpdu(sc, bif, &cu);
+		mutex_exit(&sc->sc_iflist_lock);
+
 		break;
 	default:
 		goto out;
@@ -805,6 +817,9 @@ bstp_initialization(struct bridge_softc *sc)
 	struct bridge_iflist *bif, *mif;
 
 	mif = NULL;
+
+	mutex_enter(&sc->sc_iflist_lock);
+
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		if ((bif->bif_flags & IFBIF_STP) == 0)
 			continue;
@@ -823,7 +838,9 @@ bstp_initialization(struct bridge_softc *sc)
 			continue;
 		}
 	}
+
 	if (mif == NULL) {
+		mutex_exit(&sc->sc_iflist_lock);
 		bstp_stop(sc);
 		return;
 	}
@@ -837,6 +854,8 @@ bstp_initialization(struct bridge_softc *sc)
 	    (((uint64_t)(uint8_t)CLLADDR(mif->bif_ifp->if_sadl)[4]) << 8) |
 	    (((uint64_t)(uint8_t)CLLADDR(mif->bif_ifp->if_sadl)[5]) << 0);
 
+	mutex_exit(&sc->sc_iflist_lock);
+
 	sc->sc_designated_root = sc->sc_bridge_id;
 	sc->sc_root_path_cost = 0;
 	sc->sc_root_port = NULL;
@@ -853,6 +872,8 @@ bstp_initialization(struct bridge_softc *sc)
 		callout_reset(&sc->sc_bstpcallout, hz,
 		    bstp_tick, sc);
 
+	mutex_enter(&sc->sc_iflist_lock);
+
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		if (bif->bif_flags & IFBIF_STP)
 			bstp_enable_port(sc, bif);
@@ -863,6 +884,8 @@ bstp_initialization(struct bridge_softc *sc)
 	bstp_port_state_selection(sc);
 	bstp_config_bpdu_generation(sc);
 	bstp_timer_start(&sc->sc_hello_timer, 0);
+
+	mutex_exit(&sc->sc_iflist_lock);
 }
 
 void
@@ -870,12 +893,14 @@ bstp_stop(struct bridge_softc *sc)
 {
 	struct bridge_iflist *bif;
 
+	mutex_enter(&sc->sc_iflist_lock);
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		bstp_set_port_state(bif, BSTP_IFSTATE_DISABLED);
 		bstp_timer_stop(&bif->bif_hold_timer);
 		bstp_timer_stop(&bif->bif_message_age_timer);
 		bstp_timer_stop(&bif->bif_forward_delay_timer);
 	}
+	mutex_exit(&sc->sc_iflist_lock);
 
 	callout_stop(&sc->sc_bstpcallout);
 
@@ -1029,9 +1054,8 @@ bstp_tick(void *arg)
 {
 	struct bridge_softc *sc = arg;
 	struct bridge_iflist *bif;
-	int s;
 
-	s = splnet();
+	mutex_enter(&sc->sc_iflist_lock);
 
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		if ((bif->bif_flags & IFBIF_STP) == 0)
@@ -1076,10 +1100,10 @@ bstp_tick(void *arg)
 			bstp_hold_timer_expiry(sc, bif);
 	}
 
+	mutex_exit(&sc->sc_iflist_lock);
+
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		callout_reset(&sc->sc_bstpcallout, hz, bstp_tick, sc);
-
-	splx(s);
 }
 
 static void
diff --git a/sys/net/if.c b/sys/net/if.c
index 2a6fa0f..d33baf5 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -611,6 +611,10 @@ if_attach(ifnet_t *ifp)
 	ifp->if_snd.altq_ifp  = ifp;
 #endif
 
+#ifdef NET_MPSAFE
+	mutex_init(&ifp->if_snd.ifq_lock, MUTEX_DEFAULT, IPL_NET);
+#endif
+
 	ifp->if_pfil = pfil_head_create(PFIL_TYPE_IFNET, ifp);
 	(void)pfil_run_hooks(if_pfil,
 	    (struct mbuf **)PFIL_IFNET_ATTACH, ifp, PFIL_IFNET);
@@ -732,6 +736,10 @@ if_detach(struct ifnet *ifp)
 		altq_detach(&ifp->if_snd);
 #endif
 
+#ifdef NET_MPSAFE
+	mutex_destroy(&ifp->if_snd.ifq_lock);
+#endif
+
 	sysctl_teardown(&ifp->if_sysctl_log);
 
 #if NCARP > 0
diff --git a/sys/net/if.h b/sys/net/if.h
index bc2c240..e87bb31 100644
--- a/sys/net/if.h
+++ b/sys/net/if.h
@@ -86,6 +86,8 @@
 #include <net/pktqueue.h>
 #endif
 
+//#define NET_MPSAFE 1
+
 /*
  * Always include ALTQ glue here -- we use the ALTQ interface queue
  * structure even when ALTQ is not configured into the kernel so that
@@ -198,11 +200,14 @@ struct if_data {
  * Structure defining a queue for a network interface.
  */
 struct ifqueue {
-	struct	mbuf *ifq_head;
-	struct	mbuf *ifq_tail;
-	int	ifq_len;
-	int	ifq_maxlen;
-	int	ifq_drops;
+	struct		mbuf *ifq_head;
+	struct		mbuf *ifq_tail;
+	int		ifq_len;
+	int		ifq_maxlen;
+	int		ifq_drops;
+#ifdef NET_MPSAFE
+	kmutex_t	ifq_lock;
+#endif
 };
 
 struct ifnet_lock;
@@ -230,6 +235,7 @@ struct ifnet_lock {
 				 * before they leave.
 				 */
 };
+
 #endif /* _KERNEL */
 
 /*
@@ -424,6 +430,14 @@ typedef struct ifnet {
 	"\23TSO6"		\
 	"\24LRO"		\
 
+#ifdef NET_MPSAFE
+#define IFQ_LOCK(_ifq)		mutex_enter(&(_ifq)->ifq_lock)
+#define IFQ_UNLOCK(_ifq)	mutex_exit(&(_ifq)->ifq_lock)
+#else
+/* No-op stubs: do { } while (0) swallows the trailing semicolon safely */
+#define IFQ_LOCK(_ifq)		do { } while (0)
+#define IFQ_UNLOCK(_ifq)	do { } while (0)
+#endif
+
 /*
  * Output queues (ifp->if_snd) and internetwork datagram level (pup level 1)
  * input routines have queues of messages stored on ifqueue structures
@@ -752,6 +766,7 @@ do {									\
 
 #define IFQ_ENQUEUE(ifq, m, pattr, err)					\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (ALTQ_IS_ENABLED((ifq)))					\
 		ALTQ_ENQUEUE((ifq), (m), (pattr), (err));		\
 	else {								\
@@ -765,34 +780,41 @@ do {									\
 	}								\
 	if ((err))							\
 		(ifq)->ifq_drops++;					\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 
 #define IFQ_DEQUEUE(ifq, m)						\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (TBR_IS_ENABLED((ifq)))					\
 		(m) = tbr_dequeue((ifq), ALTDQ_REMOVE);			\
 	else if (ALTQ_IS_ENABLED((ifq)))				\
 		ALTQ_DEQUEUE((ifq), (m));				\
 	else								\
 		IF_DEQUEUE((ifq), (m));					\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 
 #define	IFQ_POLL(ifq, m)						\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (TBR_IS_ENABLED((ifq)))					\
 		(m) = tbr_dequeue((ifq), ALTDQ_POLL);			\
 	else if (ALTQ_IS_ENABLED((ifq)))				\
 		ALTQ_POLL((ifq), (m));					\
 	else								\
 		IF_POLL((ifq), (m));					\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 
 #define	IFQ_PURGE(ifq)							\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (ALTQ_IS_ENABLED((ifq)))					\
 		ALTQ_PURGE((ifq));					\
 	else								\
 		IF_PURGE((ifq));					\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 
 #define	IFQ_SET_READY(ifq)						\
@@ -802,6 +824,7 @@ do {									\
 
 #define	IFQ_CLASSIFY(ifq, m, af, pattr)					\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (ALTQ_IS_ENABLED((ifq))) {					\
 		if (ALTQ_NEEDS_CLASSIFY((ifq)))				\
 			(pattr)->pattr_class = (*(ifq)->altq_classify)	\
@@ -809,6 +832,7 @@ do {									\
 		(pattr)->pattr_af = (af);				\
 		(pattr)->pattr_hdr = mtod((m), void *);		\
 	}								\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 #else /* ! ALTQ */
 #define	ALTQ_DECL(x)		/* nothing */
@@ -816,6 +840,7 @@ do {									\
 
 #define	IFQ_ENQUEUE(ifq, m, pattr, err)					\
 do {									\
+	IFQ_LOCK((ifq));						\
 	if (IF_QFULL((ifq))) {						\
 		m_freem((m));						\
 		(err) = ENOBUFS;					\
@@ -825,13 +850,29 @@ do {									\
 	}								\
 	if ((err))							\
 		(ifq)->ifq_drops++;					\
+	IFQ_UNLOCK((ifq));						\
 } while (/*CONSTCOND*/ 0)
 
-#define	IFQ_DEQUEUE(ifq, m)	IF_DEQUEUE((ifq), (m))
+#define	IFQ_DEQUEUE(ifq, m)						\
+do {									\
+	IFQ_LOCK((ifq));						\
+	IF_DEQUEUE((ifq), (m));						\
+	IFQ_UNLOCK((ifq));						\
+} while (/*CONSTCOND*/ 0)
 
-#define	IFQ_POLL(ifq, m)	IF_POLL((ifq), (m))
+#define	IFQ_POLL(ifq, m)						\
+do {									\
+	IFQ_LOCK((ifq));						\
+	IF_POLL((ifq), (m));						\
+	IFQ_UNLOCK((ifq));						\
+} while (/*CONSTCOND*/ 0)
 
-#define	IFQ_PURGE(ifq)		IF_PURGE((ifq))
+#define	IFQ_PURGE(ifq)							\
+do {									\
+	IFQ_LOCK((ifq));						\
+	IF_PURGE((ifq));						\
+	IFQ_UNLOCK((ifq));						\
+} while (/*CONSTCOND*/ 0)
 
 #define	IFQ_SET_READY(ifq)	/* nothing */
 
diff --git a/sys/net/if_bridge.c b/sys/net/if_bridge.c
index 42c0124..8f1d7cf 100644
--- a/sys/net/if_bridge.c
+++ b/sys/net/if_bridge.c
@@ -219,6 +219,7 @@ static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
 						  const char *name);
 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
 						     struct ifnet *ifp);
+static void	bridge_release_member(struct bridge_softc *, struct bridge_iflist *);
 static void	bridge_delete_member(struct bridge_softc *,
 				     struct bridge_iflist *);
 
@@ -313,6 +314,7 @@ static const struct bridge_control bridge_control_table[] = {
 static const int bridge_control_table_size = __arraycount(bridge_control_table);
 
 static LIST_HEAD(, bridge_softc) bridge_list;
+static kmutex_t bridge_list_lock;
 
 static struct if_clone bridge_cloner =
     IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);
@@ -330,6 +332,7 @@ bridgeattach(int n)
 	    0, 0, 0, "brtpl", NULL, IPL_NET);
 
 	LIST_INIT(&bridge_list);
+	mutex_init(&bridge_list_lock, MUTEX_DEFAULT, IPL_NET);
 	if_clone_attach(&bridge_cloner);
 }
 
@@ -343,7 +346,6 @@ bridge_clone_create(struct if_clone *ifc, int unit)
 {
 	struct bridge_softc *sc;
 	struct ifnet *ifp;
-	int s;
 
 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
 	ifp = &sc->sc_if;
@@ -364,6 +366,8 @@ bridge_clone_create(struct if_clone *ifc, int unit)
 	callout_init(&sc->sc_bstpcallout, 0);
 
 	LIST_INIT(&sc->sc_iflist);
+	mutex_init(&sc->sc_iflist_lock, MUTEX_DEFAULT, IPL_NET);
+	cv_init(&sc->sc_iflist_cv, "if_bridge_cv");
 
 	if_initname(ifp, ifc->ifc_name, unit);
 	ifp->if_softc = sc;
@@ -388,9 +392,9 @@ bridge_clone_create(struct if_clone *ifc, int unit)
 
 	if_alloc_sadl(ifp);
 
-	s = splnet();
+	mutex_enter(&bridge_list_lock);
 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
-	splx(s);
+	mutex_exit(&bridge_list_lock);
 
 	return (0);
 }
@@ -405,7 +409,6 @@ bridge_clone_destroy(struct ifnet *ifp)
 {
 	struct bridge_softc *sc = ifp->if_softc;
 	struct bridge_iflist *bif;
-	int s;
 	uint64_t xc;
 
 	/* Must be called during IFF_RUNNING, i.e., before bridge_stop */
@@ -413,16 +416,16 @@ bridge_clone_destroy(struct ifnet *ifp)
 	xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
 	xc_wait(xc);
 
-	s = splnet();
-
 	bridge_stop(ifp, 1);
 
+	mutex_enter(&sc->sc_iflist_lock);
 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
 		bridge_delete_member(sc, bif);
+	mutex_exit(&sc->sc_iflist_lock);
 
+	mutex_enter(&bridge_list_lock);
 	LIST_REMOVE(sc, sc_list);
-
-	splx(s);
+	mutex_exit(&bridge_list_lock);
 
 	if_detach(ifp);
 
@@ -433,6 +436,9 @@ bridge_clone_destroy(struct ifnet *ifp)
 	/* Tear down the routing table. */
 	bridge_rtable_fini(sc);
 
+	cv_destroy(&sc->sc_iflist_cv);
+	mutex_destroy(&sc->sc_iflist_lock);
+
 	free(sc, M_DEVBUF);
 
 	return (0);
@@ -649,7 +655,7 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
 /*
  * bridge_lookup_member:
  *
- *	Lookup a bridge member interface.  Must be called at splnet().
+ *	Lookup a bridge member interface.
  */
 static struct bridge_iflist *
 bridge_lookup_member(struct bridge_softc *sc, const char *name)
@@ -657,31 +663,66 @@ bridge_lookup_member(struct bridge_softc *sc, const char *name)
 	struct bridge_iflist *bif;
 	struct ifnet *ifp;
 
+	mutex_enter(&sc->sc_iflist_lock);
+
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		ifp = bif->bif_ifp;
 		if (strcmp(ifp->if_xname, name) == 0)
-			return (bif);
+			break;
 	}
 
-	return (NULL);
+	if (bif != NULL) {
+		atomic_inc_32(&bif->bif_refs);
+		membar_producer();
+	}
+
+	mutex_exit(&sc->sc_iflist_lock);
+
+	return bif;
 }
 
 /*
  * bridge_lookup_member_if:
  *
- *	Lookup a bridge member interface by ifnet*.  Must be called at splnet().
+ *	Lookup a bridge member interface by ifnet*.
  */
 static struct bridge_iflist *
 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
 {
 	struct bridge_iflist *bif;
 
+	mutex_enter(&sc->sc_iflist_lock);
+
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		if (bif->bif_ifp == member_ifp)
-			return (bif);
+			break;
 	}
 
-	return (NULL);
+	if (bif != NULL) {
+		atomic_inc_32(&bif->bif_refs);
+		membar_producer();
+	}
+
+	mutex_exit(&sc->sc_iflist_lock);
+
+	return bif;
+}
+
+/*
+ * bridge_release_member:
+ *
+ *	Release the specified member interface.
+ */
+static void
+bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif)
+{
+	atomic_dec_32(&bif->bif_refs);
+	membar_sync();
+	if (__predict_false(bif->bif_waiting && bif->bif_refs == 0)) {
+		mutex_enter(&sc->sc_iflist_lock);
+		cv_broadcast(&sc->sc_iflist_cv);
+		mutex_exit(&sc->sc_iflist_lock);
+	}
 }
 
 /*
@@ -694,30 +735,22 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
 {
 	struct ifnet *ifs = bif->bif_ifp;
 
-	switch (ifs->if_type) {
-	case IFT_ETHER:
-		/*
-		 * Take the interface out of promiscuous mode.
-		 */
-		(void) ifpromisc(ifs, 0);
-		break;
-	default:
-#ifdef DIAGNOSTIC
-		panic("bridge_delete_member: impossible");
-#endif
-		break;
-	}
+	KASSERT(mutex_owned(&sc->sc_iflist_lock));
 
 	ifs->if_input = ether_input;
 	ifs->if_bridge = NULL;
+
 	LIST_REMOVE(bif, bif_next);
 
-	bridge_rtdelete(sc, ifs);
+	bif->bif_waiting = true;
+	membar_sync();
+	while (bif->bif_refs > 0) {
+		aprint_debug("%s: cv_wait on iflist\n", __func__);
+		cv_wait(&sc->sc_iflist_cv, &sc->sc_iflist_lock);
+	}
+	bif->bif_waiting = false;
 
 	free(bif, M_DEVBUF);
-
-	if (sc->sc_if.if_flags & IFF_RUNNING)
-		bstp_initialization(sc);
 }
 
 static int
@@ -766,11 +799,17 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg)
 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
 	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
 	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
+	bif->bif_refs = 0;
+	bif->bif_waiting = false;
+
+	mutex_enter(&sc->sc_iflist_lock);
 
 	ifs->if_bridge = sc;
 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
 	ifs->if_input = bridge_input;
 
+	mutex_exit(&sc->sc_iflist_lock);
+
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		bstp_initialization(sc);
 	else
@@ -788,15 +827,52 @@ static int
 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
 {
 	struct ifbreq *req = arg;
+	const char *name = req->ifbr_ifsname;
 	struct bridge_iflist *bif;
+	struct ifnet *ifs;
 
-	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
-	if (bif == NULL)
-		return (ENOENT);
+	mutex_enter(&sc->sc_iflist_lock);
+
+	/*
+	 * Don't use bridge_lookup_member. We want to get a member
+	 * with bif_refs == 0.
+	 */
+	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
+		ifs = bif->bif_ifp;
+		if (strcmp(ifs->if_xname, name) == 0)
+			break;
+	}
+
+	if (bif == NULL) {
+		mutex_exit(&sc->sc_iflist_lock);
+		return ENOENT;
+	}
 
 	bridge_delete_member(sc, bif);
 
-	return (0);
+	mutex_exit(&sc->sc_iflist_lock);
+
+	switch (ifs->if_type) {
+	case IFT_ETHER:
+		/*
+		 * Take the interface out of promiscuous mode.
+		 * Don't call it with holding sc_iflist_lock.
+		 */
+		(void) ifpromisc(ifs, 0);
+		break;
+	default:
+#ifdef DIAGNOSTIC
+		panic("bridge_delete_member: impossible");
+#endif
+		break;
+	}
+
+	bridge_rtdelete(sc, ifs);
+
+	if (sc->sc_if.if_flags & IFF_RUNNING)
+		bstp_initialization(sc);
+
+	return 0;
 }
 
 static int
@@ -815,6 +891,8 @@ bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
 	req->ifbr_path_cost = bif->bif_path_cost;
 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
 
+	bridge_release_member(sc, bif);
+
 	return (0);
 }
 
@@ -836,12 +914,15 @@ bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
 
 		default:
 			/* Nothing else can. */
+			bridge_release_member(sc, bif);
 			return (EINVAL);
 		}
 	}
 
 	bif->bif_flags = req->ifbr_ifsflags;
 
+	bridge_release_member(sc, bif);
+
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		bstp_initialization(sc);
 
@@ -877,6 +958,8 @@ bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
 	struct ifbreq breq;
 	int count, len, error = 0;
 
+	mutex_enter(&sc->sc_iflist_lock);
+
 	count = 0;
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 		count++;
@@ -907,6 +990,8 @@ bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
 		len -= sizeof(breq);
 	}
 
+	mutex_exit(&sc->sc_iflist_lock);
+
 	bifc->ifbic_len = sizeof(breq) * count;
 	return (error);
 }
@@ -922,6 +1007,8 @@ bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
 	if (bac->ifbac_len == 0)
 		return (0);
 
+	mutex_enter(&sc->sc_rtlist_lock);
+
 	len = bac->ifbac_len;
 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
 		if (len < sizeof(bareq))
@@ -943,6 +1030,8 @@ bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
 		len -= sizeof(bareq);
 	}
  out:
+	mutex_exit(&sc->sc_rtlist_lock);
+
 	bac->ifbac_len = sizeof(bareq) * count;
 	return (error);
 }
@@ -961,6 +1050,8 @@ bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
 	    req->ifba_flags);
 
+	bridge_release_member(sc, bif);
+
 	return (error);
 }
 
@@ -1115,6 +1206,8 @@ bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		bstp_initialization(sc);
 
+	bridge_release_member(sc, bif);
+
 	return (0);
 }
 
@@ -1171,6 +1264,8 @@ bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		bstp_initialization(sc);
 
+	bridge_release_member(sc, bif);
+
 	return (0);
 }
 
@@ -1186,6 +1281,9 @@ bridge_ifdetach(struct ifnet *ifp)
 	struct bridge_softc *sc = ifp->if_bridge;
 	struct ifbreq breq;
 
+	/* ioctl_lock should prevent this from happening */
+	KASSERT(sc != NULL);
+
 	memset(&breq, 0, sizeof(breq));
 	strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));
 
@@ -1240,8 +1338,6 @@ bridge_stop(struct ifnet *ifp, int disable)
  * bridge_enqueue:
  *
  *	Enqueue a packet on a bridge member interface.
- *
- *	NOTE: must be called at splnet().
  */
 void
 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
@@ -1283,7 +1379,9 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
 	len = m->m_pkthdr.len;
 	m->m_flags |= M_PROTO1;
 	mflags = m->m_flags;
+
 	IFQ_ENQUEUE(&dst_ifp->if_snd, m, &pktattr, error);
+
 	if (error) {
 		/* mbuf is already freed */
 		sc->sc_if.if_oerrors++;
@@ -1321,7 +1419,6 @@ bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
 	struct ether_header *eh;
 	struct ifnet *dst_if;
 	struct bridge_softc *sc;
-	int s;
 
 	if (m->m_len < ETHER_HDR_LEN) {
 		m = m_pullup(m, ETHER_HDR_LEN);
@@ -1332,14 +1429,13 @@ bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
 	eh = mtod(m, struct ether_header *);
 	sc = ifp->if_bridge;
 
-	s = splnet();
-
 	/*
 	 * If bridge is down, but the original output interface is up,
 	 * go ahead and send out that interface.  Otherwise, the packet
 	 * is dropped below.
 	 */
-	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
+	if (__predict_false(sc == NULL) ||
+	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
 		dst_if = ifp;
 		goto sendunicast;
 	}
@@ -1357,6 +1453,8 @@ bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
 		struct mbuf *mc;
 		int used = 0;
 
+		mutex_enter(&sc->sc_iflist_lock);
+
 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 			dst_if = bif->bif_ifp;
 			if ((dst_if->if_flags & IFF_RUNNING) == 0)
@@ -1391,9 +1489,11 @@ bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
 
 			bridge_enqueue(sc, dst_if, mc, 0);
 		}
+
+		mutex_exit(&sc->sc_iflist_lock);
+
 		if (used == 0)
 			m_freem(m);
-		splx(s);
 		return (0);
 	}
 
@@ -1404,13 +1504,11 @@ bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
 
 	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
 		m_freem(m);
-		splx(s);
 		return (0);
 	}
 
 	bridge_enqueue(sc, dst_if, m, 0);
 
-	splx(s);
 	return (0);
 }
 
@@ -1441,17 +1539,10 @@ bridge_forward(void *v)
 	struct bridge_iflist *bif;
 	struct ifnet *src_if, *dst_if;
 	struct ether_header *eh;
-	int s;
 
-	KERNEL_LOCK(1, NULL);
-	mutex_enter(softnet_lock);
-	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
-		mutex_exit(softnet_lock);
-		KERNEL_UNLOCK_ONE(NULL);
+	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0)
 		return;
-	}
 
-	s = splnet();
 	while ((m = pktq_dequeue(sc->sc_fwd_pktq)) != NULL) {
 		src_if = m->m_pkthdr.rcvif;
 
@@ -1474,6 +1565,7 @@ bridge_forward(void *v)
 			case BSTP_IFSTATE_LISTENING:
 			case BSTP_IFSTATE_DISABLED:
 				m_freem(m);
+				bridge_release_member(sc, bif);
 				continue;
 			}
 		}
@@ -1500,9 +1592,12 @@ bridge_forward(void *v)
 		if ((bif->bif_flags & IFBIF_STP) != 0 &&
 		    bif->bif_state == BSTP_IFSTATE_LEARNING) {
 			m_freem(m);
+			bridge_release_member(sc, bif);
 			continue;
 		}
 
+		bridge_release_member(sc, bif);
+
 		/*
 		 * At this point, the port either doesn't participate
 		 * in spanning tree or it is in the forwarding state.
@@ -1546,6 +1641,7 @@ bridge_forward(void *v)
 			m_freem(m);
 			continue;
 		}
+
 		bif = bridge_lookup_member_if(sc, dst_if);
 		if (bif == NULL) {
 			/* Not a member of the bridge (anymore?) */
@@ -1558,15 +1654,15 @@ bridge_forward(void *v)
 			case BSTP_IFSTATE_DISABLED:
 			case BSTP_IFSTATE_BLOCKING:
 				m_freem(m);
+				bridge_release_member(sc, bif);
 				continue;
 			}
 		}
 
+		bridge_release_member(sc, bif);
+
 		bridge_enqueue(sc, dst_if, m, 1);
 	}
-	splx(s);
-	mutex_exit(softnet_lock);
-	KERNEL_UNLOCK_ONE(NULL);
 }
 
 static bool
@@ -1604,7 +1700,6 @@ bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
  *
  *	Receive input from a member interface.  Queue the packet for
  *	bridging if it is not for us.
- *	should be called at splnet()
  */
 static void
 bridge_input(struct ifnet *ifp, struct mbuf *m)
@@ -1613,7 +1708,8 @@ bridge_input(struct ifnet *ifp, struct mbuf *m)
 	struct bridge_iflist *bif;
 	struct ether_header *eh;
 
-	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
+	if (__predict_false(sc == NULL) ||
+	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
 		ether_input(ifp, m);
 		return;
 	}
@@ -1642,6 +1738,7 @@ bridge_input(struct ifnet *ifp, struct mbuf *m)
 	    !bstp_state_before_learning(bif)) {
 		struct bridge_iflist *_bif;
 
+		mutex_enter(&sc->sc_iflist_lock);
 		LIST_FOREACH(_bif, &sc->sc_iflist, bif_next) {
 			/* It is destined for us. */
 			if (bridge_ourether(_bif, eh, 0)) {
@@ -1650,21 +1747,28 @@ bridge_input(struct ifnet *ifp, struct mbuf *m)
 					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
 				m->m_pkthdr.rcvif = _bif->bif_ifp;
 				ether_input(_bif->bif_ifp, m);
-				return;
+				break;
 			}
 
 			/* We just received a packet that we sent out. */
 			if (bridge_ourether(_bif, eh, 1)) {
 				m_freem(m);
-				return;
+				break;
 			}
 		}
+		mutex_exit(&sc->sc_iflist_lock);
+
+		if (_bif != NULL) {
+			bridge_release_member(sc, bif);
+			return;
+		}
 	}
 
 	/* Tap off 802.1D packets; they do not get forwarded. */
 	if (bif->bif_flags & IFBIF_STP &&
 	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
 		bstp_input(sc, bif, m);
+		bridge_release_member(sc, bif);
 		return;
 	}
 
@@ -1674,11 +1778,14 @@ bridge_input(struct ifnet *ifp, struct mbuf *m)
 	 */
 	if (bstp_state_before_learning(bif)) {
 		ether_input(ifp, m);
+		bridge_release_member(sc, bif);
 		return;
 	}
 
+	bridge_release_member(sc, bif);
+
 	/* Queue the packet for bridge forwarding. */
-	if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, 0)))
+	if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, eh->ether_dhost[5])))
 		m_freem(m);
 }
 
@@ -1699,6 +1806,7 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
 	bool used, bmcast;
 
 	used = bmcast = m->m_flags & (M_BCAST|M_MCAST);
+	mutex_enter(&sc->sc_iflist_lock);
 
 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
 		dst_if = bif->bif_ifp;
@@ -1737,6 +1845,8 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
 		ether_input(src_if, m);
 	else if (!used)
 		m_freem(m);
+
+	mutex_exit(&sc->sc_iflist_lock);
 }
 
 /*
@@ -1749,15 +1859,19 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
     struct ifnet *dst_if, int setflags, uint8_t flags)
 {
 	struct bridge_rtnode *brt;
-	int error;
+	int error = 0;
+
+	mutex_enter(&sc->sc_rtlist_lock);
 
 	/*
 	 * A route for this destination might already exist.  If so,
 	 * update it, otherwise create a new one.
 	 */
 	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
-		if (sc->sc_brtcnt >= sc->sc_brtmax)
-			return (ENOSPC);
+		if (sc->sc_brtcnt >= sc->sc_brtmax) {
+			error = ENOSPC;
+			goto out;
+		}
 
 		/*
 		 * Allocate a new bridge forwarding node, and
@@ -1765,8 +1879,10 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
 		 * address.
 		 */
 		brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
-		if (brt == NULL)
-			return (ENOMEM);
+		if (brt == NULL) {
+			error = ENOMEM;
+			goto out;
+		}
 
 		memset(brt, 0, sizeof(*brt));
 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
@@ -1775,7 +1891,7 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
 
 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
 			pool_put(&bridge_rtnode_pool, brt);
-			return (error);
+			goto out;
 		}
 	}
 
@@ -1788,7 +1904,10 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
 			brt->brt_expire = time_uptime + sc->sc_brttimeout;
 	}
 
-	return (0);
+out:
+	mutex_exit(&sc->sc_rtlist_lock);
+
+	return error;
 }
 
 /*
@@ -1800,11 +1919,17 @@ static struct ifnet *
 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
 {
 	struct bridge_rtnode *brt;
+	struct ifnet *ifs = NULL;
+
+	mutex_enter(&sc->sc_rtlist_lock);
+
+	brt = bridge_rtnode_lookup(sc, addr);
+	if (brt != NULL)
+		ifs = brt->brt_ifp;
 
-	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
-		return (NULL);
+	mutex_exit(&sc->sc_rtlist_lock);
 
-	return (brt->brt_ifp);
+	return ifs;
 }
 
 /*
@@ -1819,23 +1944,30 @@ bridge_rttrim(struct bridge_softc *sc)
 {
 	struct bridge_rtnode *brt, *nbrt;
 
+	mutex_enter(&sc->sc_rtlist_lock);
+
 	/* Make sure we actually need to do this. */
 	if (sc->sc_brtcnt <= sc->sc_brtmax)
-		return;
+		goto out;
 
 	/* Force an aging cycle; this might trim enough addresses. */
 	bridge_rtage(sc);
 	if (sc->sc_brtcnt <= sc->sc_brtmax)
-		return;
+		goto out;
 
 	for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
 		nbrt = LIST_NEXT(brt, brt_list);
 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
 			bridge_rtnode_destroy(sc, brt);
 			if (sc->sc_brtcnt <= sc->sc_brtmax)
-				return;
+				goto out;
 		}
 	}
+
+out:
+	mutex_exit(&sc->sc_rtlist_lock);
+
+	return;
 }
 
 /*
@@ -1847,15 +1979,16 @@ static void
 bridge_timer(void *arg)
 {
 	struct bridge_softc *sc = arg;
-	int s;
 
-	s = splnet();
+	mutex_enter(&sc->sc_rtlist_lock);
+
 	bridge_rtage(sc);
-	splx(s);
 
 	if (sc->sc_if.if_flags & IFF_RUNNING)
 		callout_reset(&sc->sc_brcallout,
 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
+
+	mutex_exit(&sc->sc_rtlist_lock);
 }
 
 /*
@@ -1868,6 +2001,8 @@ bridge_rtage(struct bridge_softc *sc)
 {
 	struct bridge_rtnode *brt, *nbrt;
 
+	KASSERT(mutex_owned(&sc->sc_rtlist_lock));
+
 	for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
 		nbrt = LIST_NEXT(brt, brt_list);
 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
@@ -1887,11 +2022,15 @@ bridge_rtflush(struct bridge_softc *sc, int full)
 {
 	struct bridge_rtnode *brt, *nbrt;
 
+	mutex_enter(&sc->sc_rtlist_lock);
+
 	for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
 		nbrt = LIST_NEXT(brt, brt_list);
 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
 			bridge_rtnode_destroy(sc, brt);
 	}
+
+	mutex_exit(&sc->sc_rtlist_lock);
 }
 
 /*
@@ -1903,12 +2042,21 @@ static int
 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
 {
 	struct bridge_rtnode *brt;
+	int error = 0;
 
-	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
-		return (ENOENT);
+	mutex_enter(&sc->sc_rtlist_lock);
+
+	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) {
+		error = ENOENT;
+		goto out;
+	}
 
 	bridge_rtnode_destroy(sc, brt);
-	return (0);
+
+out:
+	mutex_exit(&sc->sc_rtlist_lock);
+
+	return error;
 }
 
 /*
@@ -1921,11 +2069,15 @@ bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp)
 {
 	struct bridge_rtnode *brt, *nbrt;
 
+	mutex_enter(&sc->sc_rtlist_lock);
+
 	for (brt = LIST_FIRST(&sc->sc_rtlist); brt != NULL; brt = nbrt) {
 		nbrt = LIST_NEXT(brt, brt_list);
 		if (brt->brt_ifp == ifp)
 			bridge_rtnode_destroy(sc, brt);
 	}
+
+	mutex_exit(&sc->sc_rtlist_lock);
 }
 
 /*
@@ -1950,6 +2102,8 @@ bridge_rtable_init(struct bridge_softc *sc)
 
 	LIST_INIT(&sc->sc_rtlist);
 
+	mutex_init(&sc->sc_rtlist_lock, MUTEX_DEFAULT, IPL_NET);
+
 	return (0);
 }
 
@@ -1963,6 +2117,7 @@ bridge_rtable_fini(struct bridge_softc *sc)
 {
 
 	free(sc->sc_rthash, M_DEVBUF);
+	mutex_destroy(&sc->sc_rtlist_lock);
 }
 
 /*
@@ -2013,6 +2168,8 @@ bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
 	uint32_t hash;
 	int dir;
 
+	KASSERT(mutex_owned(&sc->sc_rtlist_lock));
+
 	hash = bridge_rthash(sc, addr);
 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
 		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
@@ -2038,6 +2195,8 @@ bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
 	uint32_t hash;
 	int dir;
 
+	KASSERT(mutex_owned(&sc->sc_rtlist_lock));
+
 	hash = bridge_rthash(sc, brt->brt_addr);
 
 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
@@ -2080,15 +2239,14 @@ bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
 static void
 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
 {
-	int s = splnet();
+
+	KASSERT(mutex_owned(&sc->sc_rtlist_lock));
 
 	LIST_REMOVE(brt, brt_hash);
 
 	LIST_REMOVE(brt, brt_list);
 	sc->sc_brtcnt--;
 	pool_put(&bridge_rtnode_pool, brt);
-
-	splx(s);
 }
 
 #if defined(BRIDGE_IPF)
diff --git a/sys/net/if_bridgevar.h b/sys/net/if_bridgevar.h
index 5d5a27a..f5279e4 100644
--- a/sys/net/if_bridgevar.h
+++ b/sys/net/if_bridgevar.h
@@ -77,6 +77,8 @@
 
 #include <sys/callout.h>
 #include <sys/queue.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
 
 /*
  * Commands used in the SIOCSDRVSPEC ioctl.  Note the lookup of the
@@ -255,6 +257,8 @@ struct bridge_iflist {
 	uint8_t			bif_priority;
 	struct ifnet		*bif_ifp;	/* member if */
 	uint32_t		bif_flags;	/* member if flags */
+	uint32_t		bif_refs;	/* reference count; keeps the member from being freed */
+	bool			bif_waiting;	/* a thread is waiting on sc_iflist_cv for bif_refs to drain */
 };
 
 /*
@@ -303,7 +307,11 @@ struct bridge_softc {
 	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
 	uint32_t		sc_rthash_key;	/* key for hash */
 	uint32_t		sc_filter_flags; /* ipf and flags */
-	void			*sc_softintr;
+
+	kmutex_t		sc_iflist_lock;
+	kcondvar_t		sc_iflist_cv;
+	kmutex_t		sc_rtlist_lock;
+
 	pktqueue_t *		sc_fwd_pktq;
 };
 
diff --git a/sys/net/pktqueue.c b/sys/net/pktqueue.c
index 816c74c..48ac72c 100644
--- a/sys/net/pktqueue.c
+++ b/sys/net/pktqueue.c
@@ -208,7 +208,11 @@ pktq_rps_hash(const struct mbuf *m __unused)
 bool
 pktq_enqueue(pktqueue_t *pq, struct mbuf *m, const u_int hash __unused)
 {
-	const unsigned cpuid = curcpu()->ci_index /* hash % ncpu */;
+#ifdef _RUMPKERNEL
+	const unsigned cpuid = curcpu()->ci_index;
+#else
+	const unsigned cpuid = hash % ncpu;
+#endif
 
 	KASSERT(kpreempt_disabled());