Index: miscfs/genfs/genfs_io.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/genfs_io.c,v
retrieving revision 1.76
diff -u -p -r1.76 genfs_io.c
--- miscfs/genfs/genfs_io.c	6 Oct 2019 05:48:00 -0000	1.76
+++ miscfs/genfs/genfs_io.c	6 Dec 2019 13:11:12 -0000
@@ -79,9 +79,7 @@ genfs_rel_pages(struct vm_page **pgs, un
 			pg->flags |= PG_RELEASED;
 		}
 	}
-	mutex_enter(&uvm_pageqlock);
 	uvm_page_unbusy(pgs, npages);
-	mutex_exit(&uvm_pageqlock);
 }
 
 static void
@@ -466,7 +464,6 @@ startover:
 out:
 	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
 	error = 0;
-	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		struct vm_page *pg = pgs[i];
 		if (pg == NULL) {
@@ -490,7 +487,10 @@ out:
 			uvm_pagezero(pg);
 		}
 		if (pg->flags & PG_RELEASED) {
-			uvm_pagefree(pg);
+			mutex_enter(&uvm_pageqlock);
+			uvm_pagefree1(pg);
+			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 			continue;
 		}
 		uvm_pageenqueue(pg);
@@ -498,7 +498,6 @@ out:
 			UVM_PAGE_OWN(pg, NULL);
 		}
 	}
-	mutex_exit(&uvm_pageqlock);
 	if (memwrite) {
 		genfs_markdirty(vp);
 	}
@@ -1201,9 +1200,6 @@ retry:
 		 * apply FREE or DEACTIVATE options if requested.
 		 */
 
-		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
-			mutex_enter(&uvm_pageqlock);
-		}
 		for (i = 0; i < npages; i++) {
 			tpg = pgs[i];
 			KASSERT(tpg->uobject == uobj);
@@ -1219,7 +1215,9 @@ retry:
 				tpg->flags |= freeflag;
 				if (pagedaemon) {
 					uvm_pageout_start(1);
+					mutex_enter(&uvm_pageqlock);
 					uvm_pagedequeue(tpg);
+					mutex_exit(&uvm_pageqlock);
 				}
 			} else {
@@ -1230,15 +1228,15 @@ retry:
 				 */
 				nextpg = TAILQ_NEXT(tpg, listq.queue);
-				uvm_pagefree(tpg);
+				mutex_enter(&uvm_pageqlock);
+				uvm_pagefree1(tpg);
+				mutex_exit(&uvm_pageqlock);
+				uvm_pagefree2(tpg);
 				if (pagedaemon)
 					uvmexp.pdfreed++;
 			}
 		}
 	}
-	if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
-		mutex_exit(&uvm_pageqlock);
-	}
 
 	if (needs_clean) {
 		modified = true;
@@ -1646,7 +1644,6 @@ genfs_compat_getpages(void *v)
 	}
 	uvm_pagermapout(kva, npages);
 	mutex_enter(uobj->vmobjlock);
-	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		pg = pgs[i];
 		if (error && (pg->flags & PG_FAKE) != 0) {
@@ -1659,7 +1656,6 @@ genfs_compat_getpages(void *v)
 	if (error) {
 		uvm_page_unbusy(pgs, npages);
 	}
-	mutex_exit(&uvm_pageqlock);
 	if (error == 0 && memwrite) {
 		genfs_markdirty(vp);
 	}
Index: nfs/nfs_bio.c
===================================================================
RCS file: /cvsroot/src/sys/nfs/nfs_bio.c,v
retrieving revision 1.191
diff -u -p -r1.191 nfs_bio.c
--- nfs/nfs_bio.c	15 Jul 2015 03:28:55 -0000	1.191
+++ nfs/nfs_bio.c	6 Dec 2019 13:11:13 -0000
@@ -1340,9 +1340,7 @@ nfs_getpages(void *v)
 		 * available and put back original pgs array.
 		 */
-		mutex_enter(&uvm_pageqlock);
 		uvm_page_unbusy(pgs, npages);
-		mutex_exit(&uvm_pageqlock);
 		*ap->a_count = 0;
 		memcpy(pgs, opgs, npages * sizeof(struct vm_pages *));
Index: ufs/lfs/lfs_pages.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/lfs/lfs_pages.c,v
retrieving revision 1.15
diff -u -p -r1.15 lfs_pages.c
--- ufs/lfs/lfs_pages.c	19 Aug 2017 14:22:49 -0000	1.15
+++ ufs/lfs/lfs_pages.c	6 Dec 2019 13:11:13 -0000
@@ -539,9 +539,7 @@ retry:
 				    "lfsput2", 0);
 				mutex_enter(vp->v_interlock);
 			}
-			mutex_enter(&uvm_pageqlock);
 			uvm_pageactivate(pg);
-			mutex_exit(&uvm_pageqlock);
 		}
 		ap->a_offlo = blkeof;
 		if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
Index: ufs/lfs/lfs_vfsops.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/lfs/lfs_vfsops.c,v
retrieving revision 1.365
diff -u -p -r1.365 lfs_vfsops.c
--- ufs/lfs/lfs_vfsops.c	28 May 2019 08:59:35 -0000	1.365
+++ ufs/lfs/lfs_vfsops.c	6 Dec 2019 13:11:14 -0000
@@ -2238,14 +2238,15 @@ lfs_gop_write(struct vnode *vp, struct v
 		      pgs[0]->offset, eof, npages));
 	}
 
-	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		pg = pgs[i];
 		if (pg->flags & PG_PAGEOUT)
 			uvm_pageout_done(1);
 		if (pg->flags & PG_DELWRI) {
+			mutex_enter(&uvm_pageqlock);
 			uvm_pageunwire(pg);
+			mutex_exit(&uvm_pageqlock);
 		}
 		uvm_pageactivate(pg);
 		pg->flags &= ~(PG_CLEAN|PG_DELWRI|PG_PAGEOUT|PG_RELEASED);
@@ -2262,7 +2263,6 @@ lfs_gop_write(struct vnode *vp, struct v
 	}
 	/* uvm_pageunbusy takes care of PG_BUSY, PG_WANTED */
 	uvm_page_unbusy(pgs, npages);
-	mutex_exit(&uvm_pageqlock);
 	mutex_exit(vp->v_interlock);
 	return EAGAIN;
 }
Index: ufs/lfs/ulfs_inode.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/lfs/ulfs_inode.c,v
retrieving revision 1.21
diff -u -p -r1.21 ulfs_inode.c
--- ufs/lfs/ulfs_inode.c	28 Oct 2017 00:37:13 -0000	1.21
+++ ufs/lfs/ulfs_inode.c	6 Dec 2019 13:11:14 -0000
@@ -234,7 +234,6 @@ ulfs_balloc_range(struct vnode *vp, off_
 	GOP_SIZE(vp, off + len, &eob, 0);
 
 	mutex_enter(uobj->vmobjlock);
-	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
 		if (!error) {
@@ -246,7 +245,6 @@ ulfs_balloc_range(struct vnode *vp, off_
 		}
 		uvm_pageactivate(pgs[i]);
 	}
-	mutex_exit(&uvm_pageqlock);
 	uvm_page_unbusy(pgs, npages);
 	mutex_exit(uobj->vmobjlock);
Index: ufs/ufs/ufs_inode.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_inode.c,v
retrieving revision 1.105
diff -u -p -r1.105 ufs_inode.c
--- ufs/ufs/ufs_inode.c	10 Dec 2018 20:48:34 -0000	1.105
+++ ufs/ufs/ufs_inode.c	6 Dec 2019 13:11:14 -0000
@@ -270,7 +270,6 @@ ufs_balloc_range(struct vnode *vp, off_t
 	GOP_SIZE(vp, off + len, &eob, 0);
 
 	mutex_enter(uobj->vmobjlock);
-	mutex_enter(&uvm_pageqlock);
 	for (i = 0; i < npages; i++) {
 		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
 		if (!error) {
@@ -282,7 +281,6 @@ ufs_balloc_range(struct vnode *vp, off_t
 		}
 		uvm_pageactivate(pgs[i]);
 	}
-	mutex_exit(&uvm_pageqlock);
 	uvm_page_unbusy(pgs, npages);
 	mutex_exit(uobj->vmobjlock);
Index: uvm/uvm_amap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_amap.c,v
retrieving revision 1.110
diff -u -p -r1.110 uvm_amap.c
--- uvm/uvm_amap.c	1 Dec 2019 14:24:43 -0000	1.110
+++ uvm/uvm_amap.c	6 Dec 2019 13:11:14 -0000
@@ -1074,7 +1074,6 @@ ReStart:
 		UVM_PAGE_OWN(npg, NULL);
 	}
 	/* Activate all pages. Some may be missing because of retry above. */
-	mutex_enter(&uvm_pageqlock);
 	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
 		anon = amap->am_anon[amap->am_slots[lcv]];
 		KASSERT(anon->an_lock == amap->am_lock);
@@ -1082,7 +1081,6 @@ ReStart:
 			uvm_pageactivate(anon->an_page);
 		}
 	}
-	mutex_exit(&uvm_pageqlock);
 	amap_unlock(amap);
 }
Index: uvm/uvm_anon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_anon.c,v
retrieving revision 1.68
diff -u -p -r1.68 uvm_anon.c
--- uvm/uvm_anon.c	2 Dec 2019 20:02:02 -0000	1.68
+++ uvm/uvm_anon.c	6 Dec 2019 13:11:14 -0000
@@ -230,16 +236,24 @@ uvm_anon_freelst(struct vm_amap *amap, s
 		}
 	}
 
-	/* Free pages and leave a page replacement hint. */
+	/* Strip pages of their identities and leave a page replacement hint. */
 	mutex_enter(&uvm_pageqlock);
 	for (anon = anonlst; anon != NULL; anon = anon->an_link) {
 		UVMHIST_LOG(maphist, "anon 0x%#jx, page 0x%#jx: "
 		    "releasing now!", (uintptr_t)anon,
 		    (uintptr_t)anon->an_page, 0, 0);
+		/*
+		 * Messing with an_page behind uvm_pagefree1()'s back is
+		 * ugly, but means we can take pressure off uvm_pageqlock
+		 * by sending the pages to the free list later, and avoid
+		 * the overhead of constructing and traversing yet another
+		 * list to do so.
+		 */
 		if ((pg = anon->an_page) != NULL) {
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 		}
 		uvmpdpol_anfree(anon);
+		anon->an_page = pg;
 	}
 	mutex_exit(&uvm_pageqlock);
 	amap_unlock(amap);
@@ -247,9 +261,13 @@ uvm_anon_freelst(struct vm_amap *amap, s
 	/* Free swap space, pages and vm_anon. */
 	while (anonlst) {
 		anon = anonlst->an_link;
+		if ((pg = anonlst->an_page) != NULL) {
+			uvm_pagefree2(pg);
+		}
 		/* Note: clears an_ref as well. */
 		anonlst->an_link = NULL;
 		anonlst->an_lock = NULL;
+		anonlst->an_page = NULL;
 		uvm_anon_free(anonlst);
 		anonlst = anon;
 	}
@@ -396,11 +414,9 @@ uvm_anon_pagein(struct vm_amap *amap, st
 	 * Deactivate the page (to put it on a page queue).
 	 */
 
-	mutex_enter(&uvm_pageqlock);
 	if (pg->wire_count == 0) {
 		uvm_pagedeactivate(pg);
 	}
-	mutex_exit(&uvm_pageqlock);
 
 	if (pg->flags & PG_WANTED) {
 		pg->flags &= ~PG_WANTED;
@@ -458,13 +474,14 @@ uvm_anon_release(struct vm_anon *anon)
 	KASSERT(anon->an_ref == 0);
 
 	mutex_enter(&uvm_pageqlock);
-	uvm_pagefree(pg);
+	uvm_pagefree1(pg);
 	mutex_exit(&uvm_pageqlock);
 	KASSERT(anon->an_page == NULL);
 	/* dispose should succeed as no one can reach this anon anymore. */
 	success = uvm_anon_dispose(anon);
 	KASSERT(success);
 	mutex_exit(anon->an_lock);
+	uvm_pagefree2(pg);
 	/* Note: extra reference is held for PG_RELEASED case. */
 	mutex_obj_free(anon->an_lock);
 	anon->an_lock = NULL;
Index: uvm/uvm_aobj.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_aobj.c,v
retrieving revision 1.130
diff -u -p -r1.130 uvm_aobj.c
--- uvm/uvm_aobj.c	1 Dec 2019 20:31:40 -0000	1.130
+++ uvm/uvm_aobj.c	6 Dec 2019 13:11:14 -0000
@@ -803,12 +803,10 @@ uao_put(struct uvm_object *uobj, voff_t
 		case PGO_CLEANIT|PGO_DEACTIVATE:
 		case PGO_DEACTIVATE:
 deactivate_it:
-			mutex_enter(&uvm_pageqlock);
 			/* skip the page if it's wired */
 			if (pg->wire_count == 0) {
 				uvm_pagedeactivate(pg);
 			}
-			mutex_exit(&uvm_pageqlock);
 			break;
 
 		case PGO_FREE:
@@ -834,8 +832,9 @@ uao_put(struct uvm_object *uobj, voff_t
 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 			break;
 
 		default:
@@ -1380,10 +1379,7 @@ uao_pagein_page(struct uvm_aobj *aobj, i
 	/*
 	 * make sure it's on a page queue.
 	 */
-	mutex_enter(&uvm_pageqlock);
-	if (pg->wire_count == 0)
-		uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageenqueue(pg);
 
 	if (pg->flags & PG_WANTED) {
 		wakeup(pg);
Index: uvm/uvm_bio.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_bio.c,v
retrieving revision 1.100
diff -u -p -r1.100 uvm_bio.c
--- uvm/uvm_bio.c	7 Nov 2019 07:45:14 -0000	1.100
+++ uvm/uvm_bio.c	6 Dec 2019 13:11:14 -0000
@@ -244,8 +244,9 @@ ubc_fault_page(const struct uvm_faultinf
 	KASSERT((pg->flags & PG_FAKE) == 0);
 	if (pg->flags & PG_RELEASED) {
 		mutex_enter(&uvm_pageqlock);
-		uvm_pagefree(pg);
+		uvm_pagefree1(pg);
 		mutex_exit(&uvm_pageqlock);
+		uvm_pagefree2(pg);
 		return 0;
 	}
 	if (pg->loan_count != 0) {
@@ -287,9 +288,7 @@ ubc_fault_page(const struct uvm_faultinf
 	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
 	    prot & mask, PMAP_CANFAIL | (access_type & mask));
 
-	mutex_enter(&uvm_pageqlock);
 	uvm_pageactivate(pg);
-	mutex_exit(&uvm_pageqlock);
 	pg->flags &= ~(PG_BUSY|PG_WANTED);
 	UVM_PAGE_OWN(pg, NULL);
 
@@ -659,7 +658,6 @@ ubc_release(void *va, int flags)
 		}
 		umap->flags &= ~UMAP_PAGES_LOCKED;
 		mutex_enter(uobj->vmobjlock);
-		mutex_enter(&uvm_pageqlock);
 		for (u_int i = 0; i < npages; i++) {
 			paddr_t pa;
 			bool rv __diagused;
@@ -672,7 +670,6 @@ ubc_release(void *va, int flags)
 			KASSERT(pgs[i]->loan_count == 0);
 			uvm_pageactivate(pgs[i]);
 		}
-		mutex_exit(&uvm_pageqlock);
 		pmap_kremove(umapva, ubc_winsize);
 		pmap_update(pmap_kernel());
 		uvm_page_unbusy(pgs, npages);
@@ -891,7 +888,6 @@ ubc_direct_release(struct uvm_object *uo
     int flags, struct vm_page **pgs, int npages)
 {
 	mutex_enter(uobj->vmobjlock);
-	mutex_enter(&uvm_pageqlock);
 	for (int i = 0; i < npages; i++) {
 		struct vm_page *pg = pgs[i];
 
@@ -901,8 +897,6 @@ ubc_direct_release(struct uvm_object *uo
 		if (flags & UBC_WRITE)
 			pg->flags &= ~(PG_FAKE|PG_CLEAN);
 	}
-	mutex_exit(&uvm_pageqlock);
-
 	uvm_page_unbusy(pgs, npages);
 	mutex_exit(uobj->vmobjlock);
 }
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.211
diff -u -p -r1.211 uvm_fault.c
--- uvm/uvm_fault.c	1 Dec 2019 14:30:01 -0000	1.211
+++ uvm/uvm_fault.c	6 Dec 2019 13:11:15 -0000
@@ -191,7 +192,6 @@ uvmfault_anonflush(struct vm_anon **anon
 	int lcv;
 	struct vm_page *pg;
 
-	mutex_enter(&uvm_pageqlock);
 	for (lcv = 0; lcv < n; lcv++) {
 		if (anons[lcv] == NULL)
 			continue;
@@ -203,7 +203,6 @@ uvmfault_anonflush(struct vm_anon **anon
 			}
 		}
 	}
-	mutex_exit(&uvm_pageqlock);
 }
 
 /*
@@ -492,9 +487,7 @@ released:
 		 * We have successfully read the page, activate it.
 		 */
-		mutex_enter(&uvm_pageqlock);
 		uvm_pageactivate(pg);
-		mutex_exit(&uvm_pageqlock);
 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
 		UVM_PAGE_OWN(pg, NULL);
 #else
@@ -1272,14 +1261,11 @@ uvm_fault_upper_neighbor(
 	/* locked: amap, anon */
 
-	mutex_enter(&uvm_pageqlock);
 	uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
 	UVMHIST_LOG(maphist,
 	    " MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
 
 	/*
 	 * Since this page isn't the page that's actually faulting,
@@ -1492,9 +1477,8 @@ uvm_fault_upper_promote(
 	KASSERT(anon == NULL || anon->an_lock == oanon->an_lock);
 
 	pg = anon->an_page;
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageenqueue(pg); /* uvm_fault_upper_done will activate the page */
-	mutex_exit(&uvm_pageqlock);
+	/* uvm_fault_upper_done will activate the page */
+	uvm_pageenqueue(pg);
 	pg->flags &= ~(PG_BUSY|PG_FAKE);
 	UVM_PAGE_OWN(pg, NULL);
@@ -1627,9 +1610,10 @@ uvm_fault_upper_done(
 	 * ... update the page queues.
 	 */
 
-	mutex_enter(&uvm_pageqlock);
 	if (wire_paging) {
+		mutex_enter(&uvm_pageqlock);
 		uvm_pagewire(pg);
+		mutex_exit(&uvm_pageqlock);
 
 		/*
 		 * since the now-wired page cannot be paged out,
@@ -1639,11 +1623,9 @@ uvm_fault_upper_done(
 		 */
 
 		pg->flags &= ~(PG_CLEAN);
-
 	} else {
 		uvm_pageactivate(pg);
 	}
-	mutex_exit(&uvm_pageqlock);
 
 	if (wire_paging) {
 		uvm_anon_dropswap(anon);
@@ -1863,14 +1844,11 @@ uvm_fault_lower_neighbor(
 	 * for this. we can just directly enter the pages.
 	 */
 
-	mutex_enter(&uvm_pageqlock);
 	uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
 	UVMHIST_LOG(maphist,
 	    " MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
@@ -1984,9 +1961,7 @@ uvm_fault_lower_io(
 	mutex_enter(uobj->vmobjlock);
 	KASSERT((pg->flags & PG_BUSY) != 0);
 
-	mutex_enter(&uvm_pageqlock);
 	uvm_pageactivate(pg);
-	mutex_exit(&uvm_pageqlock);
 
 	/* locked(locked): maps(read), amap(if !null), uobj, pg */
 	/* locked(!locked): uobj, pg */
@@ -2288,9 +2260,7 @@ uvm_fault_lower_enter(
 		 * we just promoted the page.
 		 */
-		mutex_enter(&uvm_pageqlock);
 		uvm_pageenqueue(pg);
-		mutex_exit(&uvm_pageqlock);
 
 		if (pg->flags & PG_WANTED)
 			wakeup(pg);
@@ -2349,8 +2319,8 @@ uvm_fault_lower_done(
 	UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);
 
-	mutex_enter(&uvm_pageqlock);
 	if (flt->wire_paging) {
+		mutex_enter(&uvm_pageqlock);
 		uvm_pagewire(pg);
 
 		if (pg->pqflags & PQ_AOBJ) {
@@ -2365,10 +2335,10 @@ uvm_fault_lower_done(
 			pg->flags &= ~(PG_CLEAN);
 			dropswap = true;
 		}
+		mutex_exit(&uvm_pageqlock);
 	} else {
 		uvm_pageactivate(pg);
 	}
-	mutex_exit(&uvm_pageqlock);
 
 	if (dropswap) {
 		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
Index: uvm/uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.150
diff -u -p -r1.150 uvm_km.c
--- uvm/uvm_km.c	1 Dec 2019 23:14:47 -0000	1.150
+++ uvm/uvm_km.c	6 Dec 2019 13:11:15 -0000
@@ -475,8 +475,9 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t
 			uao_dropswap(uobj, curoff >> PAGE_SHIFT);
 		if (pg != NULL) {
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 		}
 	}
 	mutex_exit(uobj->vmobjlock);
Index: uvm/uvm_loan.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_loan.c,v
retrieving revision 1.88
diff -u -p -r1.88 uvm_loan.c
--- uvm/uvm_loan.c	1 Dec 2019 14:40:31 -0000	1.88
+++ uvm/uvm_loan.c	6 Dec 2019 13:11:15 -0000
@@ -412,7 +412,6 @@ uvm_loananon(struct uvm_faultinfo *ufi,
 	 */
 
 	pg = anon->an_page;
-	mutex_enter(&uvm_pageqlock);
 	if (pg->wire_count > 0) {
 		mutex_exit(&uvm_pageqlock);
 		UVMHIST_LOG(loanhist, "->K wired %#jx", (uintptr_t)pg, 0, 0, 0);
@@ -423,10 +422,11 @@ uvm_loananon(struct uvm_faultinfo *ufi,
 	if (pg->loan_count == 0) {
 		pmap_page_protect(pg, VM_PROT_READ);
 	}
+	mutex_enter(&uvm_pageqlock);
 	pg->loan_count++;
 	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
-	uvm_pageactivate(pg);
 	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate(pg);
 	**output = pg;
 	(*output)++;
@@ -463,9 +463,7 @@ uvm_loanpage(struct vm_page **pgpp, int
 		KASSERT(mutex_owned(pg->uobject->vmobjlock));
 		KASSERT(pg->flags & PG_BUSY);
 
-		mutex_enter(&uvm_pageqlock);
 		if (pg->wire_count > 0) {
-			mutex_exit(&uvm_pageqlock);
 			UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg,
 			    0, 0, 0);
 			error = EBUSY;
@@ -474,10 +472,11 @@ uvm_loanpage(struct vm_page **pgpp, int
 		if (pg->loan_count == 0) {
 			pmap_page_protect(pg, VM_PROT_READ);
 		}
+		mutex_enter(&uvm_pageqlock);
 		pg->loan_count++;
 		KASSERT(pg->loan_count > 0);	/* detect wrap-around */
-		uvm_pageactivate(pg);
 		mutex_exit(&uvm_pageqlock);
+		uvm_pageactivate(pg);
 	}
 
 	uvm_page_unbusy(pgpp, npages);
@@ -576,9 +575,7 @@ reget:
 			slock = pg->uobject->vmobjlock;
 			mutex_enter(slock);
-			mutex_enter(&uvm_pageqlock);
 			uvm_page_unbusy(&pg, 1);
-			mutex_exit(&uvm_pageqlock);
 			mutex_exit(slock);
 		}
 		goto reget;
@@ -714,14 +711,13 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 		}
 		if (pg->flags & PG_RELEASED) {
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 			mutex_exit(uobj->vmobjlock);
 			return (0);
 		}
-		mutex_enter(&uvm_pageqlock);
 		uvm_pageactivate(pg);
-		mutex_exit(&uvm_pageqlock);
 		pg->flags &= ~(PG_BUSY|PG_WANTED);
 		UVM_PAGE_OWN(pg, NULL);
 		mutex_exit(uobj->vmobjlock);
@@ -779,7 +775,6 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 	if (anon == NULL) {
 		goto fail;
 	}
-	mutex_enter(&uvm_pageqlock);
 	if (pg->wire_count > 0) {
 		mutex_exit(&uvm_pageqlock);
 		UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg, 0, 0, 0);
@@ -788,13 +783,14 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 	if (pg->loan_count == 0) {
 		pmap_page_protect(pg, VM_PROT_READ);
 	}
+	mutex_enter(&uvm_pageqlock);
 	pg->loan_count++;
 	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
 	pg->uanon = anon;
 	anon->an_page = pg;
 	anon->an_lock = /* TODO: share amap lock */
-	uvm_pageactivate(pg);
 	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate(pg);
 	if (pg->flags & PG_WANTED) {
 		wakeup(pg);
 	}
@@ -872,9 +868,7 @@ again:
 		/* got a zero'd page. */
 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
 		pg->flags |= PG_RDONLY;
-		mutex_enter(&uvm_pageqlock);
 		uvm_pageactivate(pg);
-		mutex_exit(&uvm_pageqlock);
 		UVM_PAGE_OWN(pg, NULL);
 	}
 
@@ -923,8 +917,8 @@ again:
 	mutex_enter(&uvm_pageqlock);
 	pg->loan_count++;
 	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
-	uvm_pageactivate(pg);
 	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate(pg);
 	mutex_exit(&anon->an_lock);
 	mutex_exit(uvm_loanzero_object.vmobjlock);
 	**output = anon;
@@ -1075,12 +1069,13 @@ ulz_put(struct uvm_object *uobj, voff_t
 	KASSERT(pg != NULL);
 	KASSERT(TAILQ_NEXT(pg, listq.queue) == NULL);
 
-	mutex_enter(&uvm_pageqlock);
 	if (pg->uanon)
 		uvm_pageactivate(pg);
-	else
+	else {
+		mutex_enter(&uvm_pageqlock);
 		uvm_pagedequeue(pg);
-	mutex_exit(&uvm_pageqlock);
+		mutex_exit(&uvm_pageqlock);
+	}
 	mutex_exit(uobj->vmobjlock);
 
 	return 0;
@@ -1177,8 +1172,8 @@ uvm_loanbreak(struct vm_page *uobjpage)
 	 */
 
 	/* install new page */
-	uvm_pageactivate(pg);
 	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate(pg);
 
 	/*
 	 * done! loan is broken and "pg" is
@@ -1224,18 +1219,19 @@ uvm_loanbreak_anon(struct vm_anon *anon,
 		uvm_pagedequeue(anon->an_page);
 	}
 
-	if (uobj) {
-		mutex_exit(uobj->vmobjlock);
-	}
-
 	/* install new page in anon */
 	anon->an_page = pg;
 	pg->uanon = anon;
 	pg->pqflags |= PQ_ANON;
 
-	uvm_pageactivate(pg);
 	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate(pg);
+
+	if (uobj) {
+		mutex_exit(uobj->vmobjlock);
+	}
+
 	pg->flags &= ~(PG_BUSY|PG_FAKE);
 	UVM_PAGE_OWN(pg, NULL);
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.366
diff -u -p -r1.366 uvm_map.c
--- uvm/uvm_map.c	1 Nov 2019 13:04:22 -0000	1.366
+++ uvm/uvm_map.c	6 Dec 2019 13:11:16 -0000
@@ -3944,15 +3945,12 @@ uvm_map_clean(struct vm_map *map, vaddr_
 			 * at all in these cases.
 			 */
 
-			mutex_enter(&uvm_pageqlock);
 			if (pg->loan_count != 0 ||
 			    pg->wire_count != 0) {
-				mutex_exit(&uvm_pageqlock);
 				continue;
 			}
 			KASSERT(pg->uanon == anon);
 			uvm_pagedeactivate(pg);
-			mutex_exit(&uvm_pageqlock);
 			continue;
 
 		case PGO_FREE:
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.200
diff -u -p -r1.200 uvm_page.c
--- uvm/uvm_page.c	20 Sep 2019 11:09:43 -0000	1.200
+++ uvm/uvm_page.c	6 Dec 2019 13:11:17 -0000
@@ -1348,7 +1453,7 @@ uvm_pagefree(struct vm_page *pg)
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be locked.
- * => caller must lock page queues if pages may be released.
+ * => caller must NOT have locked page queues.
 * => caller must make sure that anon-owned pages are not PG_RELEASED.
 */
@@ -1369,6 +1474,7 @@ uvm_page_unbusy(struct vm_page **pgs, in
 		KASSERT(pg->flags & PG_BUSY);
 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
 		if (pg->flags & PG_WANTED) {
+			/* XXXAD thundering herd problem. */
 			wakeup(pg);
 		}
 		if (pg->flags & PG_RELEASED) {
@@ -1377,7 +1483,10 @@ uvm_page_unbusy(struct vm_page **pgs, in
 			KASSERT(pg->uobject != NULL ||
 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
 			pg->flags &= ~PG_RELEASED;
-			uvm_pagefree(pg);
+			mutex_enter(&uvm_pageqlock);
+			uvm_pagefree1(pg);
+			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 		} else {
 			UVMHIST_LOG(ubchist, "unbusying pg %#jx",
 			    (uintptr_t)pg, 0, 0, 0);
@@ -1598,20 +1604,21 @@ uvm_pagewire(struct vm_page *pg)
 void
 uvm_pageunwire(struct vm_page *pg)
 {
+	KASSERT(mutex_owned(&uvm_pageqlock));
 	KASSERT(pg->wire_count != 0);
 	pg->wire_count--;
 	if (pg->wire_count == 0) {
 		uvm_pageactivate(pg);
-		KASSERT(uvmexp.wired != 0);
-		uvmexp.wired--;
+		/* KASSERT(uvmexp.wired != 0); */
+		uvm_stat_dec(UVM_STAT_WIRED);
 	}
 }
 
 /*
 * uvm_pagedeactivate: deactivate page
 *
- * => caller must lock page queues
+ * => caller must lock objects
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 * => caller must clear the reference on the page before calling
@@ -1621,7 +1628,6 @@
 void
 uvm_pagedeactivate(struct vm_page *pg)
 {
-	KASSERT(mutex_owned(&uvm_pageqlock));
 	KASSERT(uvm_page_locked_p(pg));
 	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
 	uvmpdpol_pagedeactivate(pg);
@@ -1630,17 +1636,18 @@ uvm_pagedeactivate(struct vm_page *pg)
 
 /*
 * uvm_pageactivate: activate page
 *
- * => caller must lock page queues
+ * => caller must lock objects
 */
 void
 uvm_pageactivate(struct vm_page *pg)
 {
 
-	KASSERT(mutex_owned(&uvm_pageqlock));
 	KASSERT(uvm_page_locked_p(pg));
#if defined(READAHEAD_STATS)
 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
 		uvm_ra_hit.ev_count++;
 		pg->pqflags &= ~PQ_READAHEAD;
 	}
@@ -1653,33 +1660,35 @@ uvm_pageactivate(struct vm_page *pg)
 
 /*
 * uvm_pagedequeue: remove a page from any paging queue
+ *
+ * => caller must lock objects and uvm_pageqlock
 */
-
 void
 uvm_pagedequeue(struct vm_page *pg)
 {
+	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(mutex_owned(&uvm_pageqlock));
+
 	if (uvmpdpol_pageisqueued_p(pg)) {
-		KASSERT(mutex_owned(&uvm_pageqlock));
+		uvmpdpol_pagedequeue(pg);
 	}
-
-	uvmpdpol_pagedequeue(pg);
 }
 
 /*
 * uvm_pageenqueue: add a page to a paging queue without activating.
 * used where a page is not really demanded (yet). eg. read-ahead
 */
-
 void
 uvm_pageenqueue(struct vm_page *pg)
 {
-	KASSERT(mutex_owned(&uvm_pageqlock));
-	if (pg->wire_count != 0) {
-		return;
+	KASSERT(uvm_page_locked_p(pg));
+
+	if (pg->wire_count == 0) {
+		uvmpdpol_pageenqueue(pg);
 	}
-	uvmpdpol_pageenqueue(pg);
 }
 
 /*
Index: uvm/uvm_page.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.h,v
retrieving revision 1.84
diff -u -p -r1.84 uvm_page.h
--- uvm/uvm_page.h	7 Jan 2019 22:48:01 -0000	1.84
+++ uvm/uvm_page.h	6 Dec 2019 13:11:17 -0000
@@ -323,6 +337,8 @@ void uvm_pagedeactivate(struct vm_page *
 void uvm_pagedequeue(struct vm_page *);
 void uvm_pageenqueue(struct vm_page *);
 void uvm_pagefree(struct vm_page *);
+void uvm_pagefree1(struct vm_page *);
+void uvm_pagefree2(struct vm_page *);
 void uvm_page_unbusy(struct vm_page **, int);
 struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
 void uvm_pageunwire(struct vm_page *);
Index: uvm/uvm_pager.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pager.c,v
retrieving revision 1.113
diff -u -p -r1.113 uvm_pager.c
--- uvm/uvm_pager.c	1 Dec 2019 23:14:47 -0000	1.113
+++ uvm/uvm_pager.c	6 Dec 2019 13:11:17 -0000
@@ -323,7 +323,6 @@ uvm_aio_aiodone_pages(struct vm_page **p
 		uobj = pg->uobject;
 		slock = uobj->vmobjlock;
 		mutex_enter(slock);
-		mutex_enter(&uvm_pageqlock);
 	} else {
#if defined(VMSWAP)
 		if (error) {
@@ -362,7 +361,6 @@ uvm_aio_aiodone_pages(struct vm_page **p
 			slock = pg->uanon->an_lock;
 		}
 		mutex_enter(slock);
-		mutex_enter(&uvm_pageqlock);
 		anon_disposed = (pg->flags & PG_RELEASED) != 0;
 		KASSERT(!anon_disposed || pg->uobject != NULL ||
 		    pg->uanon->an_ref == 0);
@@ -421,8 +419,10 @@ uvm_aio_aiodone_pages(struct vm_page **p
 			KASSERT(!write);
 			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
+			mutex_enter(&uvm_pageqlock);
 			pg->pqflags |= PQ_READAHEAD;
 			uvm_ra_total.ev_count++;
+			mutex_exit(&uvm_pageqlock);
#endif /* defined(READAHEAD_STATS) */
 			KASSERT((pg->flags & PG_CLEAN) != 0);
 			uvm_pageenqueue(pg);
@@ -437,7 +437,7 @@ uvm_aio_aiodone_pages(struct vm_page **p
 		if (pg->flags & PG_PAGEOUT) {
 			pg->flags &= ~PG_PAGEOUT;
 			pageout_done++;
-			uvmexp.pdfreed++;
+			atomic_inc_uint(&uvmexp.pdfreed);
 			pg->flags |= PG_RELEASED;
 		}
@@ -448,11 +448,9 @@ uvm_aio_aiodone_pages(struct vm_page **p
 		if (swap) {
 			if (pg->uobject == NULL && anon_disposed) {
-				mutex_exit(&uvm_pageqlock);
 				uvm_anon_release(pg->uanon);
 			} else {
 				uvm_page_unbusy(&pg, 1);
-				mutex_exit(&uvm_pageqlock);
 				mutex_exit(slock);
 			}
 		}
@@ -461,7 +459,6 @@ uvm_aio_aiodone_pages(struct vm_page **p
 	uvm_pageout_done(pageout_done);
 	if (!swap) {
 		uvm_page_unbusy(pgs, npages);
-		mutex_exit(&uvm_pageqlock);
 		mutex_exit(slock);
 	} else {
#if defined(VMSWAP)
@@ -478,7 +475,7 @@ uvm_aio_aiodone_pages(struct vm_page **p
 		else
 			uvm_swap_free(swslot, npages);
 	}
-	uvmexp.pdpending--;
+	atomic_dec_uint(&uvmexp.pdpending);
#endif /* defined(VMSWAP) */
 	}
 }
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.112
diff -u -p -r1.112 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c	1 Dec 2019 14:40:31 -0000	1.112
+++ uvm/uvm_pdaemon.c	6 Dec 2019 13:11:17 -0000
@@ -261,8 +277,10 @@ uvm_pageout(void *arg)
 	uvm.pagedaemon_lwp = curlwp;
 	mutex_enter(&uvm_pageqlock);
+	uvmpdpol_lock();
 	npages = uvmexp.npages;
 	uvmpd_tune();
+	uvmpdpol_unlock();
 	mutex_exit(&uvm_pageqlock);
 
 	/*
@@ -291,6 +309,7 @@ uvm_pageout(void *arg)
 		 */
 
 		mutex_enter(&uvm_pageqlock);
+		uvmpdpol_lock();
 		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
 			npages = uvmexp.npages;
 			extrapages = uvm_extrapages;
@@ -334,8 +354,9 @@ uvm_pageout(void *arg)
 		mutex_spin_exit(&uvm_fpageqlock);
 
 		/*
-		 * scan done. unlock page queues (the only lock we are holding)
+		 * scan done. unlock page queues.
 		 */
+		uvmpdpol_unlock();
 		mutex_exit(&uvm_pageqlock);
 
 		/*
@@ -673,6 +695,9 @@ uvmpd_scan_queue(void)
 	kmutex_t *slock;
 	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);
 
+	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(uvmpdpol_locked_p());
+
 	/*
 	 * swslot is non-zero if we are building a swap cluster. we want
 	 * to stay in the loop while we have a page to scan or we have
@@ -744,10 +768,10 @@ uvmpd_scan_queue(void)
 			 */
 			lockownerfail++;
 			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
-				mutex_exit(&uvm_pageqlock);
+				uvmpdpol_unlock();
 				/* XXX Better than yielding but inadequate. */
-				kpause("livelock", false, 1, NULL);
-				mutex_enter(&uvm_pageqlock);
+				kpause("livelock", false, 1, &uvm_pageqlock);
+				uvmpdpol_lock();
 				lockownerfail = 0;
 			}
 			continue;
@@ -786,10 +810,12 @@ uvmpd_scan_queue(void)
 
 		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
 			KASSERT(uobj != NULL);
+			uvmpdpol_unlock();
 			mutex_exit(&uvm_pageqlock);
 			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
 			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
 			mutex_enter(&uvm_pageqlock);
+			uvmpdpol_lock();
 			continue;
 		}
@@ -862,7 +889,9 @@ uvmpd_scan_queue(void)
 
 		if (swapcluster_allocslots(&swc)) {
 			dirtyreacts++;
+			uvmpdpol_unlock();
 			uvm_pageactivate(p);
+			uvmpdpol_lock();
 			mutex_exit(slock);
 			continue;
 		}
@@ -877,11 +906,11 @@ uvmpd_scan_queue(void)
 		p->flags |= PG_BUSY;
 		UVM_PAGE_OWN(p, "scan_queue");
 
-		p->flags |= PG_PAGEOUT;
-		uvm_pagedequeue(p);
-		uvmexp.pgswapout++;
+
+		uvmpdpol_unlock();
+		uvm_pagedequeue(p);
 		mutex_exit(&uvm_pageqlock);
 
 		/*
@@ -895,30 +924,36 @@ uvmpd_scan_queue(void)
 			dirtyreacts++;
 			uvm_pageactivate(p);
 			mutex_exit(slock);
+			uvmpdpol_lock();
 			continue;
 		}
 
 		mutex_exit(slock);
 		swapcluster_flush(&swc, false);
 		mutex_enter(&uvm_pageqlock);
+		uvmpdpol_lock();
 
 		/*
 		 * the pageout is in progress. bump counters and set up
 		 * for the next loop.
 		 */
-		uvmexp.pdpending++;
+		atomic_inc_uint(&uvmexp.pdpending);
#else /* defined(VMSWAP) */
+		uvmpdpol_unlock();
 		uvm_pageactivate(p);
 		mutex_exit(slock);
+		uvmpdpol_lock();
#endif /* defined(VMSWAP) */
 	}
 
#if defined(VMSWAP)
+	uvmpdpol_unlock();
 	mutex_exit(&uvm_pageqlock);
 	swapcluster_flush(&swc, true);
 	mutex_enter(&uvm_pageqlock);
+	uvmpdpol_lock();
#endif /* defined(VMSWAP) */
 }
@@ -931,9 +966,12 @@ uvmpd_scan_queue(void)
 static void
 uvmpd_scan(void)
 {
-	int swap_shortage, pages_freed;
+	int swap_shortage, pages_freed, tmp;
 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
 
+	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(uvmpdpol_locked_p());
+
 	uvmexp.pdrevs++;
Index: uvm/uvm_pdpolicy.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy.h,v
retrieving revision 1.3
diff -u -p -r1.3 uvm_pdpolicy.h
--- uvm/uvm_pdpolicy.h	21 Feb 2007 23:00:14 -0000	1.3
+++ uvm/uvm_pdpolicy.h	6 Dec 2019 13:11:18 -0000
@@ -41,6 +41,9 @@ void uvmpdpol_init(void);
 void uvmpdpol_reinit(void);
 void uvmpdpol_estimatepageable(int *, int *);
 bool uvmpdpol_needsscan_p(void);
+void uvmpdpol_lock(void);
+void uvmpdpol_unlock(void);
+bool uvmpdpol_locked_p(void);
 
 void uvmpdpol_pageactivate(struct vm_page *);
 void uvmpdpol_pagedeactivate(struct vm_page *);
Index: uvm/uvm_pdpolicy_clock.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clock.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clock.c
--- uvm/uvm_pdpolicy_clock.c	30 Jan 2012 17:21:52 -0000	1.17
+++ uvm/uvm_pdpolicy_clock.c	6 Dec 2019 13:11:18 -0000
@@ -110,8 +111,13 @@ struct uvmpdpol_scanstate {
 	struct vm_page *ss_nextpg;
 };
 
+static void	uvmpdpol_pageactivate_locked(struct vm_page *);
+static void	uvmpdpol_pagedeactivate_locked(struct vm_page *);
+static void	uvmpdpol_pagedequeue_locked(struct vm_page *);
+
 static struct uvmpdpol_globalstate pdpol_state;
 static struct uvmpdpol_scanstate pdpol_scanstate;
+static kmutex_t uvm_pdpol_lock __cacheline_aligned;
 
 PDPOL_EVCNT_DEFINE(reactexec)
 PDPOL_EVCNT_DEFINE(reactfile)
@@ -171,7 +178,8 @@ uvmpdpol_selectvictim(void)
 	struct vm_page *pg;
 	kmutex_t *lock;
 
-	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(mutex_owned(&uvm_pageqlock));	/* for uvmpd_trylockowner() */
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
 
 	while (/* CONSTCOND */ 1) {
 		struct vm_anon *anon;
@@ -201,7 +209,7 @@ uvmpdpol_selectvictim(void)
 		lock = uvmpd_trylockowner(pg);
 		if (lock != NULL) {
 			if (pmap_is_referenced(pg)) {
-				uvmpdpol_pageactivate(pg);
+				uvmpdpol_pageactivate_locked(pg);
 				uvmexp.pdreact++;
 				mutex_exit(lock);
 				continue;
@@ -221,18 +229,18 @@ uvmpdpol_selectvictim(void)
 		 */
 		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactexec);
 			continue;
 		}
 		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
 		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactfile);
 			continue;
 		}
 		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactanon);
 			continue;
 		}
@@ -282,7 +290,7 @@ uvmpdpol_balancequeue(int swap_shortage)
 		/* no need to check wire_count as pg is "active" */
 		lock = uvmpd_trylockowner(p);
 		if (lock != NULL) {
-			uvmpdpol_pagedeactivate(p);
+			uvmpdpol_pagedeactivate_locked(p);
 			uvmexp.pddeact++;
 			inactive_shortage--;
 			mutex_exit(lock);
@@ -290,12 +298,12 @@ uvmpdpol_balancequeue(int swap_shortage)
 	}
 }
 
-void
-uvmpdpol_pagedeactivate(struct vm_page *pg)
+static void
+uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
 {
 
 	KASSERT(uvm_page_locked_p(pg));
-	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
 
 	if (pg->pqflags & PQ_ACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
@@ -313,27 +321,50 @@ uvmpdpol_pagedeactivate(struct vm_page *
 }
 
 void
-uvmpdpol_pageactivate(struct vm_page *pg)
+uvmpdpol_pagedeactivate(struct vm_page *pg)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+	uvmpdpol_pagedeactivate_locked(pg);
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+static void
+uvmpdpol_pageactivate_locked(struct vm_page *pg)
 {
 
-	uvmpdpol_pagedequeue(pg);
+	uvmpdpol_pagedequeue_locked(pg);
 	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq.queue);
 	pg->pqflags |= PQ_ACTIVE;
+	pg->pdpol = hardclock_ticks;
 	pdpol_state.s_active++;
 }
 
 void
-uvmpdpol_pagedequeue(struct vm_page *pg)
+uvmpdpol_pageactivate(struct vm_page *pg)
+{
+
+	/* Safety: PQ_ACTIVE clear also tells us if it is not enqueued. */
+	if ((pg->pqflags & PQ_ACTIVE) == 0 ||
+	    (hardclock_ticks - pg->pdpol) > hz) {
+		mutex_enter(&uvm_pdpol_lock);
+		uvmpdpol_pageactivate_locked(pg);
+		mutex_exit(&uvm_pdpol_lock);
+	}
+}
+
+static void
+uvmpdpol_pagedequeue_locked(struct vm_page *pg)
 {
 
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
+
 	if (pg->pqflags & PQ_ACTIVE) {
-		KASSERT(mutex_owned(&uvm_pageqlock));
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
 		pg->pqflags &= ~PQ_ACTIVE;
 		KASSERT(pdpol_state.s_active > 0);
 		pdpol_state.s_active--;
 	} else if (pg->pqflags & PQ_INACTIVE) {
-		KASSERT(mutex_owned(&uvm_pageqlock));
 		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pageq.queue);
 		pg->pqflags &= ~PQ_INACTIVE;
 		KASSERT(pdpol_state.s_inactive > 0);
@@ -342,10 +373,45 @@ uvmpdpol_pagedequeue(struct vm_page *pg)
 }
 
 void
+uvmpdpol_pagedequeue(struct vm_page *pg)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+	uvmpdpol_pagedequeue_locked(pg);
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+void
 uvmpdpol_pageenqueue(struct vm_page *pg)
 {
 
-	uvmpdpol_pageactivate(pg);
+	/* Safe to test unlocked due to page life-cycle. */
+	if (!uvmpdpol_pageisqueued_p(pg)) {
+		mutex_enter(&uvm_pdpol_lock);
+		uvmpdpol_pageactivate_locked(pg);
+		mutex_exit(&uvm_pdpol_lock);
+	}
+}
+
+void
+uvmpdpol_lock(void)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+}
+
+void
+uvmpdpol_unlock(void)
+{
+
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+bool
+uvmpdpol_locked_p(void)
+{
+
+	return mutex_owned(&uvm_pdpol_lock);
 }
 
 void
@@ -400,6 +466,7 @@ uvmpdpol_init(void)
 {
 	struct uvmpdpol_globalstate *s = &pdpol_state;
 
+	mutex_init(&uvm_pdpol_lock, MUTEX_DEFAULT, IPL_NONE);
 	TAILQ_INIT(&s->s_activeq);
 	TAILQ_INIT(&s->s_inactiveq);
 	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
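
Two pieces that the hunks above reference but do not define deserve a note.

uvm_page.h gains prototypes for uvm_pagefree1() and uvm_pagefree2(), and every
caller of uvm_pagefree() touched here is converted, but the uvm_page.c hunk
that actually splits uvm_pagefree() into the two phases is not part of this
diff.  A minimal sketch of the intended shape, inferred only from the call
sites, might look like the following -- everything beyond the two names and
the locking rules visible in the callers is an assumption, not the patch's
code:

	/*
	 * SKETCH ONLY -- inferred from the call sites, not taken from
	 * the patch.  Phase 1: with uvm_pageqlock and the owning
	 * object's/anon's lock held, take the page off the paging
	 * queues and strip it of its identity, as the first half of
	 * uvm_pagefree() does today.
	 */
	void
	uvm_pagefree1(struct vm_page *pg)
	{

		KASSERT(mutex_owned(&uvm_pageqlock));

		uvm_pagedequeue(pg);
		/* ... disassociate pg from pg->uobject / pg->uanon ... */
	}

	/*
	 * SKETCH ONLY.  Phase 2: return the now-anonymous page to the
	 * free list.  uvm_pageqlock is deliberately not held here, so
	 * free list maintenance no longer adds to its hold time.
	 */
	void
	uvm_pagefree2(struct vm_page *pg)
	{

		mutex_spin_enter(&uvm_fpageqlock);
		/* ... put pg on the free list, update uvmexp.free ... */
		mutex_spin_exit(&uvm_fpageqlock);
	}

Every conversion above then follows one pattern:

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree1(pg);		/* strip identity; queues locked */
	mutex_exit(&uvm_pageqlock);
	uvm_pagefree2(pg);		/* to the free list; queues unlocked */

with uvm_anon_freelst() as the one deliberate variation: it makes all of its
phase-1 calls for a whole list of anons inside a single lock/unlock pair and
defers the phase-2 calls to the second loop, which is what the an_page
save/restore commented there is for.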
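
In the same vein, uvm_pdpolicy_clock.c stamps pages with "pg->pdpol =
hardclock_ticks" so that uvmpdpol_pageactivate() can skip retaking
uvm_pdpol_lock for a page that was already activated within the last hz
ticks, but no hunk above adds that member to struct vm_page.  The complete
patch presumably carries a companion change along these lines; the member
name comes from the uses above, while its type and placement are guesses:

	/* hypothetical companion change in struct vm_page (uvm/uvm_page.h): */
	int		pdpol;		/* hardclock_ticks at last activation */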
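
Finally, the pagedaemon hunks establish a fixed ordering between the two scan
locks; every site in uvm_pdaemon.c takes and releases them the same way:

	mutex_enter(&uvm_pageqlock);	/* still needed by uvmpd_trylockowner() */
	uvmpdpol_lock();		/* new: protects the active/inactive queues */
	/* ... scan ... */
	uvmpdpol_unlock();
	mutex_exit(&uvm_pageqlock);

Ordinary callers of uvm_pageactivate(), uvm_pagedeactivate() and
uvm_pageenqueue() no longer take either lock themselves: per the updated
comments they hold only the owning object's or anon's lock, and the pdpolicy
code takes uvm_pdpol_lock internally.  Judging from the callers converted
above, only uvm_pagedequeue(), uvm_pagewire() and uvm_pageunwire() still
demand uvm_pageqlock from their callers.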