Index: uvm/uvm_anon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_anon.c,v
retrieving revision 1.66
diff -u -p -r1.66 uvm_anon.c
--- uvm/uvm_anon.c	1 Dec 2019 17:02:50 -0000	1.66
+++ uvm/uvm_anon.c	3 Dec 2019 00:16:50 -0000
@@ -227,14 +227,23 @@ uvm_anon_freelst(struct vm_amap *amap, s
 		}
 	}
 
-	/* Free pages and leave a page replacement hint. */
+	/* Strip pages of their identities and leave a page replacement hint. */
 	mutex_enter(&uvm_pageqlock);
 	for (anon = anonlst; anon != NULL; anon = anon->an_link) {
 		UVMHIST_LOG(maphist, "anon 0x%#jx, page 0x%#jx: "
 		    "releasing now!", (uintptr_t)anon,
 		    (uintptr_t)anon->an_page, 0, 0);
+		/*
+		 * Messing with an_page behind uvm_pagefree1()'s back is
+		 * ugly, but means we can take pressure off uvm_pageqlock
+		 * by sending the pages to the free list later, and avoid
+		 * the overhead of constructing and traversing yet another
+		 * list to do so.  Once uvm_pagefree1() is called, nothing
+		 * else in the system should have visibility of the anon.
+		 */
		if ((pg = anon->an_page) != NULL) {
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
+			anon->an_page = pg;
 		}
 		uvmpdpol_anfree(anon);
 	}
@@ -244,9 +253,13 @@ uvm_anon_freelst(struct vm_amap *amap, s
 	/* Free swap space, pages and vm_anon. */
 	while (anonlst) {
 		anon = anonlst->an_link;
+		if ((pg = anonlst->an_page) != NULL) {
+			uvm_pagefree2(pg);
+		}
 		/* Note: clears an_ref as well. */
 		anonlst->an_link = NULL;
 		anonlst->an_lock = NULL;
+		anonlst->an_page = NULL;
 		uvm_anon_free(anonlst);
 		anonlst = anon;
 	}
@@ -455,13 +468,14 @@ uvm_anon_release(struct vm_anon *anon)
 	KASSERT(anon->an_ref == 0);
 
 	mutex_enter(&uvm_pageqlock);
-	uvm_pagefree(pg);
+	uvm_pagefree1(pg);
 	mutex_exit(&uvm_pageqlock);
 	KASSERT(anon->an_page == NULL);
 	/* dispose should succeed as no one can reach this anon anymore. */
 	success = uvm_anon_dispose(anon);
 	KASSERT(success);
 	mutex_exit(anon->an_lock);
+	uvm_pagefree2(pg);
 	/* Note: extra reference is held for PG_RELEASED case. */
 	mutex_obj_free(anon->an_lock);
 	anon->an_lock = NULL;
Index: uvm/uvm_aobj.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_aobj.c,v
retrieving revision 1.130
diff -u -p -r1.130 uvm_aobj.c
--- uvm/uvm_aobj.c	1 Dec 2019 20:31:40 -0000	1.130
+++ uvm/uvm_aobj.c	3 Dec 2019 00:16:51 -0000
@@ -834,8 +834,9 @@ uao_put(struct uvm_object *uobj, voff_t 
 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 			break;
 
 		default:
@@ -1380,10 +1381,7 @@ uao_pagein_page(struct uvm_aobj *aobj, i
 	/*
 	 * make sure it's on a page queue.
 	 */
-	mutex_enter(&uvm_pageqlock);
-	if (pg->wire_count == 0)
-		uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageenqueue1(pg);
 
 	if (pg->flags & PG_WANTED) {
 		wakeup(pg);
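
The uvm_pagefree1()/uvm_pagefree2() split above lets call sites hold
uvm_pageqlock only for the identity-stripping half of a free.  The
converted call sites all take the same shape; a minimal sketch of the
convention (names as in the patch, owner lock assumed held throughout):

	/* Sketch: two-phase free of a page owned by a locked object. */
	mutex_enter(&uvm_pageqlock);
	uvm_pagefree1(pg);	/* strip identity; needs pageqlock */
	mutex_exit(&uvm_pageqlock);
	uvm_pagefree2(pg);	/* onto the free list; no pageqlock */

uvm_anon_freelst() goes a step further and batches all of the
uvm_pagefree2() calls into its second loop, stashing each stripped page
back into an_page so no extra list has to be built or traversed.
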
Index: uvm/uvm_bio.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_bio.c,v
retrieving revision 1.100
diff -u -p -r1.100 uvm_bio.c
--- uvm/uvm_bio.c	7 Nov 2019 07:45:14 -0000	1.100
+++ uvm/uvm_bio.c	3 Dec 2019 00:16:51 -0000
@@ -244,8 +244,9 @@ ubc_fault_page(const struct uvm_faultinf
 	KASSERT((pg->flags & PG_FAKE) == 0);
 	if (pg->flags & PG_RELEASED) {
 		mutex_enter(&uvm_pageqlock);
-		uvm_pagefree(pg);
+		uvm_pagefree1(pg);
 		mutex_exit(&uvm_pageqlock);
+		uvm_pagefree2(pg);
 		return 0;
 	}
 	if (pg->loan_count != 0) {
@@ -287,9 +288,7 @@ ubc_fault_page(const struct uvm_faultinf
 	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
 	    prot & mask, PMAP_CANFAIL | (access_type & mask));
 
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageactivate(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate1(pg);
 	pg->flags &= ~(PG_BUSY|PG_WANTED);
 	UVM_PAGE_OWN(pg, NULL);
 
@@ -659,7 +658,6 @@ ubc_release(void *va, int flags)
 		}
 		umap->flags &= ~UMAP_PAGES_LOCKED;
 		mutex_enter(uobj->vmobjlock);
-		mutex_enter(&uvm_pageqlock);
 		for (u_int i = 0; i < npages; i++) {
 			paddr_t pa;
 			bool rv __diagused;
@@ -670,9 +668,8 @@ ubc_release(void *va, int flags)
 			pgs[i] = PHYS_TO_VM_PAGE(pa);
 			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
 			KASSERT(pgs[i]->loan_count == 0);
-			uvm_pageactivate(pgs[i]);
+			uvm_pageactivate1(pgs[i]);
 		}
-		mutex_exit(&uvm_pageqlock);
 		pmap_kremove(umapva, ubc_winsize);
 		pmap_update(pmap_kernel());
 		uvm_page_unbusy(pgs, npages);
@@ -891,18 +888,15 @@ ubc_direct_release(struct uvm_object *uo
 	int flags, struct vm_page **pgs, int npages)
 {
 	mutex_enter(uobj->vmobjlock);
-	mutex_enter(&uvm_pageqlock);
 	for (int i = 0; i < npages; i++) {
 		struct vm_page *pg = pgs[i];
 
-		uvm_pageactivate(pg);
+		uvm_pageactivate1(pg);
 
 		/* Page was changed, no longer fake and neither clean */
 		if (flags & UBC_WRITE)
 			pg->flags &= ~(PG_FAKE|PG_CLEAN);
 	}
-	mutex_exit(&uvm_pageqlock);
-
 	uvm_page_unbusy(pgs, npages);
 	mutex_exit(uobj->vmobjlock);
 }
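
The ubc_release() and ubc_direct_release() hunks show the other payoff
of the *1 variants: a loop over npages pages no longer holds
uvm_pageqlock across the whole walk.  Each uvm_pageactivate1() call
takes the lock internally, per page, and only when that page actually
needs (re)activation.  The resulting locking shape, simplified from
ubc_release() above:

	mutex_enter(uobj->vmobjlock);
	for (u_int i = 0; i < npages; i++) {
		/* pageqlock taken inside, per page, only if needed */
		uvm_pageactivate1(pgs[i]);
	}
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

The unlocked need-to-activate test is safe because, per the new rule
added to uvm_page.h below, page queue flags that are only set and
cleared with both the owner lock and uvm_pageqlock held may be tested
with just the owner lock held.
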
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.211
diff -u -p -r1.211 uvm_fault.c
--- uvm/uvm_fault.c	1 Dec 2019 14:30:01 -0000	1.211
+++ uvm/uvm_fault.c	3 Dec 2019 00:16:51 -0000
@@ -492,9 +492,7 @@ released:
 		 * We have successfully read the page, activate it.
 		 */
 
-		mutex_enter(&uvm_pageqlock);
-		uvm_pageactivate(pg);
-		mutex_exit(&uvm_pageqlock);
+		uvm_pageactivate1(pg);
 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
 		UVM_PAGE_OWN(pg, NULL);
 #else
@@ -1272,9 +1270,7 @@ uvm_fault_upper_neighbor(
 	/* locked: amap, anon */
 
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageenqueue1(pg);
 	UVMHIST_LOG(maphist,
 	    " MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
@@ -1492,9 +1488,8 @@ uvm_fault_upper_promote(
 	KASSERT(anon == NULL || anon->an_lock == oanon->an_lock);
 
 	pg = anon->an_page;
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageenqueue(pg); /* uvm_fault_upper_done will activate the page */
-	mutex_exit(&uvm_pageqlock);
+	/* uvm_fault_upper_done will activate the page */
+	uvm_pageenqueue1(pg);
 	pg->flags &= ~(PG_BUSY|PG_FAKE);
 	UVM_PAGE_OWN(pg, NULL);
 
@@ -1627,9 +1622,10 @@ uvm_fault_upper_done(
 	 * ... update the page queues.
 	 */
 
-	mutex_enter(&uvm_pageqlock);
 	if (wire_paging) {
+		mutex_enter(&uvm_pageqlock);
 		uvm_pagewire(pg);
+		mutex_exit(&uvm_pageqlock);
 
 		/*
 		 * since the now-wired page cannot be paged out,
@@ -1639,11 +1635,9 @@ uvm_fault_upper_done(
 		 */
 
 		pg->flags &= ~(PG_CLEAN);
-
 	} else {
-		uvm_pageactivate(pg);
+		uvm_pageactivate1(pg);
 	}
-	mutex_exit(&uvm_pageqlock);
 
 	if (wire_paging) {
 		uvm_anon_dropswap(anon);
@@ -1863,9 +1857,7 @@ uvm_fault_lower_neighbor(
 	 * for this.  we can just directly enter the pages.
 	 */
 
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageenqueue1(pg);
 	UVMHIST_LOG(maphist,
 	    " MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
 	    (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
@@ -1984,9 +1976,7 @@ uvm_fault_lower_io(
 	mutex_enter(uobj->vmobjlock);
 	KASSERT((pg->flags & PG_BUSY) != 0);
 
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageactivate(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageactivate1(pg);
 
 	/* locked(locked): maps(read), amap(if !null), uobj, pg */
 	/* locked(!locked): uobj, pg */
@@ -2288,9 +2278,7 @@ uvm_fault_lower_enter(
 	 * we just promoted the page.
 	 */
 
-	mutex_enter(&uvm_pageqlock);
-	uvm_pageenqueue(pg);
-	mutex_exit(&uvm_pageqlock);
+	uvm_pageenqueue1(pg);
 
 	if (pg->flags & PG_WANTED)
 		wakeup(pg);
@@ -2349,8 +2337,8 @@ uvm_fault_lower_done(
 	UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);
 
-	mutex_enter(&uvm_pageqlock);
 	if (flt->wire_paging) {
+		mutex_enter(&uvm_pageqlock);
 		uvm_pagewire(pg);
 		if (pg->pqflags & PQ_AOBJ) {
 
@@ -2365,10 +2353,10 @@ uvm_fault_lower_done(
 			pg->flags &= ~(PG_CLEAN);
 			dropswap = true;
 		}
+		mutex_exit(&uvm_pageqlock);
 	} else {
-		uvm_pageactivate(pg);
+		uvm_pageactivate1(pg);
 	}
-	mutex_exit(&uvm_pageqlock);
 
 	if (dropswap) {
 		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
Index: uvm/uvm_glue.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.170
diff -u -p -r1.170 uvm_glue.c
--- uvm/uvm_glue.c	21 Nov 2019 17:47:53 -0000	1.170
+++ uvm/uvm_glue.c	3 Dec 2019 00:16:52 -0000
@@ -67,6 +67,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v
 #include "opt_kgdb.h"
 #include "opt_kstack.h"
 #include "opt_uvmhist.h"
+#include "opt_multiprocessor.h"
 
 /*
  * uvm_glue.c: glue functions
@@ -500,6 +501,15 @@ uvm_scheduler(void)
 	lwp_changepri(l, PRI_VM);
 	lwp_unlock(l);
 
+#if defined(DIAGNOSTIC) && defined(MULTIPROCESSOR) && defined(_LP64)
+	/* Moan only on machines where it really matters. */
+	if ((sizeof(struct vm_page) & (COHERENCY_UNIT - 1)) != 0) {
+		printf("uvm: sizeof(struct vm_page)=%d not aligned to "
+		    "COHERENCY_UNIT=%d\n", (int)sizeof(struct vm_page),
+		    (int)COHERENCY_UNIT);
+	}
+#endif
+
 	for (;;) {
 		sched_pstats();
 		(void)kpause("uvm", false, hz, NULL);
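
The boot-time moan exists because the uvm_page.h layout work below only
pays off if struct vm_page is a multiple of COHERENCY_UNIT; otherwise
two pages can straddle a cache line and the false sharing returns.  A
compile-time variant is conceivable where a port's layout is fixed
(hypothetical, not part of the patch; assumes __CTASSERT from
<sys/cdefs.h>):

	/* Hypothetical: fail the build instead of moaning at boot. */
	__CTASSERT((sizeof(struct vm_page) & (COHERENCY_UNIT - 1)) == 0);

The runtime check is the gentler choice here, since sizeof(struct
vm_page) varies with __HAVE_VM_PAGE_MD and each port's vm_page_md, and
not every port has been padded out yet.
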
Index: uvm/uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.149
diff -u -p -r1.149 uvm_km.c
--- uvm/uvm_km.c	1 Dec 2019 14:43:26 -0000	1.149
+++ uvm/uvm_km.c	3 Dec 2019 00:16:52 -0000
@@ -474,8 +474,9 @@ uvm_km_pgremove(vaddr_t startva, vaddr_t
 		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
 		if (pg != NULL) {
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 		}
 	}
 	mutex_exit(uobj->vmobjlock);
Index: uvm/uvm_loan.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_loan.c,v
retrieving revision 1.88
diff -u -p -r1.88 uvm_loan.c
--- uvm/uvm_loan.c	1 Dec 2019 14:40:31 -0000	1.88
+++ uvm/uvm_loan.c	3 Dec 2019 00:16:52 -0000
@@ -714,14 +714,13 @@ uvm_loanuobj(struct uvm_faultinfo *ufi, 
 		}
 		if (pg->flags & PG_RELEASED) {
 			mutex_enter(&uvm_pageqlock);
-			uvm_pagefree(pg);
+			uvm_pagefree1(pg);
 			mutex_exit(&uvm_pageqlock);
+			uvm_pagefree2(pg);
 			mutex_exit(uobj->vmobjlock);
 			return (0);
 		}
-		mutex_enter(&uvm_pageqlock);
-		uvm_pageactivate(pg);
-		mutex_exit(&uvm_pageqlock);
+		uvm_pageactivate1(pg);
 		pg->flags &= ~(PG_BUSY|PG_WANTED);
 		UVM_PAGE_OWN(pg, NULL);
 		mutex_exit(uobj->vmobjlock);
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.200
diff -u -p -r1.200 uvm_page.c
--- uvm/uvm_page.c	20 Sep 2019 11:09:43 -0000	1.200
+++ uvm/uvm_page.c	3 Dec 2019 00:16:52 -0000
@@ -1203,15 +1203,28 @@ uvm_pagezerocheck(struct vm_page *pg)
 void
 uvm_pagefree(struct vm_page *pg)
 {
-	struct pgflist *pgfl;
-	struct uvm_cpu *ucpu;
-	int index, color, queue;
-	bool iszero;
+
+	uvm_pagefree1(pg);
+	uvm_pagefree2(pg);
+}
+
+/*
+ * uvm_pagefree1: first part of free page
+ *
+ * => erase page's identity (i.e. remove from object)
+ * => caller must lock owning object (either anon or uvm_object)
+ * => caller must lock page queues
+ * => assumes all valid mappings of pg are gone
+ */
+
+void
+uvm_pagefree1(struct vm_page *pg)
+{
 
 #ifdef DEBUG
 	if (pg->uobject == (void *)0xdeadbeef &&
 	    pg->uanon == (void *)0xdeadbeef) {
-		panic("uvm_pagefree: freeing free page %p", pg);
+		panic("uvm_pagefree1: freeing free page %p", pg);
 	}
 #endif /* DEBUG */
 
@@ -1297,6 +1310,31 @@ uvm_pagefree(struct vm_page *pg)
 		pg->wire_count = 0;
 		uvmexp.wired--;
 	}
+}
+
+/*
+ * uvm_pagefree2: second part of free page
+ *
+ * => put page on free list
+ * => page must be anonymous (e.g. never mapped, or uvm_pagefree1() called)
+ */
+
+void
+uvm_pagefree2(struct vm_page *pg)
+{
+	struct pgflist *pgfl;
+	struct uvm_cpu *ucpu;
+	int index, color, queue;
+	bool iszero;
+
+#ifdef DEBUG
+	if (pg->uobject == (void *)0xdeadbeef &&
+	    pg->uanon == (void *)0xdeadbeef) {
+		panic("uvm_pagefree2: freeing free page %p", pg);
+	}
+#endif /* DEBUG */
+
+	KASSERT(!(pg->pqflags & PQ_FREE));
 
 	/*
 	 * and put on free queue
@@ -1320,7 +1358,6 @@ uvm_pagefree(struct vm_page *pg)
 		uvm_pagezerocheck(pg);
 #endif /* DEBUG */
 
-	/* global list */
 	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
 	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
@@ -1630,7 +1667,7 @@ uvm_pagedeactivate(struct vm_page *pg)
 /*
  * uvm_pageactivate: activate page
  *
- * => caller must lock page queues
+ * => caller must lock page queues and objects
 */
 
 void
@@ -1648,7 +1685,37 @@ uvm_pageactivate(struct vm_page *pg)
 	if (pg->wire_count != 0) {
 		return;
 	}
-	uvmpdpol_pageactivate(pg);
+	/* Avoid false sharing: don't disturb data structures unless needed. */
+	if (uvmpdpol_pageactivate_p(pg)) {
+		uvmpdpol_pageactivate(pg);
+	}
+}
+
+/*
+ * uvm_pageactivate1: lock-avoidant version of uvm_pageactivate
+ *
+ * => caller must lock objects, but not page queues
+ */
+
+void
+uvm_pageactivate1(struct vm_page *pg)
+{
+
+	KASSERT(uvm_page_locked_p(pg));
+
+#if defined(READAHEAD_STATS)
+	if ((pg->pqflags & PQ_READAHEAD) != 0) {
+		mutex_enter(&uvm_pageqlock);
+		uvm_ra_hit.ev_count++;
+		pg->pqflags &= ~PQ_READAHEAD;
+		mutex_exit(&uvm_pageqlock);
+	}
+#endif /* defined(READAHEAD_STATS) */
+
+	/* Safe to test with only uobject/anon lock held. */
+	if (pg->wire_count == 0 && uvmpdpol_pageactivate_p(pg)) {
+		mutex_enter(&uvm_pageqlock);
+		uvm_pageactivate(pg);
+		mutex_exit(&uvm_pageqlock);
+	}
 }
 
 /*
@@ -1659,11 +1726,13 @@ void
 uvm_pagedequeue(struct vm_page *pg)
 {
 
+	KASSERT(uvm_page_locked_p(pg));
+	KASSERT(mutex_owned(&uvm_pageqlock));
+
 	if (uvmpdpol_pageisqueued_p(pg)) {
 		KASSERT(mutex_owned(&uvm_pageqlock));
+		uvmpdpol_pagedequeue(pg);
 	}
-
-	uvmpdpol_pagedequeue(pg);
 }
 
 /*
@@ -1675,11 +1744,31 @@ void
 uvm_pageenqueue(struct vm_page *pg)
 {
 
+	KASSERT(uvm_page_locked_p(pg));
 	KASSERT(mutex_owned(&uvm_pageqlock));
 
-	if (pg->wire_count != 0) {
-		return;
+
+	/* Avoid false sharing: don't disturb data structures unless needed. */
+	if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
+		uvmpdpol_pageenqueue(pg);
+	}
+}
+
+/*
+ * uvm_pageenqueue1: lock-avoidant version of uvm_pageenqueue
+ */
+
+void
+uvm_pageenqueue1(struct vm_page *pg)
+{
+
+	KASSERT(uvm_page_locked_p(pg));
+
+	/* Safe to test with only uobject/anon lock held. */
+	if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
+		mutex_enter(&uvm_pageqlock);
+		uvmpdpol_pageenqueue(pg);
+		mutex_exit(&uvm_pageqlock);
 	}
-	uvmpdpol_pageenqueue(pg);
 }
 
 /*
@@ -1888,7 +1977,7 @@ uvm_page_printit(struct vm_page *pg, boo
 }
 
 /*
- * uvm_pages_printthem - print a summary of all managed pages
+ * uvm_page_printall - print a summary of all managed pages
 */
 
 void
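
Taken together, uvm_page.c now offers entry points with three distinct
lock contracts.  A usage summary (sketch; "owner lock" means the page's
vmobjlock, am_lock or an_lock, held in all cases):

	/* (a) caller also holds uvm_pageqlock: */
	uvm_pageactivate(pg);
	uvm_pageenqueue(pg);
	uvm_pagedequeue(pg);
	uvm_pagefree1(pg);

	/* (b) owner lock only; pageqlock taken internally when needed: */
	uvm_pageactivate1(pg);
	uvm_pageenqueue1(pg);

	/* (c) no locks required; page identity already stripped: */
	uvm_pagefree2(pg);

Note that uvm_pageactivate1() re-runs the wire_count and
uvmpdpol_pageactivate_p() tests via uvm_pageactivate() once it holds
uvm_pageqlock, so no decision made under the weaker lock alone is ever
acted on.
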
Index: uvm/uvm_page.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.h,v
retrieving revision 1.84
diff -u -p -r1.84 uvm_page.h
--- uvm/uvm_page.h	7 Jan 2019 22:48:01 -0000	1.84
+++ uvm/uvm_page.h	3 Dec 2019 00:16:52 -0000
@@ -98,9 +98,11 @@
 *
 * Field markings and the corresponding locks:
 *
- * f:	free page queue lock, uvm_fpageqlock
+ * f:	free page queue lock
 * o:	page owner (uvm_object::vmobjlock, vm_amap::am_lock, vm_anon::an_lock)
 * p:	page queue lock, uvm_pageqlock
+ *	=> page queue flags set and cleared only with o&p held can
+ *	   safely be tested for with only o held.
 * o,p:	o|p for read, o&p for write
 * w:	wired page queue or uvm_pglistalloc:
 *	=> wired page queue: o&p to change, stable from wire to unwire
@@ -145,7 +147,27 @@
 */
 
 struct vm_page {
+	/*
+	 * The fields in this first part of the structure (56 bytes on
+	 * _LP64) do not change much, so we cluster them together in the
+	 * same cache line.  We want to keep the cache line containing these
+	 * clean and in the SHARED state as much as possible, especially so
+	 * that uvm_pagelookup() doesn't suffer cache misses.
+	 */
 	struct rb_node		rb_node;	/* o: tree of pages in obj */
+	struct vm_anon		*uanon;		/* o,p: anon */
+	struct uvm_object	*uobject;	/* o,p: object */
+	voff_t			offset;		/* o,p: offset into object */
+	paddr_t			phys_addr;	/* : PA of page */
+
+	/*
+	 * The remaining fields change more often, or are only examined when
+	 * there is direct activity on the page and/or associated objects.
+	 */
+	union {
+		TAILQ_ENTRY(vm_page) queue;	/* o: pages in same object */
+		LIST_ENTRY(vm_page) list;	/* f: CPU free page queue */
+	} listq;
 
 	union {
 		TAILQ_ENTRY(vm_page) queue;	/* w: wired page queue
@@ -153,19 +175,13 @@ struct vm_page {
 		LIST_ENTRY(vm_page) list;	/* f: global free page queue */
 	} pageq;
 
-	union {
-		TAILQ_ENTRY(vm_page) queue;	/* o: pages in same object */
-		LIST_ENTRY(vm_page) list;	/* f: CPU free page queue */
-	} listq;
-
-	struct vm_anon		*uanon;		/* o,p: anon */
-	struct uvm_object	*uobject;	/* o,p: object */
-	voff_t			offset;		/* o,p: offset into object */
 	uint16_t		flags;		/* o: object flags */
 	uint16_t		loan_count;	/* o,p: num. active loans */
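
The comment's arithmetic for the read-mostly prefix checks out on _LP64
(assuming NetBSD's usual three-pointer, 24-byte struct rb_node):

	rb_node      24 bytes
	uanon         8
	uobject       8
	offset        8  (voff_t)
	phys_addr     8  (paddr_t)
	            ----
	             56 bytes

leaving 8 bytes of a 64-byte COHERENCY_UNIT line before the frequently
written listq/pageq entries begin.  Also notable below: wire_count
widens to uint32_t and becomes "o,p", so it can be read with only the
owner lock held, and the new pdpol word gives the page replacement
policy a per-page scratch field (the clock policy stores an activation
timestamp in it).
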
-	uint16_t		wire_count;	/* p: wired down map refs */
-	uint16_t		pqflags;	/* p: page queue flags */
-	paddr_t			phys_addr;	/* physical address of page */
+	uint8_t			unused1;	/* p,f: currently unused */
+	uint8_t			unused2;	/* p,f: currently unused */
+	uint16_t		pqflags;	/* p,f: page queue flags */
+	uint32_t		pdpol;		/* p: for use of pd policy */
+	uint32_t		wire_count;	/* o,p: wired down map refs */
 
 #ifdef __HAVE_VM_PAGE_MD
 	struct vm_page_md	mdpage;		/* ?: pmap-specific data */
@@ -317,12 +333,16 @@ void uvm_page_recolor(int);
 void uvm_pageidlezero(void);
 
 void uvm_pageactivate(struct vm_page *);
+void uvm_pageactivate1(struct vm_page *);
 vaddr_t uvm_pageboot_alloc(vsize_t);
 void uvm_pagecopy(struct vm_page *, struct vm_page *);
 void uvm_pagedeactivate(struct vm_page *);
 void uvm_pagedequeue(struct vm_page *);
 void uvm_pageenqueue(struct vm_page *);
+void uvm_pageenqueue1(struct vm_page *);
 void uvm_pagefree(struct vm_page *);
+void uvm_pagefree1(struct vm_page *);
+void uvm_pagefree2(struct vm_page *);
 void uvm_page_unbusy(struct vm_page **, int);
 struct vm_page *uvm_pagelookup(struct uvm_object *, voff_t);
 void uvm_pageunwire(struct vm_page *);
Index: uvm/uvm_pdpolicy.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy.h,v
retrieving revision 1.3
diff -u -p -r1.3 uvm_pdpolicy.h
--- uvm/uvm_pdpolicy.h	21 Feb 2007 23:00:14 -0000	1.3
+++ uvm/uvm_pdpolicy.h	3 Dec 2019 00:16:52 -0000
@@ -43,6 +43,7 @@ void uvmpdpol_estimatepageable(int *, in
 bool uvmpdpol_needsscan_p(void);
 
 void uvmpdpol_pageactivate(struct vm_page *);
+bool uvmpdpol_pageactivate_p(struct vm_page *);
 void uvmpdpol_pagedeactivate(struct vm_page *);
 void uvmpdpol_pagedequeue(struct vm_page *);
 void uvmpdpol_pageenqueue(struct vm_page *);
Index: uvm/uvm_pdpolicy_clock.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clock.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clock.c
--- uvm/uvm_pdpolicy_clock.c	30 Jan 2012 17:21:52 -0000	1.17
+++ uvm/uvm_pdpolicy_clock.c	3 Dec 2019 00:16:52 -0000
@@ -319,6 +319,7 @@ uvmpdpol_pageactivate(struct vm_page *pg
 	uvmpdpol_pagedequeue(pg);
 	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq.queue);
 	pg->pqflags |= PQ_ACTIVE;
+	pg->pdpol = hardclock_ticks;
 	pdpol_state.s_active++;
 }
 
@@ -360,6 +361,15 @@ uvmpdpol_pageisqueued_p(struct vm_page *
 	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
 }
 
+bool
+uvmpdpol_pageactivate_p(struct vm_page *pg)
+{
+
+	/* Safety: PQ_ACTIVE clear also tells us if it is not enqueued. */
+	return (pg->pqflags & PQ_ACTIVE) == 0 ||
+	    (hardclock_ticks - pg->pdpol) > hz;
+}
+
 void
 uvmpdpol_estimatepageable(int *active, int *inactive)
 {
Index: uvm/uvm_pdpolicy_clockpro.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clockpro.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clockpro.c
--- uvm/uvm_pdpolicy_clockpro.c	20 Jun 2011 23:18:58 -0000	1.17
+++ uvm/uvm_pdpolicy_clockpro.c	3 Dec 2019 00:16:52 -0000
@@ -1111,6 +1111,14 @@ uvmpdpol_pagedeactivate(struct vm_page *
 	clockpro_clearreferencebit(pg, true);
 }
 
+bool
+uvmpdpol_pageactivate_p(struct vm_page *pg)
+{
+
+	/* XXX Work out a good test.  For now always activate. */
+	return true;
+}
+
 void
 uvmpdpol_pagedequeue(struct vm_page *pg)
 {
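
The clock policy's uvmpdpol_pageactivate_p() thus rate-limits requeuing
to roughly once per second per page: an inactive (or never-queued) page
always activates, while an already-active page is only moved again once
its timestamp is more than hz ticks stale.  The subtraction form is
what makes the age test survive hardclock_ticks wrapping; pg->pdpol is
a uint32_t, so the arithmetic wraps modulo 2^32 (worked example with
hz = 100):

	/*
	 * pg->pdpol       = 0xfffffff5  (activated just before the wrap)
	 * hardclock_ticks = 0x000000c8  (211 ticks later, post-wrap)
	 *
	 * hardclock_ticks - pg->pdpol = 0x000000d3 = 211 > hz
	 *
	 * so the page is considered stale and requeued, where a direct
	 * ordered comparison of the two raw values would get this
	 * backwards.
	 */

The clockpro variant keeps the old behaviour by always answering true,
so the false-sharing avoidance currently only takes effect under the
default clock policy.
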