Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.110
diff -u -p -r1.110 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c	21 Apr 2019 15:32:18 -0000	1.110
+++ uvm/uvm_pdaemon.c	1 Jan 2020 16:05:00 -0000
@@ -915,6 +915,8 @@ uvmpd_scan_queue(void)
 #endif /* defined(VMSWAP) */
 	}
 
+	uvmpdpol_scanfini();
+
 #if defined(VMSWAP)
 	mutex_exit(&uvm_pageqlock);
 	swapcluster_flush(&swc, true);
@@ -1038,17 +1040,39 @@ uvm_estimatepageable(int *active, int *i
 static void
 uvmpd_pool_drain_thread(void *arg)
 {
-	int bufcnt;
+	struct pool *firstpool, *curpool;
+	int bufcnt, lastslept;
+	bool cycled;
 
+	firstpool = NULL;
+	cycled = true;
 	for (;;) {
+		/*
+		 * sleep until awoken by the pagedaemon.
+		 */
 		mutex_enter(&uvmpd_pool_drain_lock);
 		if (!uvmpd_pool_drain_run) {
+			lastslept = hardclock_ticks;
 			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
+			if (hardclock_ticks != lastslept) {
+				cycled = false;
+				firstpool = NULL;
+			}
 		}
 		uvmpd_pool_drain_run = false;
 		mutex_exit(&uvmpd_pool_drain_lock);
 
 		/*
+		 * rate limit draining, otherwise in desperate circumstances
+		 * this can totally saturate the system with xcall activity.
+		 */
+		if (cycled) {
+			kpause("uvmpdlmt", false, 1, NULL);
+			cycled = false;
+			firstpool = NULL;
+		}
+
+		/*
 		 * kill unused metadata buffers.
 		 */
 		mutex_spin_enter(&uvm_fpageqlock);
@@ -1064,7 +1088,13 @@ uvmpd_pool_drain_thread(void *arg)
 		/*
 		 * drain a pool.
 		 */
-		pool_drain(NULL);
+		(void)pool_drain(&curpool);
+		KASSERT(curpool != NULL);
+		if (firstpool == NULL) {
+			firstpool = curpool;
+		} else if (firstpool == curpool) {
+			cycled = true;
+		}
 	}
 	/*NOTREACHED*/
 }
Index: uvm/uvm_pdpolicy.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy.h,v
retrieving revision 1.3
diff -u -p -r1.3 uvm_pdpolicy.h
--- uvm/uvm_pdpolicy.h	21 Feb 2007 23:00:14 -0000	1.3
+++ uvm/uvm_pdpolicy.h	1 Jan 2020 16:05:00 -0000
@@ -51,6 +51,7 @@ void uvmpdpol_anfree(struct vm_anon *);
 
 void uvmpdpol_tune(void);
 void uvmpdpol_scaninit(void);
+void uvmpdpol_scanfini(void);
 struct vm_page *uvmpdpol_selectvictim(void);
 void uvmpdpol_balancequeue(int);
 
Index: uvm/uvm_pdpolicy_clock.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clock.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clock.c
--- uvm/uvm_pdpolicy_clock.c	30 Jan 2012 17:21:52 -0000	1.17
+++ uvm/uvm_pdpolicy_clock.c	1 Jan 2020 16:05:00 -0000
@@ -105,9 +105,8 @@ struct uvmpdpol_globalstate {
 };
 
 struct uvmpdpol_scanstate {
-	bool ss_first;
 	bool ss_anonreact, ss_filereact, ss_execreact;
-	struct vm_page *ss_nextpg;
+	struct vm_page ss_marker;
 };
 
 static struct uvmpdpol_globalstate pdpol_state;
@@ -160,8 +159,18 @@ uvmpdpol_scaninit(void)
 	ss->ss_anonreact = anonreact;
 	ss->ss_filereact = filereact;
 	ss->ss_execreact = execreact;
+	memset(&ss->ss_marker, 0, sizeof(ss->ss_marker));
+	ss->ss_marker.flags = PG_MARKER;
+	TAILQ_INSERT_HEAD(&pdpol_state.s_inactiveq, &ss->ss_marker,
+	    pageq.queue);
+}
+
+void
+uvmpdpol_scanfini(void)
+{
+	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
 
-	ss->ss_first = true;
+	TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker, pageq.queue);
 }
 
 struct vm_page *
@@ -177,20 +186,11 @@ uvmpdpol_selectvictim(void)
 		struct vm_anon *anon;
 		struct uvm_object *uobj;
 
-		if (ss->ss_first) {
-			pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
-			ss->ss_first = false;
-		} else {
-			pg = ss->ss_nextpg;
-			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
-				pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
-			}
-		}
+		pg = TAILQ_NEXT(&ss->ss_marker, pageq.queue);
 		if (pg == NULL) {
 			break;
 		}
-		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);
-
+		KASSERT((pg->flags & PG_MARKER) == 0);
 		uvmexp.pdscans++;
 
 		/*
@@ -213,6 +213,15 @@ uvmpdpol_selectvictim(void)
 		uobj = pg->uobject;
 
 		/*
+		 * now prepare to move on to the next page.
+		 */
+
+		TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker,
+		    pageq.queue);
+		TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
+		    &ss->ss_marker, pageq.queue);
+
+		/*
 		 * enforce the minimum thresholds on different
 		 * types of memory usage.  if reusing the current
 		 * page would reduce that type of usage below its
Index: uvm/uvm_pdpolicy_clockpro.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clockpro.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clockpro.c
--- uvm/uvm_pdpolicy_clockpro.c	20 Jun 2011 23:18:58 -0000	1.17
+++ uvm/uvm_pdpolicy_clockpro.c	1 Jan 2020 16:05:00 -0000
@@ -1190,6 +1190,12 @@ uvmpdpol_scaninit(void)
 	ss->ss_nscanned = 0;
 }
 
+void
+uvmpdpol_scanfini(void)
+{
+
+}
+
 struct vm_page *
 uvmpdpol_selectvictim(void)
 {
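
The uvm_pdpolicy_clock.c hunks above replace the saved-pointer scan state
(ss_first/ss_nextpg) with a dummy "marker" page linked into the inactive
queue: uvmpdpol_selectvictim() advances the marker past each page as it is
handed out, so the scan position stays valid even if that page is freed or
re-queued before the next call, and uvmpdpol_scanfini() unlinks the marker
when the pass ends.  The following is a minimal, self-contained userland
sketch of that marker-node TAILQ pattern; it is illustrative only, not part
of the patch, and every name in it (struct item, ITEM_MARKER,
scan_begin/scan_next/scan_end) is made up for the example.

	#include <assert.h>
	#include <stdio.h>
	#include <sys/queue.h>

	#define ITEM_MARKER	0x01		/* dummy entry, never processed */

	struct item {
		TAILQ_ENTRY(item) entry;
		int flags;
		int value;
	};

	TAILQ_HEAD(itemq, item);

	static void
	scan_begin(struct itemq *q, struct item *marker)
	{

		marker->flags = ITEM_MARKER;
		TAILQ_INSERT_HEAD(q, marker, entry);
	}

	static struct item *
	scan_next(struct itemq *q, struct item *marker)
	{
		struct item *it;

		it = TAILQ_NEXT(marker, entry);
		if (it == NULL)
			return NULL;
		/* analogue of the patch's KASSERT: only one scanner, so the
		 * next entry is never another marker. */
		assert((it->flags & ITEM_MARKER) == 0);
		/* advance the marker past the item about to be handed out,
		 * so removing that item cannot invalidate the scan position. */
		TAILQ_REMOVE(q, marker, entry);
		TAILQ_INSERT_AFTER(q, it, marker, entry);
		return it;
	}

	static void
	scan_end(struct itemq *q, struct item *marker)
	{

		TAILQ_REMOVE(q, marker, entry);
	}

	int
	main(void)
	{
		struct itemq q = TAILQ_HEAD_INITIALIZER(q);
		struct item items[4], marker, *it;
		int i;

		for (i = 0; i < 4; i++) {
			items[i].flags = 0;
			items[i].value = i;
			TAILQ_INSERT_TAIL(&q, &items[i], entry);
		}

		scan_begin(&q, &marker);
		while ((it = scan_next(&q, &marker)) != NULL) {
			printf("visiting %d\n", it->value);
			/* the visited item may be unlinked without losing our place. */
			TAILQ_REMOVE(&q, it, entry);
		}
		scan_end(&q, &marker);
		return 0;
	}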