Index: uvm_pdpolicy.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy.h,v
retrieving revision 1.3
diff -u -p -r1.3 uvm_pdpolicy.h
--- uvm_pdpolicy.h	21 Feb 2007 23:00:14 -0000	1.3
+++ uvm_pdpolicy.h	6 Dec 2019 13:09:59 -0000
@@ -41,6 +41,9 @@ void uvmpdpol_init(void);
 void uvmpdpol_reinit(void);
 void uvmpdpol_estimatepageable(int *, int *);
 bool uvmpdpol_needsscan_p(void);
+void uvmpdpol_lock(void);
+void uvmpdpol_unlock(void);
+bool uvmpdpol_locked_p(void);
 
 void uvmpdpol_pageactivate(struct vm_page *);
 void uvmpdpol_pagedeactivate(struct vm_page *);
Index: uvm_pdpolicy_clock.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clock.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_pdpolicy_clock.c
--- uvm_pdpolicy_clock.c	30 Jan 2012 17:21:52 -0000	1.17
+++ uvm_pdpolicy_clock.c	6 Dec 2019 13:09:59 -0000
@@ -110,8 +111,13 @@ struct uvmpdpol_scanstate {
 	struct vm_page *ss_nextpg;
 };
 
+static void uvmpdpol_pageactivate_locked(struct vm_page *);
+static void uvmpdpol_pagedeactivate_locked(struct vm_page *);
+static void uvmpdpol_pagedequeue_locked(struct vm_page *);
+
 static struct uvmpdpol_globalstate pdpol_state;
 static struct uvmpdpol_scanstate pdpol_scanstate;
+static kmutex_t uvm_pdpol_lock __cacheline_aligned;
 
 PDPOL_EVCNT_DEFINE(reactexec)
 PDPOL_EVCNT_DEFINE(reactfile)
@@ -171,7 +178,8 @@ uvmpdpol_selectvictim(void)
 	struct vm_page *pg;
 	kmutex_t *lock;
 
-	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(mutex_owned(&uvm_pageqlock));	/* for uvmpd_trylockowner() */
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
 
 	while (/* CONSTCOND */ 1) {
 		struct vm_anon *anon;
@@ -201,7 +209,7 @@ uvmpdpol_selectvictim(void)
 		lock = uvmpd_trylockowner(pg);
 		if (lock != NULL) {
 			if (pmap_is_referenced(pg)) {
-				uvmpdpol_pageactivate(pg);
+				uvmpdpol_pageactivate_locked(pg);
 				uvmexp.pdreact++;
 				mutex_exit(lock);
 				continue;
@@ -221,18 +229,18 @@ uvmpdpol_selectvictim(void)
 		 */
 		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactexec);
 			continue;
 		}
 		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
 		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactfile);
 			continue;
 		}
 		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
-			uvmpdpol_pageactivate(pg);
+			uvmpdpol_pageactivate_locked(pg);
 			PDPOL_EVCNT_INCR(reactanon);
 			continue;
 		}
 
@@ -282,7 +290,7 @@ uvmpdpol_balancequeue(int swap_shortage)
 		/* no need to check wire_count as pg is "active" */
 		lock = uvmpd_trylockowner(p);
 		if (lock != NULL) {
-			uvmpdpol_pagedeactivate(p);
+			uvmpdpol_pagedeactivate_locked(p);
 			uvmexp.pddeact++;
 			inactive_shortage--;
 			mutex_exit(lock);
@@ -290,12 +298,12 @@ uvmpdpol_balancequeue(int swap_shortage)
 	}
 }
 
-void
-uvmpdpol_pagedeactivate(struct vm_page *pg)
+static void
+uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
 {
 
 	KASSERT(uvm_page_locked_p(pg));
-	KASSERT(mutex_owned(&uvm_pageqlock));
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
 
 	if (pg->pqflags & PQ_ACTIVE) {
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
@@ -313,27 +321,50 @@ uvmpdpol_pagedeactivate(struct vm_page *
 }
 
 void
-uvmpdpol_pageactivate(struct vm_page *pg)
+uvmpdpol_pagedeactivate(struct vm_page *pg)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+	uvmpdpol_pagedeactivate_locked(pg);
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+static void
+uvmpdpol_pageactivate_locked(struct vm_page *pg)
 {
 
-	uvmpdpol_pagedequeue(pg);
+	uvmpdpol_pagedequeue_locked(pg);
 	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq.queue);
 	pg->pqflags |= PQ_ACTIVE;
+	pg->pdpol = hardclock_ticks;
 	pdpol_state.s_active++;
 }
 
 void
-uvmpdpol_pagedequeue(struct vm_page *pg)
+uvmpdpol_pageactivate(struct vm_page *pg)
+{
+
+	/* Safety: PQ_ACTIVE clear also tells us if it is not enqueued. */
+	if ((pg->pqflags & PQ_ACTIVE) == 0 ||
+	    (hardclock_ticks - pg->pdpol) > hz) {
+		mutex_enter(&uvm_pdpol_lock);
+		uvmpdpol_pageactivate_locked(pg);
+		mutex_exit(&uvm_pdpol_lock);
+	}
+}
+
+static void
+uvmpdpol_pagedequeue_locked(struct vm_page *pg)
 {
 
+	KASSERT(mutex_owned(&uvm_pdpol_lock));
+
 	if (pg->pqflags & PQ_ACTIVE) {
-		KASSERT(mutex_owned(&uvm_pageqlock));
 		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
 		pg->pqflags &= ~PQ_ACTIVE;
 		KASSERT(pdpol_state.s_active > 0);
 		pdpol_state.s_active--;
 	} else if (pg->pqflags & PQ_INACTIVE) {
-		KASSERT(mutex_owned(&uvm_pageqlock));
 		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pageq.queue);
 		pg->pqflags &= ~PQ_INACTIVE;
 		KASSERT(pdpol_state.s_inactive > 0);
@@ -342,10 +373,45 @@ uvmpdpol_pagedequeue(struct vm_page *pg)
 }
 
 void
+uvmpdpol_pagedequeue(struct vm_page *pg)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+	uvmpdpol_pagedequeue_locked(pg);
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+void
 uvmpdpol_pageenqueue(struct vm_page *pg)
 {
 
-	uvmpdpol_pageactivate(pg);
+	/* Safe to test unlocked due to page life-cycle. */
+	if (!uvmpdpol_pageisqueued_p(pg)) {
+		mutex_enter(&uvm_pdpol_lock);
+		uvmpdpol_pageactivate_locked(pg);
+		mutex_exit(&uvm_pdpol_lock);
+	}
+}
+
+void
+uvmpdpol_lock(void)
+{
+
+	mutex_enter(&uvm_pdpol_lock);
+}
+
+void
+uvmpdpol_unlock(void)
+{
+
+	mutex_exit(&uvm_pdpol_lock);
+}
+
+bool
+uvmpdpol_locked_p(void)
+{
+
+	return mutex_owned(&uvm_pdpol_lock);
 }
 
 void
@@ -400,6 +466,7 @@ uvmpdpol_init(void)
 {
 	struct uvmpdpol_globalstate *s = &pdpol_state;
 
+	mutex_init(&uvm_pdpol_lock, MUTEX_DEFAULT, IPL_NONE);
 	TAILQ_INIT(&s->s_activeq);
 	TAILQ_INIT(&s->s_inactiveq);
 	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
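
The recurring pattern in the patch is a split of each queue operation into an
internal _locked variant for callers that already hold uvm_pdpol_lock, plus a
public wrapper that takes and drops the lock itself; uvmpdpol_pageactivate()
additionally skips the lock entirely when the page is already active and was
activated within the last second ((hardclock_ticks - pg->pdpol) > hz).  Below
is a minimal userland sketch of that pattern, for illustration only: it uses
pthreads, an invented struct page, and time() in place of hardclock_ticks.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Invented miniature of the patch's locking pattern; not kernel code. */
struct page {
	bool	active;		/* stands in for PQ_ACTIVE */
	time_t	activated;	/* stands in for pg->pdpol */
};

static pthread_mutex_t pdpol_lock = PTHREAD_MUTEX_INITIALIZER;

/* Internal variant: caller must already hold pdpol_lock. */
static void
page_activate_locked(struct page *pg)
{

	/* here: dequeue, then insert at the tail of the active queue */
	pg->active = true;
	pg->activated = time(NULL);
}

/*
 * Public wrapper: takes the lock itself, but avoids it entirely when the
 * page is already active and was activated recently, mirroring the
 * (hardclock_ticks - pg->pdpol) > hz debounce in the patch.
 */
void
page_activate(struct page *pg)
{

	if (!pg->active || time(NULL) - pg->activated > 1) {
		pthread_mutex_lock(&pdpol_lock);
		page_activate_locked(pg);
		pthread_mutex_unlock(&pdpol_lock);
	}
}

int
main(void)
{
	struct page pg = { .active = false, .activated = 0 };

	page_activate(&pg);	/* first call takes the lock */
	page_activate(&pg);	/* immediate re-activation is a no-op */
	return 0;
}

The debounce deliberately trades exact LRU ordering of the active queue for
fewer lock acquisitions on frequently referenced pages, which is the
contention the new uvm_pdpol_lock would otherwise concentrate.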