Index: kern/subr_lockdebug.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_lockdebug.c,v
retrieving revision 1.57
diff -u -u -r1.57 subr_lockdebug.c
--- kern/subr_lockdebug.c	1 Jun 2017 02:45:13 -0000	1.57
+++ kern/subr_lockdebug.c	26 Feb 2018 21:07:11 -0000
@@ -52,6 +52,8 @@
 #include <sys/lock.h>
 #include <sys/rbtree.h>
 
+#include <uvm/uvm.h>
+
 #include <machine/lock.h>
 
 unsigned int		ld_panic;
@@ -368,8 +370,11 @@
 
 	/*
 	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
 	 * deadlock, because we don't know which locks the caller holds.
+	 * We also don't want to allocate more memory when called from
+	 * the pagedaemon, because we don't want to sleep.
 	 */
-	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
+	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0
+	    || curlwp == uvm.pagedaemon_lwp) {
 		return s;
 	}
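
The hunk above closes a deadlock window: lockdebug grows its table of debug
structures with kmem_alloc(), which can sleep waiting for free pages, and the
only thread that produces free pages is the pagedaemon itself.  The sketch
below is not part of the patch; it is a minimal userland pthreads analogue of
that guard, with hypothetical names (reclaim_thread, cache_grow), showing the
pattern of refusing a sleeping allocation when the caller is the thread that
is responsible for reclaiming memory.

/* Hypothetical userland analogue of the lockdebug guard above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_t reclaim_thread;	/* stands in for uvm.pagedaemon_lwp */

/*
 * Try to grow a cache.  The reclaim thread must never sleep waiting for
 * memory (it is the thread that frees memory), so for it we fail fast
 * instead of calling the allocator, just as the patched lockdebug code
 * returns early for the pagedaemon.
 */
static bool
cache_grow(size_t nbytes, void **out)
{
	if (pthread_equal(pthread_self(), reclaim_thread)) {
		*out = NULL;
		return false;		/* caller degrades gracefully */
	}
	*out = malloc(nbytes);		/* may block in a real allocator */
	return *out != NULL;
}

int
main(void)
{
	void *p;

	reclaim_thread = pthread_self();   /* pretend we are the reclaimer */
	printf("grow from reclaim thread: %s\n",
	    cache_grow(4096, &p) ? "ok" : "refused");
	return 0;
}
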
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.108.22.1
diff -u -u -r1.108.22.1 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c	2 Nov 2017 21:29:53 -0000	1.108.22.1
+++ uvm/uvm_pdaemon.c	26 Feb 2018 21:07:12 -0000
@@ -79,6 +79,7 @@
 #include <sys/buf.h>
 #include <sys/module.h>
 #include <sys/atomic.h>
+#include <sys/kthread.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_pdpolicy.h>
@@ -105,9 +106,16 @@
 static void	uvmpd_scan(void);
 static void	uvmpd_scan_queue(void);
 static void	uvmpd_tune(void);
+static void	uvmpd_pool_drain_thread(void *);
+static void	uvmpd_pool_drain_wakeup(void);
 
 static unsigned int uvm_pagedaemon_waiters;
 
+/* State for the pool drainer thread */
+static kmutex_t	uvmpd_pool_drain_lock;
+static kcondvar_t uvmpd_pool_drain_cv;
+static bool uvmpd_pool_drain_run = false;
+
 /*
  * XXX hack to avoid hangs when large processes fork.
  */
@@ -229,14 +237,21 @@
 void
 uvm_pageout(void *arg)
 {
-	int bufcnt, npages = 0;
+	int npages = 0;
 	int extrapages = 0;
-	struct pool *pp;
 
 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
 
 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
 
+	mutex_init(&uvmpd_pool_drain_lock, MUTEX_DEFAULT, IPL_VM);
+	cv_init(&uvmpd_pool_drain_cv, "pooldrain");
+
+	/* Create the pool drainer kernel thread. */
+	if (kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL,
+	    uvmpd_pool_drain_thread, NULL, NULL, "pooldrain"))
+		panic("fork pooldrain");
+
 	/*
 	 * ensure correct priority and set paging parameters...
 	 */
@@ -288,13 +303,9 @@
 	 * system only when entire pool page is empty.
 	 */
 	mutex_spin_enter(&uvm_fpageqlock);
-	bufcnt = uvmexp.freetarg - uvmexp.free;
-	if (bufcnt < 0)
-		bufcnt = 0;
 
 	UVMHIST_LOG(pdhist,"  free/ftarg=%jd/%jd",
 	    uvmexp.free, uvmexp.freetarg, 0,0);
-
 	needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
 	needsscan = needsfree || uvmpdpol_needsscan_p();
 
@@ -331,16 +342,10 @@
 			continue;
 
 		/*
-		 * kill unused metadata buffers.
+		 * kick the pool drainer thread.
 		 */
-		mutex_enter(&bufcache_lock);
-		buf_drain(bufcnt << PAGE_SHIFT);
-		mutex_exit(&bufcache_lock);
 
-		/*
-		 * drain the pools.
-		 */
-		pool_drain(&pp);
+		uvmpd_pool_drain_wakeup();
 	}
 	/*NOTREACHED*/
 }
@@ -1022,3 +1027,53 @@
 
 	uvmpdpol_estimatepageable(active, inactive);
 }
+
+/*
+ * Use a separate thread for draining pools.
+ * This work can't be done from the main pagedaemon thread because
+ * some pool allocators need to take vm_map locks.
+ */
+
+static void
+uvmpd_pool_drain_thread(void *arg)
+{
+	int bufcnt;
+
+	for (;;) {
+		mutex_enter(&uvmpd_pool_drain_lock);
+		if (!uvmpd_pool_drain_run) {
+			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
+		}
+		uvmpd_pool_drain_run = false;
+		mutex_exit(&uvmpd_pool_drain_lock);
+
+		/*
+		 * kill unused metadata buffers.
+		 */
+		mutex_spin_enter(&uvm_fpageqlock);
+		bufcnt = uvmexp.freetarg - uvmexp.free;
+		mutex_spin_exit(&uvm_fpageqlock);
+		if (bufcnt < 0)
+			bufcnt = 0;
+
+		mutex_enter(&bufcache_lock);
+		buf_drain(bufcnt << PAGE_SHIFT);
+		mutex_exit(&bufcache_lock);
+
+		/*
+		 * drain a pool.
+		 */
+		pool_drain(NULL);
+	}
+	/*NOTREACHED*/
+}
+
+static void
+uvmpd_pool_drain_wakeup(void)
+{
+
+	mutex_enter(&uvmpd_pool_drain_lock);
+	uvmpd_pool_drain_run = true;
+	cv_signal(&uvmpd_pool_drain_cv);
+	mutex_exit(&uvmpd_pool_drain_lock);
+}
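
For readers who want to try the handoff outside the kernel, here is a minimal
userland pthreads analogue of the uvmpd_pool_drain_thread()/
uvmpd_pool_drain_wakeup() pair.  It is not part of the patch, and the names
(drain_thread, drain_wakeup, drain_run) are hypothetical; only the pattern is
the same: the pagedaemon (here, main) merely sets a flag and signals a
condition variable, while the slow, lock-taking drain work runs in a thread
of its own.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drain_cv = PTHREAD_COND_INITIALIZER;
static bool drain_run = false;

/* Worker: sleep until woken, consume the request, then do the slow work. */
static void *
drain_thread(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&drain_lock);
		while (!drain_run)
			pthread_cond_wait(&drain_cv, &drain_lock);
		drain_run = false;	/* consume the request */
		pthread_mutex_unlock(&drain_lock);

		/* Slow, lock-taking work happens outside drain_lock. */
		printf("draining...\n");
		sleep(1);
	}
	return NULL;
}

/* Wakeup: cheap and non-blocking apart from the short mutex hold. */
static void
drain_wakeup(void)
{
	pthread_mutex_lock(&drain_lock);
	drain_run = true;
	pthread_cond_signal(&drain_cv);
	pthread_mutex_unlock(&drain_lock);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, drain_thread, NULL);
	drain_wakeup();		/* analogue of uvmpd_pool_drain_wakeup() */
	sleep(2);		/* give the worker time to run once */
	return 0;
}

One difference worth noting: the sketch re-checks the flag in a while loop,
the textbook condition-variable idiom, whereas the kernel code above uses a
plain if.  That is acceptable there because a spurious wakeup merely causes
one extra, harmless drain pass.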