switch sparc pmap lock to the scheme sparc64 uses:

- local IPL_NONE mutex for general pmap locking operations, not
  kernel lock.
- for pmap_activate()/pmap_deactivate(), switch to using the existing
  ctx_lock, and push handling of it into the ctx_alloc() and
  ctx_free() callers.

seems to fix easy-to-trigger deadlocks on systems with >2 cpus.

Index: sys/arch/sparc/sparc/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/pmap.c,v
retrieving revision 1.365
diff -p -u -r1.365 pmap.c
--- sys/arch/sparc/sparc/pmap.c	3 Sep 2018 16:29:27 -0000	1.365
+++ sys/arch/sparc/sparc/pmap.c	13 Jan 2019 06:52:45 -0000
@@ -176,8 +176,8 @@ paddr_t	vm_first_phys = (paddr_t)-1;
 paddr_t	vm_last_phys = 0;
 psize_t vm_num_phys;
 
-#define PMAP_LOCK()	KERNEL_LOCK(1, NULL)
-#define PMAP_UNLOCK()	KERNEL_UNLOCK_ONE(NULL)
+#define PMAP_LOCK()	mutex_enter(&pmap_lock)
+#define PMAP_UNLOCK()	mutex_exit(&pmap_lock)
 
 /*
  * Flags in pvlist.pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
@@ -347,6 +347,7 @@ mmuq_insert_tail(struct mmuentry *head,
 int	seginval;		/* [4/4c] the invalid segment number */
 int	reginval;		/* [4/3mmu] the invalid region number */
 
+static kmutex_t pmap_lock;
 static kmutex_t demap_lock;
 static bool	lock_available = false;	/* demap_lock has been initialized */
 
@@ -372,15 +373,15 @@ union ctxinfo {
 	struct	pmap *c_pmap;		/* pmap (if busy) */
 };
 
-static kmutex_t	ctx_lock;		/* lock for below */
+static kmutex_t	ctx_lock;		/* lock for below, and {,de}activate */
 union	ctxinfo *ctxinfo;		/* allocated at in pmap_bootstrap */
 union	ctxinfo *ctx_freelist;		/* context free list */
 int	ctx_kick;			/* allocation rover when none free */
 int	ctx_kickdir;			/* ctx_kick roves both directions */
 int	ncontext;			/* sizeof ctx_freelist */
 
-void	ctx_alloc(struct pmap *);
-void	ctx_free(struct pmap *);
+static void	ctx_alloc(struct pmap *);
+static void	ctx_free(struct pmap *);
 
 /*void *	vdumppages;	-* 32KB worth of reserved dump pages */
 
@@ -2121,7 +2122,7 @@ mmu_pagein(struct pmap *pm, vaddr_t va,
  * This routine is only ever called from locore.s just after it has
  * saved away the previous process, so there are no active user windows.
  */
-void
+static void
 ctx_alloc(struct pmap *pm)
 {
 	union	ctxinfo *c;
@@ -2133,6 +2134,8 @@ ctx_alloc(struct pmap *pm)
 	struct cpu_info *cpi;
 #endif
 
+	KASSERT(mutex_owned(&ctx_lock));
+
 	/*XXX-GCC!*/gap_start=gap_end=0;
 #ifdef DEBUG
 	if (pm->pm_ctx)
@@ -2145,7 +2148,6 @@ ctx_alloc(struct pmap *pm)
 		gap_end = pm->pm_gap_end;
 	}
 
-	mutex_spin_enter(&ctx_lock);
 	if ((c = ctx_freelist) != NULL) {
 		ctx_freelist = c->c_nextfree;
 		cnum = c - ctxinfo;
@@ -2288,13 +2290,12 @@ ctx_alloc(struct pmap *pm)
 		setcontext4m(cnum);
 #endif /* SUN4M || SUN4D */
 	}
-	mutex_spin_exit(&ctx_lock);
 }
 
 /*
  * Give away a context.
  */
-void
+static void
 ctx_free(struct pmap *pm)
 {
 	union	ctxinfo *c;
@@ -2303,6 +2304,8 @@ ctx_free(struct pmap *pm)
 	struct cpu_info *cpi;
 #endif
 
+	KASSERT(mutex_owned(&ctx_lock));
+
 	c = pm->pm_ctx;
 	ctx = pm->pm_ctxnum;
 	pm->pm_ctx = NULL;
@@ -2316,8 +2319,6 @@ ctx_free(struct pmap *pm)
 	}
 #endif /* SUN4 || SUN4C */
 
-	mutex_spin_enter(&ctx_lock);
-
 #if defined(SUN4M) || defined(SUN4D)
 	if (CPU_HAS_SRMMU) {
 		CPU_INFO_ITERATOR i;
@@ -2334,7 +2335,6 @@ ctx_free(struct pmap *pm)
 
 	c->c_nextfree = ctx_freelist;
 	ctx_freelist = c;
-	mutex_spin_exit(&ctx_lock);
 }
 
 
@@ -3070,6 +3070,7 @@ pmap_bootstrap(int nctx, int nregion, in
 	}
 	pmap_page_upload();
 
+	mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
 	mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
 	mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
 	lock_available = true;
@@ -4392,7 +4393,9 @@ pmap_pmap_pool_dtor(void *arg, void *obj
 #endif
 
 	if ((c = pm->pm_ctx) != NULL) {
+		mutex_spin_enter(&ctx_lock);
 		ctx_free(pm);
+		mutex_spin_exit(&ctx_lock);
 	}
 
 #if defined(SUN4M) || defined(SUN4D)
@@ -4662,7 +4665,7 @@ pmap_remove(struct pmap *pm, vaddr_t va,
 	}
 
 	ctx = getcontext();
-	s = splvm();		/* XXX conservative */
+	s = splvm();
 	PMAP_LOCK();
 	for (; va < endva; va = nva) {
 		/* do one virtual segment at a time */
@@ -6022,7 +6025,7 @@ pmap_kenter_pa4_4c(vaddr_t va, paddr_t p
 	splx(s);
 }
 
-#if notyet
+#if notyet /* XXXMRG delete */
 void pmap_lockmmu(vaddr_t sva, size_t sz);
 
 void
@@ -6286,7 +6289,7 @@ pmap_enk4m(struct pmap *pm, vaddr_t va,
 	sp = &rp->rg_segmap[vs];
 
 	kpreempt_disable();
-	s = splvm();		/* XXX way too conservative */
+	s = splvm();
 	PMAP_LOCK();
 
 	if (rp->rg_seg_ptps == NULL) /* enter new region */
@@ -6372,7 +6375,7 @@ pmap_enu4m(struct pmap *pm, vaddr_t va,
 	vr = VA_VREG(va);
 	vs = VA_VSEG(va);
 	rp = &pm->pm_regmap[vr];
-	s = splvm();			/* XXX conservative */
+	s = splvm();
 	PMAP_LOCK();
 
 	if (rp->rg_segmap == NULL) {
@@ -7494,14 +7497,14 @@ pmap_activate(struct lwp *l)
 		return;
 	}
 
-	PMAP_LOCK();
+	mutex_spin_enter(&ctx_lock);
 	if (pm->pm_ctx == NULL) {
 		ctx_alloc(pm);	/* performs setcontext() */
 	} else {
 		setcontext(pm->pm_ctxnum);
 	}
 	PMAP_SET_CPUSET(pm, &cpuinfo);
-	PMAP_UNLOCK();
+	mutex_spin_exit(&ctx_lock);
 }
 
 /*
@@ -7518,7 +7521,7 @@ pmap_deactivate(struct lwp *l)
 	}
 
 	write_user_windows();
-	PMAP_LOCK();
+	mutex_spin_enter(&ctx_lock);
 	if (pm->pm_ctx) {
 		(*cpuinfo.pure_vcache_flush)();
 
@@ -7530,7 +7533,7 @@ pmap_deactivate(struct lwp *l)
 
 	/* we no longer need broadcast tlb flushes for this pmap. */
 	PMAP_CLR_CPUSET(pm, &cpuinfo);
-	PMAP_UNLOCK();
+	mutex_spin_exit(&ctx_lock);
 }
 
 #ifdef DEBUG
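
for reviewers, a minimal sketch (not part of the patch) of the caller
convention this establishes: ctx_alloc() and ctx_free() no longer take
ctx_lock themselves, they only KASSERT it is held, so their callers
bracket the calls with the existing IPL_SCHED spin mutex, as
pmap_activate()/pmap_deactivate() and pmap_pmap_pool_dtor() do above.
the new pmap_lock is a plain IPL_NONE mutex used only through
PMAP_LOCK()/PMAP_UNLOCK().

	/* sketch only: the caller, not ctx_alloc()/ctx_free(), owns ctx_lock */
	mutex_spin_enter(&ctx_lock);
	if (pm->pm_ctx == NULL) {
		ctx_alloc(pm);		/* performs setcontext() */
	} else {
		setcontext(pm->pm_ctxnum);
	}
	mutex_spin_exit(&ctx_lock);

	...

	mutex_spin_enter(&ctx_lock);
	ctx_free(pm);			/* asserts mutex_owned(&ctx_lock) */
	mutex_spin_exit(&ctx_lock);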