Index: pmap/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap.c,v
retrieving revision 1.53
diff -p -u -r1.53 pmap.c
--- pmap/pmap.c	11 Aug 2020 06:09:44 -0000	1.53
+++ pmap/pmap.c	19 Aug 2020 05:01:41 -0000
@@ -217,8 +217,10 @@ struct pmap_limits pmap_limits = {	/* VA
 #ifdef UVMHIST
 static struct kern_history_ent pmapexechistbuf[10000];
 static struct kern_history_ent pmaphistbuf[10000];
+static struct kern_history_ent pmapsegtabhistbuf[10000];
 UVMHIST_DEFINE(pmapexechist);
 UVMHIST_DEFINE(pmaphist);
+UVMHIST_DEFINE(pmapsegtabhist);
 #endif
 
 /*
@@ -587,6 +589,7 @@ pmap_init(void)
 {
 	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
 	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
+	UVMHIST_INIT_STATIC(pmapsegtabhist, pmapsegtabhistbuf);
 	UVMHIST_FUNC(__func__);
 	UVMHIST_CALLED(pmaphist);
 
Index: pmap/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap.h,v
retrieving revision 1.16
diff -p -u -r1.16 pmap.h
--- pmap/pmap.h	7 Aug 2020 07:19:45 -0000	1.16
+++ pmap/pmap.h	19 Aug 2020 05:01:41 -0000
@@ -78,6 +78,7 @@
 #ifdef UVMHIST
 UVMHIST_DECL(pmapexechist);
 UVMHIST_DECL(pmaphist);
+UVMHIST_DECL(pmapsegtabhist);
 #endif
 
 /*
Index: pmap/pmap_segtab.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap_segtab.c,v
retrieving revision 1.18
diff -p -u -r1.18 pmap_segtab.c
--- pmap/pmap_segtab.c	18 Aug 2020 11:48:21 -0000	1.18
+++ pmap/pmap_segtab.c	19 Aug 2020 05:01:41 -0000
@@ -131,16 +131,28 @@ struct pmap_segtab_info {
 
 kmutex_t pmap_segtab_lock __cacheline_aligned;
 
+/*
+ * Check that a seg_tab[] array is empty.
+ *
+ * This is used when allocating or freeing a pmap_segtab_t.  The stp
+ * should be unused -- meaning, all of the seg_tab[] pointers should be
+ * NULL -- whether it is a freshly allocated segtab, an unused page from
+ * a segtab allocation (the SMP case, where two CPUs may attempt to
+ * allocate the same underlying segtab), or a segtab that is being freed.
+ */
 static void
 pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
 {
 #ifdef DEBUG
 	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
-		if (stp->seg_tab[i] != 0) {
+		if (stp->seg_tab[i] != NULL) {
+#define DEBUG_NOISY
 #ifdef DEBUG_NOISY
+			printf("%s: stp = %p\n", __func__, stp);
 			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
-				printf("%s: pm_segtab.seg_tab[%zu] = %p\n",
-				    caller, j, stp->seg_tab[j]);
+				if (stp->seg_tab[j] != NULL)
+					printf("%s: stp->seg_tab[%zu] = %p\n",
+					    caller, j, stp->seg_tab[j]);
 #endif
 			panic("%s: pm_segtab.seg_tab[%zu] != 0 (%p): %s",
 			    caller, i, stp->seg_tab[i], why);
@@ -193,13 +205,30 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va)
 static void
 pmap_segtab_free(pmap_segtab_t *stp)
 {
+	static unsigned local_segcount;
+	static pmap_segtab_t *local_free_segtab, *local_free_tail;
+
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pmapsegtabhist, "stp=%#jx", (uintptr_t)stp, 0, 0, 0);
 
 	/*
 	 * Insert the segtab into the segtab freelist.
 	 */
	mutex_spin_enter(&pmap_segtab_lock);
-	stp->seg_seg[0] = pmap_segtab_info.free_segtab;
-	pmap_segtab_info.free_segtab = stp;
-	SEGTAB_ADD(nput, 1);
+	if (local_segcount < 2048) {
+		stp->seg_seg[0] = local_free_segtab;
+		if (local_free_segtab == NULL)
+			local_free_tail = stp;
+		local_free_segtab = stp;
+		local_segcount++;
+	} else {
+		/* Batch full: splice the whole chain onto the freelist. */
+		stp->seg_seg[0] = local_free_segtab;
+		local_free_tail->seg_seg[0] = pmap_segtab_info.free_segtab;
+		pmap_segtab_info.free_segtab = stp;
+		SEGTAB_ADD(nput, local_segcount + 1);
+		local_free_segtab = NULL;
+		local_segcount = 0;
+	}
 	mutex_spin_exit(&pmap_segtab_lock);
 }
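
The new code in pmap_segtab_free() batches frees: segtabs accumulate on a
file-local chain (threaded, like the freelist itself, through seg_seg[0])
and only a full batch of 2048 is spliced onto the global freelist,
presumably to delay reuse of recently freed segtabs while chasing the
corruption.  Note the else path has to splice rather than overwrite: the
batch tail is linked to the current freelist and the local head is reset,
or freelist entries would be leaked and the local and global chains would
alias each other.  Below is a standalone sketch of the same technique,
for illustration only -- segtab_t, seg_free(), seg_alloc() and BATCH are
made-up names, and the kernel locking is omitted:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define BATCH 4			/* stands in for 2048 */

typedef struct segtab {
	struct segtab *next;	/* stands in for seg_seg[0] */
} segtab_t;

static segtab_t *global_free;
static segtab_t *local_free, *local_tail;
static unsigned local_count;

static void
seg_free(segtab_t *stp)
{
	if (local_count < BATCH) {
		/* Quarantine on the local chain; remember its tail. */
		stp->next = local_free;
		if (local_free == NULL)
			local_tail = stp;
		local_free = stp;
		local_count++;
	} else {
		/* Splice the full batch plus stp onto the global list. */
		stp->next = local_free;
		local_tail->next = global_free;
		global_free = stp;
		local_free = NULL;
		local_count = 0;
	}
}

static segtab_t *
seg_alloc(void)
{
	segtab_t *stp = global_free;

	if (stp != NULL) {
		global_free = stp->next;
		stp->next = NULL;
	} else
		stp = calloc(1, sizeof(*stp));
	return stp;
}

int
main(void)
{
	segtab_t *tabs[BATCH + 1];

	/* Free enough nodes to force one batch onto the global list. */
	for (size_t i = 0; i < BATCH + 1; i++)
		seg_free(calloc(1, sizeof(segtab_t)));
	assert(global_free != NULL && local_free == NULL);

	/* Every allocation pops a distinct node off the global list. */
	for (size_t i = 0; i < BATCH + 1; i++) {
		tabs[i] = seg_alloc();
		for (size_t j = 0; j < i; j++)
			assert(tabs[i] != tabs[j]);
	}
	return 0;
}
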
@@ -209,19 +238,21 @@ pmap_segtab_release(pmap_t pmap, pmap_se
 	vaddr_t va, vsize_t vinc)
 {
 	pmap_segtab_t *stp = *stp_p;
+	size_t i;
 
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pmaphist, "pm=%#jx stpp=%#jx free=%jd",
+	UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx stpp=%#jx free=%jd",
 	    (uintptr_t)pmap, (uintptr_t)stp_p, free_stp, 0);
-	UVMHIST_LOG(pmaphist, " callback=%jx flags=%jx va=%jx vinc=%jx",
+	UVMHIST_LOG(pmapsegtabhist, " callback=%jx flags=%jx va=%jx vinc=%jx",
 	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);
 
-	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
+	for (i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
 	     i < PMAP_SEGTABSIZE; i++, va += vinc) {
 #ifdef _LP64
 		if (vinc > NBSEG) {
 			if (stp->seg_seg[i] != NULL) {
-				UVMHIST_LOG(pmaphist, " recursing", 0, 0, 0, 0);
+				UVMHIST_LOG(pmapsegtabhist,
+				    " recursing %jd", i, 0, 0, 0);
 				pmap_segtab_release(pmap, &stp->seg_seg[i],
 				    true, callback, flags, va, vinc / NSEGPG);
 				KASSERT(stp->seg_seg[i] == NULL);
@@ -236,6 +267,17 @@ pmap_segtab_release(pmap_t pmap, pmap_se
 		if (pte == NULL)
 			continue;
 
+#ifdef __mips_n64
+		/*
+		 * XXX This is evil.  If vinc is 0x1000000 we are in
+		 * the last level, and this pte should be page aligned.
+		 */
+		if (vinc == 0x1000000 && ((uintptr_t)pte & PAGE_MASK) != 0) {
+			panic("%s: pte entry at %p not page aligned",
+			    __func__, pte);
+		}
+#endif
+
 		/*
 		 * If our caller wants a callback, do so.
 		 */
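
For reference, the walk in pmap_segtab_release() picks its starting slot
at each level with (va / vinc) & (PMAP_SEGTABSIZE - 1) and recurses with
vinc / NSEGPG until vinc reaches NBSEG.  A small worked example of that
index arithmetic for a 64-bit VA follows; the constant values are
illustrative assumptions, not taken from any particular port.  Each
printed index is the seg_seg[]/seg_tab[] slot the walk would visit at
that level:

#include <stdint.h>
#include <stdio.h>

#define PMAP_SEGTABSIZE	512		/* assumed slots per segtab */
#define NSEGPG		512		/* assumed segtabs per page */
#define NBSEG		0x400000	/* assumed bytes per segment */

int
main(void)
{
	uint64_t va = 0x0000007f3f20a000;

	/* Top level: each slot spans NBSEG * NSEGPG bytes of VA. */
	for (uint64_t vinc = (uint64_t)NBSEG * NSEGPG; vinc >= NBSEG;
	    vinc /= NSEGPG) {
		unsigned i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
		printf("vinc=%#llx -> index %u\n",
		    (unsigned long long)vinc, i);
	}
	return 0;
}
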
@@ -244,9 +286,16 @@ pmap_segtab_release(pmap_t pmap, pmap_se
 		}
 #ifdef DEBUG
 		for (size_t j = 0; j < NPTEPG; j++) {
-			if (!pte_zero_p(pte[j]))
-				panic("%s: pte entry at %p not 0 (%#"PRIxPTE")",
-				    __func__, &pte[j], pte_value(pte[j]));
+			if (!pte_zero_p(pte[j])) {
+				for (size_t k = j + 1; k < NPTEPG; k++) {
+					if (!pte_zero_p(pte[k]))
+						UVMHIST_LOG(pmapsegtabhist,
+						    "pte[%jd] = %#jx",
+						    k, pte_value(pte[k]), 0, 0);
+				}
+				panic("%s: pte entry at %p/%zu not 0 (%#"PRIxPTE")",
+				    __func__, &pte[j], j, pte_value(pte[j]));
+			}
 		}
 #endif
 		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
@@ -261,8 +310,9 @@ pmap_segtab_release(pmap_t pmap, pmap_se
 #endif
 
 		stp->seg_tab[i] = NULL;
-		UVMHIST_LOG(pmaphist, " zeroing tab[%jd]", i, 0, 0, 0);
+		UVMHIST_LOG(pmapsegtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
 	}
+	UVMHIST_LOG(pmapsegtabhist, " got to i=%jd", i, 0, 0, 0);
 
 	if (free_stp) {
 		pmap_check_stp(stp, __func__,
@@ -290,6 +340,7 @@ pmap_segtab_alloc(void)
 	pmap_segtab_t *stp;
 	bool found_on_freelist = false;
 
+	UVMHIST_FUNC(__func__);
 again:
 	mutex_spin_enter(&pmap_segtab_lock);
 	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
@@ -297,6 +348,7 @@ pmap_segtab_alloc(void)
 		stp->seg_seg[0] = NULL;
 		SEGTAB_ADD(nget, 1);
 		found_on_freelist = true;
+		UVMHIST_CALLARGS(pmapsegtabhist, "freelist stp=%#jx", (uintptr_t)stp, 0, 0, 0);
 	}
 	mutex_spin_exit(&pmap_segtab_lock);
 
@@ -314,6 +366,7 @@ pmap_segtab_alloc(void)
 		const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg);
 
 		stp = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stp_pa);
+		UVMHIST_CALLARGS(pmapsegtabhist, "new stp=%#jx", (uintptr_t)stp, 0, 0, 0);
 		const size_t n = NBPG / sizeof(*stp);
 		if (n > 1) {
 			/*
@@ -454,10 +507,8 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va
 {
 	pmap_segtab_t *stp = pmap->pm_segtab;
 	pt_entry_t *pte;
 
 	UVMHIST_FUNC(__func__);
-	UVMHIST_CALLARGS(pmaphist, "pm=%#jx va=%#jx flags=%jx",
-	    (uintptr_t)pmap, (uintptr_t)va, flags, 0);
 
 	pte = pmap_pte_lookup(pmap, va);
 	if (__predict_false(pte == NULL)) {
@@ -524,8 +575,9 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va
 	*pte_p = pte;
 #endif
 	KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);
-	UVMHIST_LOG(pmaphist, " set tab[%jd]=%jx",
-	    (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1), pte, 0, 0);
+	UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx va=%#jx -> tab[%jd]=%jx",
+	    (uintptr_t)pmap, (uintptr_t)va,
+	    (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1), (uintptr_t)pte);
 
 #ifdef DEBUG
 	for (size_t i = 0; i < NPTEPG; i++) {
Index: pmap/pmap_tlb.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap_tlb.c,v
retrieving revision 1.36
diff -p -u -r1.36 pmap_tlb.c
--- pmap/pmap_tlb.c	11 Aug 2020 06:54:14 -0000	1.36
+++ pmap/pmap_tlb.c	19 Aug 2020 05:01:41 -0000
@@ -564,7 +564,7 @@ pmap_tlb_shootdown_process(void)
 		 */
 		struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
 		KASSERT(ti->ti_victim != pmap_kernel());
-		if (!pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
+		if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
 			/*
 			 * The victim is an active pmap so we will just
 			 * invalidate its TLB entries.
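
The common thread in the UVMHIST changes above is a dedicated history
ring (pmapsegtabhist), so segtab events are not drowned out in the
general pmaphist.  Below is a minimal userland model of that pattern:
a static entry buffer, an init step that attaches it, and a log step
that records the function name, a format string, and four arguments
widened to uintmax_t -- which is why history format strings want
%jx/%jd rather than %zu or PRIxPTE.  The struct and function names
here are made up for illustration; the real macros live in
<sys/kernhist.h> and differ in detail:

#include <stdint.h>
#include <stdio.h>

struct hist_ent {
	const char *fn;		/* name of the logging function */
	const char *fmt;	/* %j-style format string */
	uintmax_t v[4];		/* arguments, widened like KERNHIST_LOG */
};

struct hist {
	struct hist_ent *ents;
	unsigned n, next;
};

static struct hist_ent segtabhistbuf[16];	/* cf. pmapsegtabhistbuf */
static struct hist segtabhist;

/* cf. UVMHIST_INIT_STATIC(): attach a static buffer to a history. */
static void
hist_init(struct hist *h, struct hist_ent *buf, unsigned n)
{
	h->ents = buf;
	h->n = n;
	h->next = 0;
}

/* cf. UVMHIST_LOG()/UVMHIST_CALLARGS(): record one ring entry. */
static void
hist_log(struct hist *h, const char *fn, const char *fmt,
    uintmax_t a, uintmax_t b, uintmax_t c, uintmax_t d)
{
	struct hist_ent *e = &h->ents[h->next++ % h->n];

	e->fn = fn;
	e->fmt = fmt;
	e->v[0] = a; e->v[1] = b; e->v[2] = c; e->v[3] = d;
}

int
main(void)
{
	hist_init(&segtabhist, segtabhistbuf, 16);
	hist_log(&segtabhist, __func__, "stp=%#jx",
	    (uintmax_t)0xdeadbeef, 0, 0, 0);

	/* Dump the ring in insertion order (simplified). */
	for (unsigned i = 0; i < segtabhist.next; i++) {
		struct hist_ent *e = &segtabhist.ents[i];
		printf("%s: fmt=\"%s\" v0=%#jx\n", e->fn, e->fmt, e->v[0]);
	}
	return 0;
}
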