Index: sys/arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.342
diff -u -p -r1.342 pmap.c
--- sys/arch/arm/arm32/pmap.c	23 Dec 2016 07:15:27 -0000	1.342
+++ sys/arch/arm/arm32/pmap.c	22 Feb 2017 16:45:31 -0000
@@ -1694,7 +1694,7 @@ pmap_l2ptp_ctor(void *arg, void *v, int
 	/*
 	 * Page tables must have the cache-mode set correctly.
 	 */
-	const pt_entry_t npte = (pte & ~L2_S_CACHE_MASK)
+	const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
 	    | pte_l2_s_cache_mode_pt;
 	l2pte_set(ptep, npte, opte);
 	PTE_SYNC(ptep);
@@ -1973,7 +1973,7 @@ pmap_vac_me_user(struct vm_page_md *md,
 		pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
 
 		if ((va != pv->pv_va || pm != pv->pv_pmap)
-		    && l2pte_valid_p(npte)) {
+		    && l2pte_valid_p(opte)) {
 #ifdef PMAP_CACHE_VIVT
 			pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va,
 			    true, pv->pv_flags);
@@ -2301,7 +2301,7 @@ pmap_vac_me_harder(struct vm_page_md *md
 		if (opte == npte)	/* only update is there's a change */
 			continue;
 
-		if (l2pte_valid_p(npte)) {
+		if (l2pte_valid_p(opte)) {
 			pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
 			    pv->pv_flags);
 		}
@@ -4275,7 +4275,7 @@ pmap_prefetchabt_fixup(void *v)
 	if ((opte & L2_S_PROT_U) == 0 || (opte & L2_XS_XN) == 0)
 		goto out;
 
-	paddr_t pa = l2pte_pa(pte);
+	paddr_t pa = l2pte_pa(opte);
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 	KASSERT(pg != NULL);
 
Index: sys/arch/arm/include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.144
diff -u -p -r1.144 pmap.h
--- sys/arch/arm/include/arm32/pmap.h	14 Jul 2016 05:00:51 -0000	1.144
+++ sys/arch/arm/include/arm32/pmap.h	22 Feb 2017 16:45:32 -0000
@@ -480,15 +480,21 @@ vtophys(vaddr_t va)
 extern int pmap_needs_pte_sync;
 #if defined(_KERNEL_OPT)
 /*
+ * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
+ * single MMU type is selected.
+ *
  * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
- * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
- * this at compile time.
+ * we need to do PTE syncs.  Additionally, V6 MMUs also need PTE syncs.
+ * Finally, MEMC, GENERIC and XSCALE MMUs do not need PTE syncs.
+ *
+ * Use run time evaluation for all other cases.
+ *
 */
-#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
+#if (ARM_NMMUS == 1)
+#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
 #define	PMAP_INCLUDE_PTE_SYNC
-#if (ARM_MMU_V6 > 0)
 #define	PMAP_NEEDS_PTE_SYNC	1
-#elif (ARM_MMU_SA1 == 0)
+#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
 #define	PMAP_NEEDS_PTE_SYNC	0
 #endif
 #endif
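
For illustration only (commentary, not part of the patch): the point of evaluating PMAP_NEEDS_PTE_SYNC at compile time is that PTE-sync code guarded by it can be folded away, or made unconditional, when the kernel configures exactly one MMU class; mixed configurations keep the run-time test.  Below is a minimal self-contained sketch of that effect; pt_entry_t and pte_sync_range() are local stand-ins for the real kernel type and cache-maintenance call, not the NetBSD definitions.

/*
 * Illustrative sketch only.  pt_entry_t and pte_sync_range() are
 * stand-ins; only the PMAP_NEEDS_PTE_SYNC usage mirrors the header.
 */
#include <stdint.h>
#include <stddef.h>

typedef uint32_t pt_entry_t;

static void
pte_sync_range(pt_entry_t *ptep, size_t len)
{
	/* stand-in for the real D-cache write-back of the PTE */
	(void)ptep; (void)len;
}

#ifndef PMAP_NEEDS_PTE_SYNC
int pmap_needs_pte_sync;		/* run-time fallback */
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#endif

static inline void
pte_sync(pt_entry_t *ptep)
{
	/*
	 * With the reworked header, PMAP_NEEDS_PTE_SYNC is the constant 1
	 * for an SA-1/V6-only kernel (the test folds away and the sync is
	 * unconditional), the constant 0 for a MEMC/GENERIC/XSCALE-only
	 * kernel (this body becomes dead code), and the run-time variable
	 * above for any mixed configuration.
	 */
	if (PMAP_NEEDS_PTE_SYNC)
		pte_sync_range(ptep, sizeof(*ptep));
}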