Index: sys/arch/powerpc/oea/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/oea/pmap.c,v
retrieving revision 1.96
diff -p -u -r1.96 pmap.c
--- sys/arch/powerpc/oea/pmap.c	31 May 2020 10:49:39 -0000	1.96
+++ sys/arch/powerpc/oea/pmap.c	31 May 2020 11:10:20 -0000
@@ -295,9 +295,11 @@ struct pvo_entry {
 #define	PVO_WIRED		0x0010		/* PVO entry is wired */
 #define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
 #define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
+#define	PVO_POOL_MANAGED	0x0080		/* PVO e. from managed pool */
 #define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
 #define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
 #define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
+#define	PVO_POOL_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_POOL_MANAGED)
 #define	PVO_ENTER_INSERT	0	/* PVO has been removed */
 #define	PVO_SPILL_UNSET		1	/* PVO has been evicted */
 #define	PVO_SPILL_SET		2	/* PVO has been spilled */
@@ -1556,7 +1558,7 @@ static struct pool *
 pmap_pvo_pl(struct pvo_entry *pvo)
 {
 
-	return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
+	return PVO_POOL_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
 }
 
 /*
@@ -1623,14 +1625,20 @@ pmap_pvo_enter(pmap_t pm, struct pool *p
 	--pmap_pvo_enter_depth;
 #endif
 	pmap_interrupts_restore(msr);
-	if (pvo) {
-		KASSERT(pmap_pvo_pl(pvo) == pl);
-	} else {
+	if (pvo == NULL) {
 		pvo = pool_get(pl, poolflags);
+		if (pvo != NULL) {
+			if (pl == &pmap_mpvo_pool)
+				pvo->pvo_vaddr = PVO_POOL_MANAGED;
+			else {
+				KASSERT(pl == &pmap_upvo_pool);
+				pvo->pvo_vaddr = 0;
+			}
+		}
 	}
 	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
 
 #ifdef DEBUG
 	/*
 	 * Exercise pmap_pvo_reclaim() a little.
 	 */
@@ -1660,9 +1668,8 @@ pmap_pvo_enter(pmap_t pm, struct pool *p
 		}
 	}
 
-	pvo->pvo_vaddr = va;
+	pvo->pvo_vaddr = PVO_POOL_MANAGED_P(pvo) | (va & ~ADDR_POFF);
 	pvo->pvo_pmap = pm;
-	pvo->pvo_vaddr &= ~ADDR_POFF;
 	if (flags & VM_PROT_EXECUTE) {
 		PMAPCOUNT(exec_mappings);
 		pvo_set_exec(pvo);