Index: sys/arch/hppa/hppa/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/hppa/pmap.c,v
retrieving revision 1.43.8.23
diff -u -r1.43.8.23 pmap.c
--- sys/arch/hppa/hppa/pmap.c	3 Feb 2009 09:18:15 -0000	1.43.8.23
+++ sys/arch/hppa/hppa/pmap.c	5 Feb 2009 15:40:31 -0000
@@ -198,7 +198,7 @@
 static inline struct pv_entry *pmap_pv_alloc(void);
 static inline void pmap_pv_free(struct pv_entry *);
 static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t,
-    vaddr_t , struct vm_page *);
+    vaddr_t, struct vm_page *, pt_entry_t *);
 static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t,
     vaddr_t);
 
@@ -220,12 +220,15 @@
 void pmap_dump_pv(paddr_t);
 #endif
 
-#ifdef PMAPDEBUG
-int pmap_check_alias(struct pv_entry *, vaddr_t, pt_entry_t);
-#endif
+pt_entry_t pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t,
+    pt_entry_t);
+
+static bool __changebit(struct vm_page *, u_int, u_int);
+static bool __testbit(struct vm_page *, u_int);
 
 #define pmap_pvh_attrs(a) \
-	(((a) & PTE_PROT(TLB_DIRTY)) | ((a) ^ PTE_PROT(TLB_REFTRAP)))
+	(((a) & PVF_MOD) | ((a) ^ PVF_REF) | ((a) & PVF_WRITE) | \
+	 ((a) & PVF_UNCACHEABLE))
 
 #define PMAP_LOCK(pm) \
 	do { \
@@ -495,27 +498,69 @@
 }
 #endif
 
-#ifdef PMAPDEBUG
-int
-pmap_check_alias(struct pv_entry *pve, vaddr_t va, pt_entry_t pte)
-{
-	int ret;
+/*
+ * Check for non-equiv aliases for this PA.  If we find any, mark
+ * all PTEs as uncacheable.
+ *
+ * We return a possibly updated PTE (we may have had to mark the PTE
+ * as uncacheable).
+ *
+ * - Must be called with pg->mdpage.pvh_lock held.
+ */
+pt_entry_t
+pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
+    pt_entry_t pte)
+{
+	bool marked_uncacheable = false;
+	bool write_capable = false;
+	struct pv_entry *tpve;
+	pt_entry_t ret = pte;
+	vaddr_t tva = 0;
+
+	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+
+	for (tpve = pve; tpve; tpve = tpve->pv_next) {
+		tva |= ((va & HPPA_PGAOFF) ^ (tpve->pv_va & HPPA_PGAOFF));
+
+		/*
+		 * As soon as we find evidence of non-equiv aliases, get on
+		 * with sorting things out.
+		 */
+		if (tva != 0)
+			break;
+	}
+
+	if ((pte & PTE_PROT(TLB_WRITE)) || __testbit(pg, PVF_WRITE))
+		write_capable = true;
 
-	/* check for non-equ aliased mappings */
-	for (ret = 0; pve; pve = pve->pv_next) {
-		pte |= pmap_vp_find(pve->pv_pmap, pve->pv_va);
-		if ((va & HPPA_PGAOFF) != (pve->pv_va & HPPA_PGAOFF) &&
-		    (pte & PTE_PROT(TLB_WRITE))) {
-			DPRINTF(PDB_FOLLOW|PDB_PARANOIA, ("pmap_check_alias: "
-			    "aliased writable mapping 0x%x:0x%x\n",
-			    pve->pv_pmap->pm_space, (uint32_t)pve->pv_va));
-			ret++;
+	if (__testbit(pg, PVF_UNCACHEABLE))
+		marked_uncacheable = true;
+
+	/* We have non-equiv aliases */
+	if (tva != 0) {
+		if (write_capable) {
+			/*
+			 * We have non-equiv aliases and some mappings are
+			 * writable.  We must mark the pages as uncacheable
+			 * (if they're not already marked as such).
+			 */
+			if (!marked_uncacheable)
+				__changebit(pg, PVF_UNCACHEABLE, 0);
+
+			ret |= PTE_PROT(TLB_UNCACHEABLE);
+		} else if (marked_uncacheable) {
+			/*
+			 * We have non-equiv aliases and they're marked
+			 * uncacheable, but no one wants to write to them.
+			 * It's now safe for us to mark them cacheable.
+			 */
+			if (!__testbit(pg, PVF_NC))
+				__changebit(pg, 0, PVF_UNCACHEABLE);
 		}
 	}
 
 	return (ret);
 }
-#endif
 
 /*
  * This allocates and returns a new struct pv_entry.
@@ -546,7 +591,7 @@
 
 static inline void
 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
-    vaddr_t va, struct vm_page *pdep)
+    vaddr_t va, struct vm_page *pdep, pt_entry_t *pte)
 {
 	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
 
@@ -557,10 +602,8 @@
 	pve->pv_ptp = pdep;
 	pve->pv_next = pg->mdpage.pvh_list;
 	pg->mdpage.pvh_list = pve;
-#ifdef PMAPDEBUG
-	if (pmap_check_alias(pve, va, 0))
-		Debugger();
-#endif
+
+	*pte = pmap_check_alias(pg, pve, va, *pte);
 }
 
 static inline struct pv_entry *
@@ -1234,7 +1277,7 @@
 			}
 			panic("pmap_enter: no pv entries available");
 		}
-		pmap_pv_enter(pg, pve, pmap, va, ptp);
+		pmap_pv_enter(pg, pve, pmap, va, ptp, &pte);
 		mutex_exit(&pg->mdpage.pvh_lock);
 	} else if (pve) {
 		pmap_pv_free(pve);
@@ -1455,11 +1498,27 @@
 {
-	struct pv_entry *pve;
-	pt_entry_t res;
+	bool rv;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_changebit(%p, %x, %x)\n",
 	    pg, set, clear));
 
 	mutex_enter(&pg->mdpage.pvh_lock);
+	rv = __changebit(pg, set, clear);
+	mutex_exit(&pg->mdpage.pvh_lock);
+
+	return rv;
+}
+
+/*
+ * Must be called with pg->mdpage.pvh_lock held.
+ */
+static bool
+__changebit(struct vm_page *pg, u_int set, u_int clear)
+{
+	struct pv_entry *pve;
+	pt_entry_t res;
+
+	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+
 	res = pg->mdpage.pvh_attrs = 0;
 	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
 		pmap_t pmap = pve->pv_pmap;
@@ -1473,7 +1532,7 @@
 #ifdef PMAPDEBUG
 		if (!pte) {
 			DPRINTF(PDB_FOLLOW|PDB_BITS,
-			    ("pmap_changebit: zero pte for 0x%x\n",
+			    ("__changebit: zero pte for 0x%x\n",
 			    (int)va));
 			continue;
 		}
@@ -1490,7 +1549,6 @@
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	mutex_exit(&pg->mdpage.pvh_lock);
 
 	return ((res & (clear | set)) != 0);
 }
@@ -1500,10 +1558,26 @@
 {
-	struct pv_entry *pve;
-	pt_entry_t pte;
+	bool rv;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %x)\n", pg, bit));
 
 	mutex_enter(&pg->mdpage.pvh_lock);
+	rv = __testbit(pg, bit);
+	mutex_exit(&pg->mdpage.pvh_lock);
+
+	return rv;
+}
+
+/*
+ * Must be called with pg->mdpage.pvh_lock held.
+ */
+static bool
+__testbit(struct vm_page *pg, u_int bit)
+{
+	struct pv_entry *pve;
+	pt_entry_t pte;
+
+	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+
 	for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
 	    pve = pve->pv_next) {
 		pmap_t pm = pve->pv_pmap;
@@ -1514,6 +1588,5 @@
 		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 	}
-	mutex_exit(&pg->mdpage.pvh_lock);
 
 	return ((pg->mdpage.pvh_attrs & bit) != 0);
 }
@@ -1633,6 +1706,7 @@
 {
 	volatile pt_entry_t *pde;
 	pt_entry_t pte, opte;
+	struct vm_page *pg;
 
 #ifdef PMAPDEBUG
 	int opmapdebug = pmapdebug;
@@ -1656,27 +1730,34 @@
 	opte = pmap_pte_get(pde, va);
 	pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
 	    pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
-	if (pa >= HPPA_IOBEGIN || (prot & PMAP_NC))
+
+	/* pg is NULL for unmanaged pages (e.g. I/O space). */
+	pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
+
+	if (pg != NULL)
+		mutex_enter(&pg->mdpage.pvh_lock);
+
+	if (pa >= HPPA_IOBEGIN || (prot & PMAP_NC)) {
 		pte |= PTE_PROT(TLB_UNCACHEABLE);
+
+		/*
+		 * Guard against pmap_check_alias() marking this
+		 * page as cacheable.
+		 */
+		if (pg != NULL)
+			__changebit(pg, PVF_NC, 0);
+	}
+
+	if (pg != NULL) {
+		pte = pmap_check_alias(pg, pg->mdpage.pvh_list, va, pte);
+		mutex_exit(&pg->mdpage.pvh_lock);
+	}
+
 	pmap_pte_set(pde, va, pte);
 	pmap_kernel()->pm_stats.wired_count++;
 	pmap_kernel()->pm_stats.resident_count++;
 	if (opte)
 		pmap_pte_flush(pmap_kernel(), va, opte);
-#ifdef PMAPDEBUG
-	if (pmap_initialized) {
-		struct vm_page *pg;
-
-		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-		if (pg != NULL) {
-			mutex_enter(&pg->mdpage.pvh_lock);
-			if (pmap_check_alias(pg->mdpage.pvh_list, va, pte))
-				Debugger();
-			mutex_exit(&pg->mdpage.pvh_lock);
-		}
-	}
-#endif
-
 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_kenter_pa: leaving\n"));
 
 #ifdef PMAPDEBUG
Index: sys/arch/hppa/include/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/include/pmap.h,v
retrieving revision 1.16.18.10
diff -u -r1.16.18.10 pmap.h
--- sys/arch/hppa/include/pmap.h	30 Jan 2009 08:21:59 -0000	1.16.18.10
+++ sys/arch/hppa/include/pmap.h	5 Feb 2009 15:40:31 -0000
@@ -64,8 +64,24 @@
 
 #define PVF_MOD		PTE_PROT(TLB_DIRTY)	/* page/mapping is modified */
 #define PVF_REF		PTE_PROT(TLB_REFTRAP)	/* page/mapping (inv) is referenced */
+#define PVF_WRITE	PTE_PROT(TLB_WRITE)	/* page/mapping is writable */
 
-#define PVF_UNCACHEABLE	0x1000		/* page is uncacheable */
+/*
+ * Both of these flags mark a page as uncacheable.  We need two flags
+ * because pages can be marked uncacheable for two reasons:
+ *
+ * 1) A page's contents may change under our feet,
+ *    e.g. I/O space (PVF_NC)
+ * 2) A page has non-equivalent aliases (PVF_UNCACHEABLE)
+ *
+ * A page that is marked both PVF_UNCACHEABLE and PVF_NC can *never* be
+ * marked cacheable, because the caller of pmap_enter()/pmap_kenter_pa()
+ * specifically asked for the mapping to be uncacheable.  However, a page
+ * that only has the PVF_UNCACHEABLE flag set may be marked cacheable
+ * again once we can be sure there are no non-equivalent aliases left.
+ */
+#define PVF_NC		PTE_PROT(TLB_NC)	/* software only */
+#define PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE)
 
 #define HPPA_MAX_PID	0xfffa
 #define HPPA_SID_MAX	0x7ffd
Index: sys/arch/hppa/include/pte.h
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/include/pte.h,v
retrieving revision 1.2.120.2
diff -u -r1.2.120.2 pte.h
--- sys/arch/hppa/include/pte.h	29 Oct 2008 19:35:19 -0000	1.2.120.2
+++ sys/arch/hppa/include/pte.h	5 Feb 2009 15:40:31 -0000
@@ -40,6 +40,7 @@
 #define PTE_PAGE(pte)	((pte) & ~PGOFSET)
 
 /* TLB access/protection values */
+#define TLB_NC		0x80000000	/* software only */
 #define TLB_WIRED	0x40000000	/* software only */
 #define TLB_REFTRAP	0x20000000	/* bit 2, T */
 #define TLB_DIRTY	0x10000000	/* bit 3, D */
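
For readers following the cache logic: PA-RISC caches are virtually indexed, so the hardware only keeps two mappings of one physical page coherent when their virtual addresses agree in the alias bits, which is exactly what the loop at the top of pmap_check_alias() tests. Below is a standalone user-space sketch of that test. It is illustrative only and not part of the patch: ALIAS_MASK is a placeholder standing in for HPPA_PGAOFF, and struct mapping is a toy stand-in for the pv_entry chain.

/*
 * Illustrative sketch (not kernel code) of the non-equivalent alias
 * test performed by pmap_check_alias().  ALIAS_MASK and struct mapping
 * are placeholders for HPPA_PGAOFF and the pv_entry chain.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ALIAS_MASK	0x000fffffUL	/* placeholder alias bits */

struct mapping {
	unsigned long va;		/* stands in for pv_entry::pv_va */
	struct mapping *next;
};

/*
 * True if a new mapping at 'va' would be a non-equivalent alias of an
 * existing mapping, i.e. the virtual addresses differ within the alias
 * bits, so the virtually indexed cache could hold two distinct copies.
 */
static bool
has_nonequiv_alias(unsigned long va, const struct mapping *list)
{
	const struct mapping *m;

	for (m = list; m != NULL; m = m->next)
		if (((va ^ m->va) & ALIAS_MASK) != 0)
			return true;
	return false;
}

int
main(void)
{
	struct mapping m2 = { 0x00300000UL, NULL };	/* equivalent to m1 */
	struct mapping m1 = { 0x00200000UL, &m2 };

	printf("%d\n", has_nonequiv_alias(0x00200400UL, &m1));	/* 1 */
	printf("%d\n", has_nonequiv_alias(0x00500000UL, &m1));	/* 0 */
	return 0;
}

Once such an alias set exists and any mapping of the page is write-capable, every mapping must be made uncacheable; read-only non-equivalent aliases are harmless, which is why pmap_check_alias() re-enables caching when no writer remains.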
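
The interaction of the two flags described in the pmap.h comment reduces to a small decision rule. The helper below is hypothetical (it does not exist in the tree) and only restates that rule in code form:

/*
 * Hypothetical helper restating the PVF_NC / PVF_UNCACHEABLE policy:
 * PVF_NC is sticky because the caller demanded an uncacheable mapping;
 * alias-induced uncacheability lasts only while a writable
 * non-equivalent alias exists.
 */
#include <stdbool.h>

static bool
must_stay_uncacheable(bool pvf_nc, bool nonequiv_alias, bool write_capable)
{
	if (pvf_nc)
		return true;		/* caller asked for uncacheable */
	if (nonequiv_alias && write_capable)
		return true;		/* cache cannot keep copies coherent */
	return false;			/* safe to (re-)enable caching */
}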