Index: sparc64/clock.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc64/sparc64/clock.c,v retrieving revision 1.101 diff -p -r1.101 clock.c *** sparc64/clock.c 11 Mar 2010 03:54:56 -0000 1.101 --- sparc64/clock.c 13 Jan 2011 11:00:58 -0000 *************** clockintr(void *cap) *** 501,507 **** --- 501,509 ---- } #endif /* Let locore.s clear the interrupt for us. */ + kpreempt_disable(); hardclock((struct clockframe *)cap); + kpreempt_enable(); return (1); } *************** tickintr(void *cap) *** 518,523 **** --- 520,526 ---- { int s; + kpreempt_disable(); hardclock((struct clockframe *)cap); s = intr_disable(); *************** tickintr(void *cap) *** 525,530 **** --- 528,534 ---- next_tick(curcpu()->ci_tick_increment); intr_restore(s); curcpu()->ci_tick_evcnt.ev_count++; + kpreempt_enable(); return (1); } *************** statintr(void *cap) *** 539,544 **** --- 543,549 ---- register u_long newint, r, var; struct cpu_info *ci = curcpu(); + kpreempt_disable(); #ifdef NOT_DEBUG printf("statclock: count %x:%x, limit %x:%x\n", timerreg_4u.t_timer[1].t_count, timerreg_4u.t_timer[1].t_limit); *************** statintr(void *cap) *** 548,553 **** --- 553,559 ---- #endif statclock((struct clockframe *)cap); #ifdef NOTDEF_DEBUG + kpreempt_enable(); /* Don't re-schedule the IRQ */ return 1; #endif *************** statintr(void *cap) *** 569,574 **** --- 575,581 ---- } stxa((vaddr_t)&timerreg_4u.t_timer[1].t_limit, ASI_NUCLEUS, tmr_ustolim(newint)|TMR_LIM_IEN|TMR_LIM_RELOAD); + kpreempt_enable(); return (1); } *************** int *** 576,582 **** --- 583,591 ---- schedintr(void *arg) { + kpreempt_disable(); schedclock(curcpu()->ci_data.cpu_onproc); + kpreempt_enable(); return (1); } #endif Index: sparc64/machdep.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc64/sparc64/machdep.c,v retrieving revision 1.254 diff -p -r1.254 machdep.c *** sparc64/machdep.c 26 Jun 
2010 08:40:01 -0000 1.254 --- sparc64/machdep.c 13 Jan 2011 11:00:59 -0000 *************** cpu_signotify(struct lwp *l) *** 2039,2046 **** bool cpu_intr_p(void) { ! return curcpu()->ci_idepth >= 0; } #ifdef MODULAR --- 2056,2068 ---- bool cpu_intr_p(void) { + int idepth; + + kpreempt_disable(); + idepth = curcpu()->ci_idepth; + kpreempt_enable(); ! return idepth >= 0; } #ifdef MODULAR Index: sparc64/pmap.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc64/sparc64/pmap.c,v retrieving revision 1.269 diff -p -r1.269 pmap.c *** sparc64/pmap.c 12 Nov 2010 07:59:27 -0000 1.269 --- sparc64/pmap.c 13 Jan 2011 11:00:59 -0000 *************** pmap_kextract(vaddr_t va) *** 549,554 **** --- 549,555 ---- int i; paddr_t paddr = (paddr_t)-1; + kpreempt_disable(); for (i = 0; i < kernel_tlb_slots; i++) { if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) { paddr = kernel_tlbs[i].te_pa + *************** pmap_kextract(vaddr_t va) *** 561,566 **** --- 562,568 ---- panic("pmap_kextract: Address %p is not from kernel space.\n" "Data segment is too small?\n", (void*)va); } + kpreempt_enable(); return (paddr); } *************** pmap_growkernel(vaddr_t maxkvaddr) *** 1342,1347 **** --- 1344,1350 ---- (void *)KERNEND, (void *)maxkvaddr); return (kbreak); } + kpreempt_disable(); took_lock = lock_available; if (__predict_true(took_lock)) mutex_enter(&pmap_lock); *************** pmap_growkernel(vaddr_t maxkvaddr) *** 1364,1369 **** --- 1367,1373 ---- } if (__predict_true(took_lock)) mutex_exit(&pmap_lock); + kpreempt_enable(); return (kbreak); } *************** pmap_kenter_pa(vaddr_t va, paddr_t pa, v *** 1544,1549 **** --- 1548,1554 ---- * Construct the TTE. 
*/ + kpreempt_disable(); ENTER_STAT(unmanaged); if (pa & (PMAP_NVC|PMAP_NC)) { ENTER_STAT(ci); *************** pmap_kenter_pa(vaddr_t va, paddr_t pa, v *** 1576,1581 **** --- 1581,1587 ---- (long long)ptp); pmap_free_page_noflush(ptp); } + kpreempt_enable(); #ifdef DEBUG i = ptelookup_va(va); if (pmapdebug & PDB_ENTER) *************** pmap_kremove(vaddr_t va, vsize_t size) *** 1614,1619 **** --- 1620,1626 ---- KASSERT(va < kdata || va > ekdata); DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size)); + kpreempt_disable(); for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) { #ifdef DIAGNOSTIC *************** pmap_kremove(vaddr_t va, vsize_t size) *** 1657,1662 **** --- 1664,1670 ---- tlb_flush_pte(va, pm); dcache_flush_page_all(pa); } + kpreempt_enable(); if (flush) REMOVE_STAT(flushes); } *************** pmap_enter(struct pmap *pm, vaddr_t va, *** 1686,1691 **** --- 1694,1700 ---- KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK); KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata); + kpreempt_disable(); /* Grab a spare PV. */ freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT); if (__predict_false(freepv == NULL)) { *************** pmap_enter(struct pmap *pm, vaddr_t va, *** 1923,1928 **** --- 1932,1938 ---- /* We will let the fast mmu miss interrupt load the new translation */ pv_check(); out: + kpreempt_enable(); /* Catch up on deferred frees. 
*/ for (; freepv != NULL; freepv = npv) { npv = freepv->pv_next; *************** pmap_protect(struct pmap *pm, vaddr_t sv *** 2107,2112 **** --- 2117,2123 ---- return; } + kpreempt_disable(); mutex_enter(&pmap_lock); sva = trunc_page(sva); for (; sva < eva; sva += PAGE_SIZE) { *************** pmap_protect(struct pmap *pm, vaddr_t sv *** 2118,2123 **** --- 2129,2135 ---- sva < roundup(ekdata, 4 * MEG)) { prom_printf("pmap_protect: va=%08x in locked TLB\n", sva); + kpreempt_enable(); prom_abort(); return; } *************** pmap_protect(struct pmap *pm, vaddr_t sv *** 2168,2173 **** --- 2180,2186 ---- } pv_check(); mutex_exit(&pmap_lock); + kpreempt_enable(); } /* *************** pmap_kprotect(vaddr_t va, vm_prot_t prot *** 2254,2259 **** --- 2267,2273 ---- int64_t data; int rv; + kpreempt_disable(); mutex_enter(&pmap_lock); data = pseg_get(pm, va); KASSERT(data & TLB_V); *************** pmap_kprotect(vaddr_t va, vm_prot_t prot *** 2269,2274 **** --- 2283,2289 ---- tsb_invalidate(va, pm); tlb_flush_pte(va, pm); mutex_exit(&pmap_lock); + kpreempt_enable(); } /* *************** pmap_clear_modify(struct vm_page *pg) *** 2491,2496 **** --- 2506,2512 ---- modified = pmap_is_modified(pg); #endif + kpreempt_disable(); mutex_enter(&pmap_lock); /* Clear all mappings */ pv = &md->mdpg_pvh; *************** pmap_clear_modify(struct vm_page *pg) *** 2543,2548 **** --- 2559,2565 ---- } pv_check(); mutex_exit(&pmap_lock); + kpreempt_enable(); #ifdef DEBUG if (pmap_is_modified(pg)) { printf("pmap_clear_modify(): %p still modified!\n", pg); *************** pmap_clear_reference(struct vm_page *pg) *** 2570,2575 **** --- 2587,2593 ---- int referenced = 0; #endif + kpreempt_disable(); mutex_enter(&pmap_lock); #ifdef DEBUG DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg)); *************** pmap_clear_reference(struct vm_page *pg) *** 2648,2653 **** --- 2666,2672 ---- } #endif mutex_exit(&pmap_lock); + kpreempt_enable(); return (changed); } *************** 
pmap_unwire(pmap_t pmap, vaddr_t va) *** 2815,2820 **** --- 2834,2840 ---- return; } #endif + kpreempt_disable(); mutex_enter(&pmap_lock); data = pseg_get(pmap, va & PV_VAMASK); KASSERT(data & TLB_V); *************** pmap_unwire(pmap_t pmap, vaddr_t va) *** 2824,2829 **** --- 2844,2850 ---- panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv); pv_check(); mutex_exit(&pmap_lock); + kpreempt_enable(); } /* *************** pmap_page_protect(struct vm_page *pg, vm *** 2847,2852 **** --- 2868,2874 ---- DPRINTF(PDB_CHANGEPROT, ("pmap_page_protect: pg %p prot %x\n", pg, prot)); + kpreempt_disable(); mutex_enter(&pmap_lock); pv = &md->mdpg_pvh; if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { *************** pmap_page_protect(struct vm_page *pg, vm *** 2999,3004 **** --- 3021,3027 ---- /* We should really only flush the pages we demapped. */ pv_check(); mutex_exit(&pmap_lock); + kpreempt_enable(); /* Catch up on deferred frees. */ for (; freepv != NULL; freepv = npv) { *************** pmap_copy_page(paddr_t src, paddr_t dst) *** 3690,3696 **** --- 3713,3721 ---- if (!cold) dcache_flush_page_all(dst); + kpreempt_disable(); pmap_copy_page_phys(src, dst); + kpreempt_enable(); } void *************** pmap_zero_page(paddr_t pa) *** 3699,3703 **** --- 3724,3730 ---- if (!cold) dcache_flush_page_all(pa); + kpreempt_disable(); pmap_zero_page_phys(pa); + kpreempt_enable(); }