Index: sparc/intr.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/intr.c,v
retrieving revision 1.108
diff -p -r1.108 intr.c
*** sparc/intr.c	5 Jan 2010 21:38:50 -0000	1.108
--- sparc/intr.c	13 Jan 2011 06:41:45 -0000
*************** static void
*** 369,374 ****
--- 369,376 ----
  xcallintr(void *v)
  {
+ 	kpreempt_disable();
+ 
  	/* Tally */
  	lev13_evcnt.ev_count++;
*************** xcallintr(void *v)
*** 385,390 ****
--- 387,394 ----
  	}
  	cpuinfo.msg.tag = 0;
  	cpuinfo.msg.complete = 1;
+ 
+ 	kpreempt_enable();
  }
  #endif /* MULTIPROCESSOR */
  #endif /* SUN4M || SUN4D */
*************** intr_biglock_wrapper(void *vp)
*** 858,863 ****
  bool
  cpu_intr_p(void)
  {
  
! 	return curcpu()->ci_idepth != 0;
  }
--- 862,872 ----
  bool
  cpu_intr_p(void)
  {
+ 	int idepth;
+ 
+ 	kpreempt_disable();
+ 	idepth = curcpu()->ci_idepth;
+ 	kpreempt_enable();
  
! 	return idepth != 0;
  }
Index: sparc/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/pmap.c,v
retrieving revision 1.340
diff -p -r1.340 pmap.c
*** sparc/pmap.c	26 Apr 2010 09:26:25 -0000	1.340
--- sparc/pmap.c	13 Jan 2011 06:41:45 -0000
*************** inline void
*** 818,824 ****
--- 818,826 ----
  setpgt4m(int *ptep, int pte)
  {
  
+ 	kpreempt_disable();
  	swap(ptep, pte);
+ 	kpreempt_enable();
  }
  
  inline void
*************** setpgt4m_va(vaddr_t va, int *ptep, int p
*** 829,837 ****
--- 831,841 ----
  #if defined(MULTIPROCESSOR)
  	updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset);
  #else
+ 	kpreempt_disable();
  	if (__predict_true(pageflush))
  		tlb_flush_page(va, ctx, 0);
  	setpgt4m(ptep, pte);
+ 	kpreempt_enable();
  #endif /* MULTIPROCESSOR */
  }
*************** pmap_enk4m(struct pmap *pm, vaddr_t va, 
*** 6247,6252 ****
--- 6251,6257 ----
  	rp = &pm->pm_regmap[vr];
  	sp = &rp->rg_segmap[vs];
  
+ 	kpreempt_disable();
  	s = splvm();		/* XXX way too conservative */
  	PMAP_LOCK();
*************** pmap_enk4m(struct pmap *pm, vaddr_t va, 
*** 6261,6269 ****
  		if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
  			/* just changing protection and/or wiring */
  			pmap_changeprot4m(pm, va, prot, flags);
! 			PMAP_UNLOCK();
! 			splx(s);
! 			return (0);
  		}
  
  		if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
--- 6266,6273 ----
  		if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
  			/* just changing protection and/or wiring */
  			pmap_changeprot4m(pm, va, prot, flags);
! 			error = 0;
! 			goto out;
  		}
  
  		if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
*************** printf("pmap_enk4m: changing existing va
*** 6309,6314 ****
--- 6313,6319 ----
  out:
  	PMAP_UNLOCK();
  	splx(s);
+ 	kpreempt_enable();
  	return (error);
  }
*************** pmap_enu4m(struct pmap *pm, vaddr_t va, 
*** 6438,6446 ****
  			/* just changing prot and/or wiring */
  			/* caller should call this directly: */
  			pmap_changeprot4m(pm, va, prot, flags);
! 			PMAP_UNLOCK();
! 			splx(s);
! 			return (0);
  		}
  		/*
  		 * Switcheroo: changing pa for this va.
--- 6443,6450 ----
  			/* just changing prot and/or wiring */
  			/* caller should call this directly: */
  			pmap_changeprot4m(pm, va, prot, flags);
! 			error = 0;
! 			goto out;
  		}
  		/*
  		 * Switcheroo: changing pa for this va.
*************** pmap_kremove4m(vaddr_t va, vsize_t len)
*** 6551,6556 ****
--- 6555,6561 ----
  	 * The kernel pmap doesn't need to be locked, but the demap lock
  	 * in updatepte() requires interrupt protection.
  	 */
+ 	kpreempt_disable();
  	s = splvm();
  
  	endva = va + len;
*************** pmap_kremove4m(vaddr_t va, vsize_t len)
*** 6596,6601 ****
--- 6601,6607 ----
  		}
  	}
  	splx(s);
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_kprotect4m(vaddr_t va, vsize_t size
*** 6616,6621 ****
--- 6622,6628 ----
  	 * The kernel pmap doesn't need to be locked, but the demap lock
  	 * in updatepte() requires interrupt protection.
  	 */
+ 	kpreempt_disable();
  	s = splvm();
  
  	while (size > 0) {
*************** pmap_kprotect4m(vaddr_t va, vsize_t size
*** 6639,6644 ****
--- 6646,6652 ----
  		size -= NBPG;
  	}
  	splx(s);
+ 	kpreempt_enable();
  }
  #endif /* SUN4M || SUN4D */
*************** pmap_unwire(struct pmap *pm, vaddr_t va)
*** 6654,6659 ****
--- 6662,6668 ----
  	struct segmap *sp;
  	bool owired;
  
+ 	kpreempt_disable();
  	vr = VA_VREG(va);
  	vs = VA_VSEG(va);
  	rp = &pm->pm_regmap[vr];
*************** pmap_unwire(struct pmap *pm, vaddr_t va)
*** 6672,6677 ****
--- 6681,6687 ----
  	if (!owired) {
  		pmap_stats.ps_useless_changewire++;
+ 		kpreempt_enable();
  		return;
  	}
  
  	pm->pm_stats.wired_count--;
*************** pmap_unwire(struct pmap *pm, vaddr_t va)
*** 6686,6691 ****
--- 6696,6702 ----
  		mmu_pmeg_unlock(sp->sg_pmeg);
  	}
  #endif /* SUN4 || SUN4C */
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_zero_page4m(paddr_t pa)
*** 7088,7093 ****
--- 7099,7105 ----
  	void *va;
  	int pte;
  
+ 	kpreempt_disable();
  	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
  		/*
  		 * The following VAC flush might not be necessary since the
*************** pmap_zero_page4m(paddr_t pa)
*** 7115,7120 ****
--- 7127,7133 ----
  	 */
  	sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
  	setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_zero_page_viking_mxcc(paddr_t pa)
*** 7127,7132 ****
--- 7140,7146 ----
  	u_int stream_data_addr = MXCC_STREAM_DATA;
  	uint64_t v = (uint64_t)pa;
  
+ 	kpreempt_disable();
  	/* Load MXCC stream data register with 0 (bottom 32 bytes only) */
  	stda(stream_data_addr+0, ASI_CONTROL, 0);
  	stda(stream_data_addr+8, ASI_CONTROL, 0);
*************** pmap_zero_page_viking_mxcc(paddr_t pa)
*** 7138,7143 ****
--- 7152,7158 ----
  	for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
  		stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset);
  	}
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_zero_page_hypersparc(paddr_t pa)
*** 7151,7156 ****
--- 7166,7172 ----
  	int pte;
  	int offset;
  
+ 	kpreempt_disable();
  	/*
  	 * We still have to map the page, since ASI_BLOCKFILL
  	 * takes virtual addresses. This also means we have to
*************** pmap_zero_page_hypersparc(paddr_t pa)
*** 7177,7182 ****
--- 7193,7199 ----
  	/* Remove temporary mapping */
  	sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
  	setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_copy_page4m(paddr_t src, paddr_t ds
*** 7195,7200 ****
--- 7212,7218 ----
  	void *sva, *dva;
  	int spte, dpte;
  
+ 	kpreempt_disable();
  	if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
  		if (CACHEINFO.c_vactype == VAC_WRITEBACK)
  			pv_flushcache4m(pg);
*************** pmap_copy_page4m(paddr_t src, paddr_t ds
*** 7225,7230 ****
--- 7243,7249 ----
  	setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
  	sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
  	setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_copy_page_viking_mxcc(paddr_t src, 
*** 7237,7242 ****
--- 7256,7262 ----
  	uint64_t v1 = (uint64_t)src;
  	uint64_t v2 = (uint64_t)dst;
  
+ 	kpreempt_disable();
  	/* Enable cache-coherency */
  	v1 |= MXCC_STREAM_C;
  	v2 |= MXCC_STREAM_C;
*************** pmap_copy_page_viking_mxcc(paddr_t src, 
*** 7246,7251 ****
--- 7266,7272 ----
  		stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset);
  		stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset);
  	}
+ 	kpreempt_enable();
  }
  
  /*
*************** pmap_copy_page_hypersparc(paddr_t src, p
*** 7259,7264 ****
--- 7280,7286 ----
  	int spte, dpte;
  	int offset;
  
+ 	kpreempt_disable();
  	/*
  	 * We still have to map the pages, since ASI_BLOCKCOPY
  	 * takes virtual addresses. This also means we have to
*************** pmap_copy_page_hypersparc(paddr_t src, p
*** 7297,7302 ****
--- 7319,7325 ----
  	setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
  	sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
  	setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
+ 	kpreempt_enable();
  }
  #endif /* SUN4M || SUN4D */
Index: sparc/timer_sun4m.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/timer_sun4m.c,v
retrieving revision 1.22
diff -p -r1.22 timer_sun4m.c
*** sparc/timer_sun4m.c	13 Jan 2010 02:17:12 -0000	1.22
--- sparc/timer_sun4m.c	13 Jan 2011 06:41:45 -0000
*************** void
*** 100,105 ****
--- 100,106 ----
  schedintr_4m(void *v)
  {
  
+ 	kpreempt_disable();
  #ifdef MULTIPROCESSOR
  	/*
  	 * We call hardclock() here so that we make sure it is called on
*************** schedintr_4m(void *v)
*** 115,120 ****
--- 116,122 ----
  	if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0)
  #endif
  		schedclock(curlwp);
+ 	kpreempt_enable();
  }
*************** clockintr_4m(void *cap)
*** 136,141 ****
--- 138,144 ----
  	 * For MP, we defer calling hardclock() to the schedintr so
  	 * that we call it on all cpus.
  	 */
  	cpuinfo.ci_lev10.ev_count++;
  	if (cold)
  		return 0;
+ 	kpreempt_disable();
*************** clockintr_4m(void *cap)
*** 145,150 ****
--- 148,154 ----
  #if !defined(MULTIPROCESSOR)
  	hardclock((struct clockframe *)cap);
  #endif
+ 	kpreempt_enable();
  	return (1);
  }
*************** statintr_4m(void *cap)
*** 157,162 ****
--- 161,168 ----
  	struct clockframe *frame = cap;
  	u_long newint;
  
+ 	kpreempt_disable();
+ 
  	cpuinfo.ci_lev14.ev_count++;
  
  	/* read the limit register to clear the interrupt */
*************** statintr_4m(void *cap)
*** 197,202 ****
--- 203,209 ----
  #if !defined(MULTIPROCESSOR)
  	}
  #endif
+ 	kpreempt_enable();
  	return (1);
  }
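
Every hunk above applies the same bracket: per-CPU state (cpuinfo,
curcpu()->ci_idepth, the per-CPU vpage PTEs) is only stable while kernel
preemption is off, since a preempted LWP may resume on a different CPU
between the access and its use. A minimal standalone sketch of the
pattern, modelled on the cpu_intr_p() change above; the function name
sketch_cpu_intr_p and the exact include set are illustrative
assumptions, not part of the patch:

#include <sys/param.h>
#include <sys/systm.h>	/* kpreempt_disable(), kpreempt_enable() */
#include <sys/cpu.h>	/* curcpu() */

/*
 * Sketch only: read a per-CPU field without racing against
 * migration to another CPU.
 */
static bool
sketch_cpu_intr_p(void)
{
	int idepth;

	kpreempt_disable();		/* LWP can no longer migrate off this CPU */
	idepth = curcpu()->ci_idepth;	/* per-CPU read is now stable */
	kpreempt_enable();		/* preemption (and migration) allowed again */

	return idepth != 0;
}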