Index: sys/arch/sparc/dev/zs.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/dev/zs.c,v retrieving revision 1.111.6.3 diff -p -r1.111.6.3 zs.c *** sys/arch/sparc/dev/zs.c 16 Jan 2011 12:54:42 -0000 1.111.6.3 --- sys/arch/sparc/dev/zs.c 21 Feb 2011 03:17:29 -0000 *************** CFATTACH_DECL_NEW(zs_obio, sizeof(struct *** 171,182 **** extern struct cfdriver zs_cd; - /* softintr(9) cookie, shared by all instances of this driver */ - static void *zs_sicookie; - /* Interrupt handlers. */ static int zshard(void *); - static void zssoft(void *); static int zs_get_speed(struct zs_chanstate *); --- 171,178 ---- *************** zs_attach(struct zsc_softc *zsc, struct *** 396,402 **** struct zsc_attach_args zsc_args; struct zs_chanstate *cs; int channel; - static int didintr, prevpri; #if (NKBD > 0) || (NMS > 0) int ch0_is_cons = 0; #endif --- 392,397 ---- *************** zs_attach(struct zsc_softc *zsc, struct *** 407,418 **** return; } ! if (!didintr) { ! zs_sicookie = softint_establish(SOFTINT_SERIAL, zssoft, NULL); ! if (zs_sicookie == NULL) { ! aprint_error(": cannot establish soft int handler\n"); ! return; ! } } aprint_normal(" softpri %d\n", IPL_SOFTSERIAL); --- 402,412 ---- return; } ! zsc->zsc_sicookie = softint_establish(SOFTINT_SERIAL, ! (void (*)(void *))zsc_intr_soft, zsc); ! if (zsc->zsc_sicookie == NULL) { ! aprint_error(": cannot establish soft int handler\n"); ! return; } aprint_normal(" softpri %d\n", IPL_SOFTSERIAL); *************** zs_attach(struct zsc_softc *zsc, struct *** 551,567 **** } /* ! * Now safe to install interrupt handlers. Note the arguments ! * to the interrupt handlers aren't used. Note, we only do this ! * once since both SCCs interrupt at the same level and vector. */ ! if (!didintr) { ! didintr = 1; ! prevpri = pri; ! bus_intr_establish(zsc->zsc_bustag, pri, IPL_SERIAL, ! zshard, NULL); ! } else if (pri != prevpri) ! panic("broken zs interrupt scheme"); evcnt_attach_dynamic(&zsc->zsc_intrcnt, EVCNT_TYPE_INTR, NULL, device_xname(zsc->zsc_dev), "intr"); --- 545,553 ---- } /* ! * Now safe to install interrupt handlers. */ ! bus_intr_establish(zsc->zsc_bustag, pri, IPL_SERIAL, zshard, zsc); evcnt_attach_dynamic(&zsc->zsc_intrcnt, EVCNT_TYPE_INTR, NULL, device_xname(zsc->zsc_dev), "intr"); *************** zs_print(void *aux, const char *name) *** 610,690 **** static volatile int zssoftpending; /* ! * Our ZS chips all share a common, autovectored interrupt, ! * so we have to look at all of them on each interrupt. */ static int zshard(void *arg) { ! struct zsc_softc *zsc; ! int unit, rr3, rval, softreq; ! ! rval = softreq = 0; ! for (unit = 0; unit < zs_cd.cd_ndevs; unit++) { ! struct zs_chanstate *cs; ! zsc = device_lookup_private(&zs_cd, unit); ! if (zsc == NULL) ! continue; ! rr3 = zsc_intr_hard(zsc); ! /* Count up the interrupts. */ ! if (rr3) { ! rval |= rr3; ! zsc->zsc_intrcnt.ev_count++; ! } ! if ((cs = zsc->zsc_cs[0]) != NULL) ! softreq |= cs->cs_softreq; ! if ((cs = zsc->zsc_cs[1]) != NULL) ! softreq |= cs->cs_softreq; ! } ! ! /* We are at splzs here, so no need to lock. */ ! if (softreq && (zssoftpending == 0)) { ! zssoftpending = 1; ! softint_schedule(zs_sicookie); } return (rval); } /* - * Similar scheme as for zshard (look at all of them) - */ - static void - zssoft(void *arg) - { - struct zsc_softc *zsc; - int unit; - - /* This is not the only ISR on this IPL. */ - if (zssoftpending == 0) - return; - - /* - * The soft intr. 
bit will be set by zshard only if - * the variable zssoftpending is zero. The order of - * these next two statements prevents our clearing - * the soft intr bit just after zshard has set it. - */ - /* ienab_bic(IE_ZSSOFT); */ - zssoftpending = 0; - - #if 0 /* not yet */ - /* Make sure we call the tty layer with tty_lock held. */ - mutex_spin_enter(&tty_lock); - #endif - for (unit = 0; unit < zs_cd.cd_ndevs; unit++) { - zsc = device_lookup_private(&zs_cd, unit); - if (zsc == NULL) - continue; - (void)zsc_intr_soft(zsc); - } - #if 0 /* not yet */ - mutex_spin_exit(&tty_lock); - #endif - } - - - /* * Compute the current baud rate given a ZS channel. */ static int --- 596,624 ---- static volatile int zssoftpending; /* ! * Our ZS chips all share a common interrupt level, ! * but we establish zshard handler per each ZS chips ! * to avoid holding unnecessary locks in interrupt context. */ static int zshard(void *arg) { ! struct zsc_softc *zsc = arg; ! int rr3, rval; ! rval = 0; ! rr3 = zsc_intr_hard(zsc); ! /* Count up the interrupts. */ ! if (rr3) { ! rval = rr3; ! zsc->zsc_intrcnt.ev_count++; } + if (zsc->zsc_cs[0]->cs_softreq || zsc->zsc_cs[1]->cs_softreq) + softint_schedule(zsc->zsc_sicookie); return (rval); } /* * Compute the current baud rate given a ZS channel. */ static int Index: sys/arch/sparc/include/cpu.h =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/include/cpu.h,v retrieving revision 1.84.14.1 diff -p -r1.84.14.1 cpu.h *** sys/arch/sparc/include/cpu.h 9 Jan 2010 01:43:51 -0000 1.84.14.1 --- sys/arch/sparc/include/cpu.h 21 Feb 2011 03:17:29 -0000 *************** void schedintr(void *); *** 191,196 **** --- 191,197 ---- /* locore.s */ struct fpstate; + void ipi_savefpstate(struct fpstate *); void savefpstate(struct fpstate *); void loadfpstate(struct fpstate *); int probeget(void *, int); Index: sys/arch/sparc/include/z8530var.h =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/include/z8530var.h,v retrieving revision 1.9 diff -p -r1.9 z8530var.h *** sys/arch/sparc/include/z8530var.h 29 Mar 2008 19:15:35 -0000 1.9 --- sys/arch/sparc/include/z8530var.h 21 Feb 2011 03:17:29 -0000 *************** struct zsc_softc { *** 54,59 **** --- 54,60 ---- int zsc_node; /* PROM node, if any */ struct evcnt zsc_intrcnt; /* count interrupts */ struct zs_chanstate zsc_cs_store[2]; + void *zsc_sicookie; /* softint(9) cookie */ }; /* Index: sys/arch/sparc/sparc/cpu.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/cpu.c,v retrieving revision 1.211.8.4 diff -p -r1.211.8.4 cpu.c *** sys/arch/sparc/sparc/cpu.c 16 Feb 2011 21:33:25 -0000 1.211.8.4 --- sys/arch/sparc/sparc/cpu.c 21 Feb 2011 03:17:29 -0000 *************** __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.21 *** 65,70 **** --- 65,72 ---- #include #include #include + #include + #include #include *************** __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.21 *** 78,83 **** --- 80,86 ---- #include #if defined(MULTIPROCESSOR) && defined(DDB) + #include #include #endif *************** CFATTACH_DECL(cpu_cpuunit, sizeof(struct *** 119,124 **** --- 122,128 ---- cpu_cpuunit_match, cpu_cpuunit_attach, NULL, NULL); #endif /* SUN4D */ + static void cpu_init_evcnt(struct cpu_info *cpi); static void cpu_attach(struct cpu_softc *, int, int); static const char *fsrtoname(int, int, int); *************** cpu_cpuunit_attach(struct device *parent *** 293,298 **** --- 297,367 ---- } #endif /* SUN4D */ + 
static const char * const hard_intr_names[] = { + "spur hard", + "lev1 hard", + "lev2 hard", + "lev3 hard", + "lev4 hard", + "lev5 hard", + "lev6 hard", + "lev7 hard", + "lev8 hard", + "lev9 hard", + "clock hard", + "lev11 hard", + "lev12 hard", + "lev13 hard", + "prof hard", + "nmi hard", + }; + + static const char * const soft_intr_names[] = { + "spur soft", + "lev1 soft", + "lev2 soft", + "lev3 soft", + "lev4 soft", + "lev5 soft", + "lev6 soft", + "lev7 soft", + "lev8 soft", + "lev9 soft", + "lev10 soft", + "lev11 soft", + "lev12 soft", + "xcall std", + "xcall fast", + "nmi soft", + }; + + static void + cpu_init_evcnt(struct cpu_info *cpi) + { + int i; + + /* + * Setup the per-cpu counters. + */ + snprintf(cpi->ci_cpuname, sizeof(cpi->ci_cpuname), "cpu/%d", cpi->ci_cpuid); + evcnt_attach_dynamic(&cpi->ci_savefpstate, EVCNT_TYPE_MISC, + NULL, cpi->ci_cpuname, "savefp ipi"); + evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail, EVCNT_TYPE_MISC, + NULL, cpi->ci_cpuname, "IPI mutex_trylock fail"); + evcnt_attach_dynamic(&cpi->ci_xpmsg_mutex_fail_call, EVCNT_TYPE_MISC, + NULL, cpi->ci_cpuname, "IPI mutex_trylock fail/call"); + + /* + * These are the per-cpu per-IPL hard & soft interrupt counters. + */ + for (i = 0; i < 16; i++) { + evcnt_attach_dynamic(&cpi->ci_intrcnt[i], EVCNT_TYPE_INTR, + NULL, cpi->ci_cpuname, hard_intr_names[i]); + evcnt_attach_dynamic(&cpi->ci_sintrcnt[i], EVCNT_TYPE_INTR, + NULL, cpi->ci_cpuname, soft_intr_names[i]); + } + } + /* * Attach the CPU. * Discover interesting goop about the virtual address cache *************** cpu_attach(struct cpu_softc *sc, int nod *** 341,350 **** --- 410,422 ---- #if defined(MULTIPROCESSOR) if (cpu_attach_count > 1) { cpu_attach_non_boot(sc, cpi, node); + cpu_init_evcnt(cpi); return; } #endif /* MULTIPROCESSOR */ + cpu_init_evcnt(cpi); + /* Stuff to only run on the boot CPU */ cpu_setup(); snprintf(buf, sizeof buf, "%s @ %s MHz, %s FPU", *************** void *** 500,506 **** cpu_init_system(void) { ! mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_VM); } /* --- 572,578 ---- cpu_init_system(void) { ! mutex_init(&xpmsg_mutex, MUTEX_SPIN, IPL_SCHED); } /* *************** xcall(xcall_func_t func, xcall_trap_t tr *** 560,567 **** --- 632,643 ---- struct cpu_info *cpi; int n, i, done, callself, mybit; volatile struct xpmsg_func *p; + u_int pil; int fasttrap; int is_noop = func == (xcall_func_t)sparc_noop; + static char errbuf[160]; + char *bufp = errbuf; + size_t bufsz = sizeof errbuf, wrsz; mybit = (1 << cpuinfo.ci_cpuid); callself = func && (cpuset & mybit) != 0; *************** xcall(xcall_func_t func, xcall_trap_t tr *** 571,577 **** --- 647,680 ---- cpuset &= cpu_ready_mask; /* prevent interrupts that grab the kernel lock */ + #if 0 mutex_spin_enter(&xpmsg_mutex); + #else + /* + * There's a deadlock potential between multiple CPUs trying + * to xcall() at the same time, and the thread that loses the + * race to get xpmsg_lock is at an IPL above the incoming IPI + * IPL level, so it sits around waiting to take the lock while + * the other CPU is waiting for this CPU to handle the IPI and + * mark it as completed. + * + * If we fail to get the mutex, and we're at high enough IPL, + * call xcallintr() if there is a valid msg.tag. 
+ */ + pil = (getpsr() & PSR_PIL) >> 8; + + if (cold || pil < 13) + mutex_spin_enter(&xpmsg_mutex); + else { + while (mutex_tryenter(&xpmsg_mutex) == 0) { + cpuinfo.ci_xpmsg_mutex_fail.ev_count++; + if (cpuinfo.msg.tag) { + cpuinfo.ci_xpmsg_mutex_fail_call.ev_count++; + xcallintr(xcallintr); + } + } + } + #endif /* * Firstly, call each CPU. We do this so that they might have *************** xcall(xcall_func_t func, xcall_trap_t tr *** 599,605 **** /* * Second, call ourselves. */ - p = &cpuinfo.msg.u.xpmsg_func; if (callself) (*func)(arg0, arg1, arg2); --- 702,707 ---- *************** xcall(xcall_func_t func, xcall_trap_t tr *** 612,619 **** i = 100000; /* time-out, not too long, but still an _AGE_ */ while (!done) { if (--i < 0) { ! printf_nolog("xcall(cpu%d,%p): couldn't ping cpus:", cpu_number(), func); } done = 1; --- 714,724 ---- i = 100000; /* time-out, not too long, but still an _AGE_ */ while (!done) { if (--i < 0) { ! wrsz = snprintf(bufp, bufsz, ! "xcall(cpu%d,%p): couldn't ping cpus:", cpu_number(), func); + bufsz -= wrsz; + bufp += wrsz; } done = 1; *************** xcall(xcall_func_t func, xcall_trap_t tr *** 623,629 **** if (cpi->msg.complete == 0) { if (i < 0) { ! printf_nolog(" cpu%d", cpi->ci_cpuid); } else { done = 0; break; --- 728,740 ---- if (cpi->msg.complete == 0) { if (i < 0) { ! wrsz = snprintf(bufp, bufsz, ! " cpu%d", cpi->ci_cpuid); ! bufsz -= wrsz; ! bufp += wrsz; ! /* insanity */ ! if (bufsz < 0) ! break; } else { done = 0; break; *************** xcall(xcall_func_t func, xcall_trap_t tr *** 632,639 **** } } if (i < 0) ! printf_nolog("\n"); ! mutex_spin_exit(&xpmsg_mutex); } --- 743,749 ---- } } if (i < 0) ! printf_nolog("%s\n", errbuf); mutex_spin_exit(&xpmsg_mutex); } *************** fsrtoname(int impl, int vers, int fver) *** 2033,2040 **** #include "ioconf.h" - void cpu_debug_dump(void); - /* * Dump CPU information from ddb. */ --- 2143,2148 ---- *************** cpu_debug_dump(void) *** 2057,2060 **** --- 2165,2199 ---- ci->curpcb); } } + + #if defined(MULTIPROCESSOR) + /* + * Dump CPU xcall from ddb. + */ + void + cpu_xcall_dump(void) + { + struct cpu_info *ci; + CPU_INFO_ITERATOR cii; + + db_printf("%-4s %-10s %-10s %-10s %-10s %-10s " + "%-4s %-4s %-4s\n", + "CPU#", "FUNC", "TRAP", "ARG0", "ARG1", "ARG2", + "TAG", "RECV", "COMPL"); + for (CPU_INFO_FOREACH(cii, ci)) { + db_printf("%-4d %-10p %-10p 0x%-8x 0x%-8x 0x%-8x " + "%-4d %-4d %-4d\n", + ci->ci_cpuid, + ci->msg.u.xpmsg_func.func, + ci->msg.u.xpmsg_func.trap, + ci->msg.u.xpmsg_func.arg0, + ci->msg.u.xpmsg_func.arg1, + ci->msg.u.xpmsg_func.arg2, + ci->msg.tag, + ci->msg.received, + ci->msg.complete); + } + } + #endif + #endif Index: sys/arch/sparc/sparc/cpuvar.h =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/cpuvar.h,v retrieving revision 1.75.10.4 diff -p -r1.75.10.4 cpuvar.h *** sys/arch/sparc/sparc/cpuvar.h 16 Feb 2011 21:33:25 -0000 1.75.10.4 --- sys/arch/sparc/sparc/cpuvar.h 21 Feb 2011 03:17:29 -0000 *************** struct xpmsg { *** 127,132 **** --- 127,139 ---- struct cpu_info { struct cpu_data ci_data; /* MI per-cpu data */ + /* + * Primary Inter-processor message area. Keep this aligned + * to a cache line boundary if possible, as the structure + * itself is one (normal 32 byte) cache-line. 
+ */ + struct xpmsg msg __aligned(32); + /* Scheduler flags */ int ci_want_ast; int ci_want_resched; *************** struct cpu_info { *** 142,150 **** */ struct cpu_info * volatile ci_self; - /* Primary Inter-processor message area */ - struct xpmsg msg; - int ci_cpuid; /* CPU index (see cpus[] array) */ /* Context administration */ --- 149,154 ---- *************** struct cpu_info { *** 333,340 **** vaddr_t ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2; char ci_cpuname[8]; /* "cpu/0", etc. */ ! struct evcnt ci_lev10; ! struct evcnt ci_lev14; }; /* --- 337,347 ---- vaddr_t ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2; char ci_cpuname[8]; /* "cpu/0", etc. */ ! struct evcnt ci_savefpstate; ! struct evcnt ci_xpmsg_mutex_fail; ! struct evcnt ci_xpmsg_mutex_fail_call; ! struct evcnt ci_intrcnt[16]; ! struct evcnt ci_sintrcnt[16]; }; /* *************** void cpu_init_system(void); *** 448,453 **** --- 455,462 ---- typedef void (*xcall_func_t)(int, int, int); typedef void (*xcall_trap_t)(int, int, int); void xcall(xcall_func_t, xcall_trap_t, int, int, int, u_int); + /* from intr.c */ + void xcallintr(void *); /* Shorthand */ #define XCALL0(f,cpuset) \ xcall((xcall_func_t)f, NULL, 0, 0, 0, cpuset) *************** extern u_int cpu_ready_mask; /* the set *** 487,491 **** --- 496,507 ---- #define cpuinfo (*(struct cpu_info *)CPUINFO_VA) + #if defined(DDB) || defined(MULTIPROCESSOR) + /* + * These are called by ddb mach functions. + */ + void cpu_debug_dump(void); + void cpu_xcall_dump(void); + #endif #endif /* _sparc_cpuvar_h */ Index: sys/arch/sparc/sparc/db_interface.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/db_interface.c,v retrieving revision 1.79.4.2 diff -p -r1.79.4.2 db_interface.c *** sys/arch/sparc/sparc/db_interface.c 16 Jan 2011 12:58:24 -0000 1.79.4.2 --- sys/arch/sparc/sparc/db_interface.c 21 Feb 2011 03:17:29 -0000 *************** void db_dump_pcb(db_expr_t, bool, db_exp *** 202,207 **** --- 202,208 ---- void db_uvmhistdump(db_expr_t, bool, db_expr_t, const char *); #ifdef MULTIPROCESSOR void db_cpu_cmd(db_expr_t, bool, db_expr_t, const char *); + void db_xcall_cmd(db_expr_t, bool, db_expr_t, const char *); #endif void db_page_cmd(db_expr_t, bool, db_expr_t, const char *); *************** db_page_cmd(db_expr_t addr, bool have_ad *** 448,454 **** } #if defined(MULTIPROCESSOR) - extern void cpu_debug_dump(void); /* XXX */ void db_cpu_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif) --- 449,454 ---- *************** db_cpu_cmd(db_expr_t addr, bool have_add *** 483,488 **** --- 483,494 ---- ddb_cpuinfo = ci; } + void + db_xcall_cmd(db_expr_t addr, bool have_addr, db_expr_t count, const char *modif) + { + cpu_xcall_dump(); + } + #endif /* MULTIPROCESSOR */ const struct db_command db_machine_command_table[] = { *************** const struct db_command db_machine_comma *** 492,497 **** --- 498,504 ---- { DDB_ADD_CMD("page", db_page_cmd, 0, NULL,NULL,NULL) }, #ifdef MULTIPROCESSOR { DDB_ADD_CMD("cpu", db_cpu_cmd, 0, NULL,NULL,NULL) }, + { DDB_ADD_CMD("xcall", db_xcall_cmd, 0, NULL,NULL,NULL) }, #endif { DDB_ADD_CMD(NULL, NULL, 0,NULL,NULL,NULL) } }; Index: sys/arch/sparc/sparc/genassym.cf =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/genassym.cf,v retrieving revision 1.56.4.1 diff -p -r1.56.4.1 genassym.cf *** sys/arch/sparc/sparc/genassym.cf 28 Jan 2011 07:16:13 -0000 1.56.4.1 --- sys/arch/sparc/sparc/genassym.cf 21 Feb 
2011 03:17:29 -0000 *************** define PMAP_CPUSET offsetof(struct pmap, *** 128,133 **** --- 128,136 ---- define V_INTR offsetof(struct uvmexp, intrs) define V_SOFT offsetof(struct uvmexp, softs) define V_FAULTS offsetof(struct uvmexp, faults) + define CPUINFO_LEV14 offsetof(struct cpu_info, ci_sintrcnt[14]) + define CPUINFO_INTRCNT offsetof(struct cpu_info, ci_intrcnt) + define CPUINFO_SINTRCNT offsetof(struct cpu_info, ci_sintrcnt) define EV_COUNT offsetof(struct evcnt, ev_count) define EV_STRUCTSIZE sizeof(struct evcnt) *************** define CPUINFO_IDLESPIN offsetof(struct *** 161,166 **** --- 164,170 ---- define CPUINFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) define CPUINFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) define CPUINFO_IDEPTH offsetof(struct cpu_info, ci_idepth) + define CPUINFO_SAVEFPSTATE offsetof(struct cpu_info, ci_savefpstate) # PTE bits and related information define PG_W PG_W Index: sys/arch/sparc/sparc/intr.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/intr.c,v retrieving revision 1.100.20.2 diff -p -r1.100.20.2 intr.c *** sys/arch/sparc/sparc/intr.c 28 Jan 2011 07:16:13 -0000 1.100.20.2 --- sys/arch/sparc/sparc/intr.c 21 Feb 2011 03:17:29 -0000 *************** __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.1 *** 72,118 **** #if defined(MULTIPROCESSOR) void *xcall_cookie; ! ! /* Stats */ ! struct evcnt lev13_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","std"); ! struct evcnt lev14_evcnt = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"xcall","fast"); ! EVCNT_ATTACH_STATIC(lev13_evcnt); ! EVCNT_ATTACH_STATIC(lev14_evcnt); ! #endif ! ! struct evcnt intrcnt[15] = { ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "spur", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev1", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev2", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev3", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev4", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev5", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev6", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev7", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev8", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev9", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "clock", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev11", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev12", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "lev13", "hard"), ! EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 0, "prof", "hard"), ! }; ! ! EVCNT_ATTACH_STATIC2(intrcnt, 0); ! EVCNT_ATTACH_STATIC2(intrcnt, 1); ! EVCNT_ATTACH_STATIC2(intrcnt, 2); ! EVCNT_ATTACH_STATIC2(intrcnt, 3); ! EVCNT_ATTACH_STATIC2(intrcnt, 4); ! EVCNT_ATTACH_STATIC2(intrcnt, 5); ! EVCNT_ATTACH_STATIC2(intrcnt, 6); ! EVCNT_ATTACH_STATIC2(intrcnt, 7); ! EVCNT_ATTACH_STATIC2(intrcnt, 8); ! EVCNT_ATTACH_STATIC2(intrcnt, 9); ! EVCNT_ATTACH_STATIC2(intrcnt, 10); ! EVCNT_ATTACH_STATIC2(intrcnt, 11); ! EVCNT_ATTACH_STATIC2(intrcnt, 12); ! EVCNT_ATTACH_STATIC2(intrcnt, 13); ! EVCNT_ATTACH_STATIC2(intrcnt, 14); void strayintr(struct clockframe *); #ifdef DIAGNOSTIC --- 72,78 ---- #if defined(MULTIPROCESSOR) void *xcall_cookie; ! #endif void strayintr(struct clockframe *); #ifdef DIAGNOSTIC *************** strayintr(struct clockframe *fp) *** 130,135 **** --- 90,108 ---- static int straytime, nstray; char bits[64]; int timesince; + + #if defined(MULTIPROCESSOR) + /* + * XXX + * + * Don't whine about zs interrupts on MP. 
We sometimes get + * stray interrupts when polled kernel output on cpu>0 eats + * the interrupt and cpu0 sees it. + */ + #define ZS_INTR_IPL 12 + if (fp->ipl == ZS_INTR_IPL) + return; + #endif printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n", cpu_number(), fp->ipl, fp->pc, fp->npc, *************** nmi_hard(void) *** 229,234 **** --- 202,210 ---- char bits[64]; u_int afsr, afva; + /* Tally */ + cpuinfo.ci_intrcnt[15].ev_count++; + afsr = afva = 0; if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) { printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n", *************** nmi_hard(void) *** 315,320 **** --- 291,300 ---- void nmi_soft(struct trapframe *tf) { + + /* Tally */ + cpuinfo.ci_sintrcnt[15].ev_count++; + if (cpuinfo.mailbox) { /* Check PROM messages */ uint8_t msg = *(uint8_t *)cpuinfo.mailbox; *************** nmi_soft(struct trapframe *tf) *** 362,374 **** #if defined(MULTIPROCESSOR) /* * Respond to an xcall() request from another CPU. */ ! static void xcallintr(void *v) { /* Tally */ ! lev13_evcnt.ev_count++; /* notyet - cpuinfo.msg.received = 1; */ switch (cpuinfo.msg.tag) { --- 342,360 ---- #if defined(MULTIPROCESSOR) /* * Respond to an xcall() request from another CPU. + * + * This is also called directly from xcall() if we notice an + * incoming message while we're waiting to grab the xpmsg_lock. + * We pass the address of xcallintr() itself to indicate that + * this is not a real interrupt. */ ! void xcallintr(void *v) { /* Tally */ ! if (v != xcallintr) ! cpuinfo.ci_sintrcnt[13].ev_count++; /* notyet - cpuinfo.msg.received = 1; */ switch (cpuinfo.msg.tag) { Index: sys/arch/sparc/sparc/locore.s =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/locore.s,v retrieving revision 1.244.8.3 diff -p -r1.244.8.3 locore.s *** sys/arch/sparc/sparc/locore.s 28 Jan 2011 07:16:13 -0000 1.244.8.3 --- sys/arch/sparc/sparc/locore.s 21 Feb 2011 03:17:29 -0000 *************** ctw_invalid: *** 1812,1817 **** --- 1812,1818 ---- */ #if defined(SUN4) + _ENTRY(memfault_sun4) memfault_sun4: TRAP_SETUP(-CCFSZ-80) INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1) *************** memfault_sun4: *** 1876,1883 **** #endif /* SUN4C || SUN4M */ #endif /* SUN4 */ - memfault_sun4c: #if defined(SUN4C) TRAP_SETUP(-CCFSZ-80) INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1) --- 1877,1885 ---- #endif /* SUN4C || SUN4M */ #endif /* SUN4 */ #if defined(SUN4C) + _ENTRY(memfault_sun4c) + memfault_sun4c: TRAP_SETUP(-CCFSZ-80) INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1) *************** memfault_sun4c: *** 1975,1980 **** --- 1977,1983 ---- #endif /* SUN4C */ #if defined(SUN4M) + _ENTRY(memfault_sun4m) memfault_sun4m: sethi %hi(CPUINFO_VA), %l4 ld [%l4 + %lo(CPUINFO_VA+CPUINFO_GETSYNCFLT)], %l5 *************** softintr_common: *** 2527,2539 **** wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! set intrcnt, %l4 ! intrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 ! std %l2, [%sp + CCFSZ + 8] inccc %o1 addx %o0, 0, %o0 std %o0, [%l4 + %o2] set _C_LABEL(sintrhand), %l4! %l4 = sintrhand[intlev]; ld [%l4 + %l5], %l4 --- 2530,2544 ---- wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! ! set CPUINFO_VA + CPUINFO_SINTRCNT, %l4 ! sintrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 ! 
std %l2, [%sp + CCFSZ + 8] ! set up intrframe/clockframe inccc %o1 addx %o0, 0, %o0 std %o0, [%l4 + %o2] + set _C_LABEL(sintrhand), %l4! %l4 = sintrhand[intlev]; ld [%l4 + %l5], %l4 *************** sparc_interrupt4m_bogus: *** 2681,2687 **** wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! set intrcnt, %l4 ! intrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 std %l2, [%sp + CCFSZ + 8] ! set up intrframe/clockframe --- 2686,2693 ---- wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! ! set CPUINFO_VA + CPUINFO_INTRCNT, %l4 ! intrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 std %l2, [%sp + CCFSZ + 8] ! set up intrframe/clockframe *************** sparc_interrupt_common: *** 2728,2740 **** wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! set intrcnt, %l4 ! intrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 std %l2, [%sp + CCFSZ + 8] ! set up intrframe/clockframe inccc %o1 addx %o0, 0, %o0 std %o0, [%l4 + %o2] set _C_LABEL(intrhand), %l4 ! %l4 = intrhand[intlev]; ld [%l4 + %l5], %l4 --- 2734,2748 ---- wr %l4, PSR_ET, %psr ! song and dance is necessary std %l0, [%sp + CCFSZ + 0] ! set up intrframe/clockframe sll %l3, 2, %l5 ! ! set CPUINFO_VA + CPUINFO_INTRCNT, %l4 ! intrcnt[intlev].ev_count++; sll %l3, EV_STRUCTSHIFT, %o2 ldd [%l4 + %o2], %o0 std %l2, [%sp + CCFSZ + 8] ! set up intrframe/clockframe inccc %o1 addx %o0, 0, %o0 std %o0, [%l4 + %o2] + set _C_LABEL(intrhand), %l4 ! %l4 = intrhand[intlev]; ld [%l4 + %l5], %l4 *************** sparc_interrupt_common: *** 2813,2823 **** * %l6 = &cpuinfo */ lev14_softint: ! set _C_LABEL(lev14_evcnt), %l7 ! lev14_evcnt.ev_count++; ! ldd [%l7 + EV_COUNT], %l4 inccc %l5 addx %l4, %g0, %l4 ! std %l4, [%l7 + EV_COUNT] ld [%l6 + CPUINFO_XMSG_TRAP], %l7 #ifdef DIAGNOSTIC --- 2821,2831 ---- * %l6 = &cpuinfo */ lev14_softint: ! sethi %hi(CPUINFO_VA), %l7 ! ldd [%l7 + CPUINFO_LEV14], %l4 inccc %l5 addx %l4, %g0, %l4 ! std %l4, [%l7 + CPUINFO_LEV14] ld [%l6 + CPUINFO_XMSG_TRAP], %l7 #ifdef DIAGNOSTIC *************** _ENTRY(_C_LABEL(nmi_common)) *** 3112,3118 **** #if defined(SUN4M) _ENTRY(_C_LABEL(nmi_sun4m)) ! INTR_SETUP(-CCFSZ-80) INCR(_C_LABEL(uvmexp)+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1) #if !defined(MSIIEP) /* normal sun4m */ --- 3120,3126 ---- #if defined(SUN4M) _ENTRY(_C_LABEL(nmi_sun4m)) ! INTR_SETUP(-CCFSZ-80-8-8) ! normal frame, plus g2..g5 INCR(_C_LABEL(uvmexp)+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1) #if !defined(MSIIEP) /* normal sun4m */ *************** Lkcerr: *** 5904,5915 **** /* NOTREACHED */ /* ! * savefpstate(f) struct fpstate *f; * * Store the current FPU state. The first `st %fsr' may cause a trap; * our trap handler knows how to recover (by `returning' to savefpcont). */ ENTRY(savefpstate) rd %psr, %o1 ! enable FP before we begin set PSR_EF, %o2 or %o1, %o2, %o1 --- 5912,5940 ---- /* NOTREACHED */ /* ! * savefpstate(struct fpstate *f); ! * ipi_savefpstate(struct fpstate *f); * * Store the current FPU state. The first `st %fsr' may cause a trap; * our trap handler knows how to recover (by `returning' to savefpcont). + * + * The IPI version just deals with updating event counters first. 
*/ + Lpanic_savefpstate: + .asciz "cpu%d: NULL fpstate" + _ALIGN + + ENTRY(ipi_savefpstate) + sethi %hi(CPUINFO_VA), %o5 + ldd [%o5 + CPUINFO_SAVEFPSTATE], %o2 + inccc %o3 + addx %o2, 0, %o2 + std %o2, [%o5 + CPUINFO_SAVEFPSTATE] + ENTRY(savefpstate) + cmp %o0, 0 + bz Lfp_null_fpstate + nop rd %psr, %o1 ! enable FP before we begin set PSR_EF, %o2 or %o1, %o2, %o1 *************** special_fp_store: *** 5928,5935 **** * So we still have to check the blasted QNE bit. * With any luck it will usually not be set. */ ! ld [%o0 + FS_FSR], %o4 ! if (f->fs_fsr & QNE) ! btst %o5, %o4 bnz Lfp_storeq ! goto storeq; std %f0, [%o0 + FS_REGS + (4*0)] ! f->fs_f0 = etc; Lfp_finish: --- 5953,5960 ---- * So we still have to check the blasted QNE bit. * With any luck it will usually not be set. */ ! ld [%o0 + FS_FSR], %o2 ! if (f->fs_fsr & QNE) ! btst %o5, %o2 bnz Lfp_storeq ! goto storeq; std %f0, [%o0 + FS_REGS + (4*0)] ! f->fs_f0 = etc; Lfp_finish: *************** Lfp_finish: *** 5952,5957 **** --- 5977,5991 ---- std %f30, [%o0 + FS_REGS + (4*30)] /* + * We got a NULL struct fpstate * on the IPI. We panic. + */ + Lfp_null_fpstate: + ld [%o5 + CPUINFO_CPUNO], %o1 + sethi %hi(Lpanic_savefpstate), %o0 + call _C_LABEL(panic) + or %o0, %lo(Lpanic_savefpstate), %o0 + + /* * Store the (now known nonempty) FP queue. * We have to reread the fsr each time in order to get the new QNE bit. */ *************** Lfp_storeq: *** 5964,5969 **** --- 5998,6004 ---- btst %o5, %o4 bnz 1b inc 8, %o3 + st %o2, [%o0 + FS_FSR] ! fs->fs_fsr = original_fsr b Lfp_finish ! set qsize and finish storing fregs srl %o3, 3, %o3 ! (but first fix qsize) Index: sys/arch/sparc/sparc/machdep.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/machdep.c,v retrieving revision 1.282.4.3 diff -p -r1.282.4.3 machdep.c *** sys/arch/sparc/sparc/machdep.c 18 Oct 2009 14:40:40 -0000 1.282.4.3 --- sys/arch/sparc/sparc/machdep.c 21 Feb 2011 03:17:29 -0000 *************** setregs(struct lwp *l, struct exec_packa *** 377,383 **** savefpstate(fs); #if defined(MULTIPROCESSOR) else ! XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; } --- 377,383 ---- savefpstate(fs); #if defined(MULTIPROCESSOR) else ! XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; } Index: sys/arch/sparc/sparc/timer.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/timer.c,v retrieving revision 1.23 diff -p -r1.23 timer.c *** sys/arch/sparc/sparc/timer.c 3 Dec 2007 15:34:22 -0000 1.23 --- sys/arch/sparc/sparc/timer.c 21 Feb 2011 03:17:29 -0000 *************** void *** 153,158 **** --- 153,159 ---- timerattach(volatile int *cntreg, volatile int *limreg) { u_int prec = 0, t0; + void (*sched_intr_fn)(void *); /* * Calibrate delay() by tweaking the magic constant *************** timerattach(volatile int *cntreg, volati *** 192,202 **** cntr.mask = (1 << (31-t0))-1; counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1); ! printf(": delay constant %d, frequency = %" PRIu64 " Hz\n", timerblurb, counter_timecounter.tc_frequency); #if defined(SUN4) || defined(SUN4C) if (CPU_ISSUN4 || CPU_ISSUN4C) { timer_init = timer_init_4; level10.ih_fun = clockintr_4; level14.ih_fun = statintr_4; cntr.limit = tmr_ustolim(tick); --- 193,205 ---- cntr.mask = (1 << (31-t0))-1; counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1); ! printf(": delay constant %d, frequency = %" PRIu64 " Hz\n", ! 
timerblurb, counter_timecounter.tc_frequency); #if defined(SUN4) || defined(SUN4C) if (CPU_ISSUN4 || CPU_ISSUN4C) { timer_init = timer_init_4; + sched_intr_fn = schedintr; level10.ih_fun = clockintr_4; level14.ih_fun = statintr_4; cntr.limit = tmr_ustolim(tick); *************** timerattach(volatile int *cntreg, volati *** 205,210 **** --- 208,219 ---- #if defined(SUN4M) if (CPU_ISSUN4M) { timer_init = timer_init_4m; + #if defined(MULTIPROCESSOR) + if (sparc_ncpus > 1) + sched_intr_fn = schedintr_4m; + else + #endif + sched_intr_fn = schedintr; level10.ih_fun = clockintr_4m; level14.ih_fun = statintr_4m; cntr.limit = tmr_ustolim4m(tick); *************** timerattach(volatile int *cntreg, volati *** 215,221 **** intr_establish(14, 0, &level14, NULL); /* Establish a soft interrupt at a lower level for schedclock */ ! sched_cookie = sparc_softintr_establish(IPL_SCHED, schedintr, NULL); if (sched_cookie == NULL) panic("timerattach: cannot establish schedintr"); --- 224,230 ---- intr_establish(14, 0, &level14, NULL); /* Establish a soft interrupt at a lower level for schedclock */ ! sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL); if (sched_cookie == NULL) panic("timerattach: cannot establish schedintr"); Index: sys/arch/sparc/sparc/timer_sun4m.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/timer_sun4m.c,v retrieving revision 1.16.56.2 diff -p -r1.16.56.2 timer_sun4m.c *** sys/arch/sparc/sparc/timer_sun4m.c 16 Feb 2011 21:33:25 -0000 1.16.56.2 --- sys/arch/sparc/sparc/timer_sun4m.c 21 Feb 2011 03:17:29 -0000 *************** timer_init_4m(void) *** 95,100 **** --- 95,123 ---- icr_si_bic(SINTR_T); } + void + schedintr_4m(void *v) + { + + #ifdef MULTIPROCESSOR + /* + * We call hardclock() here so that we make sure it is called on + * all CPUs. This function ends up being called on sun4m systems + * every tick, so we have avoid + */ + if (!CPU_IS_PRIMARY(curcpu())) + hardclock(v); + + /* + * The factor 8 is only valid for stathz==100. + * See also clock.c + */ + if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) + #endif + schedclock(curlwp); + } + + /* * Level 10 (clock) interrupts from system counter. */ *************** int *** 102,107 **** --- 125,131 ---- clockintr_4m(void *cap) { + KASSERT(CPU_IS_PRIMARY(curcpu())); /* * XXX this needs to be fixed in a more general way * problem is that the kernel enables interrupts and THEN *************** clockintr_4m(void *cap) *** 109,116 **** * a timer interrupt - if we call hardclock() at that point we'll * panic * so for now just bail when cold */ - cpuinfo.ci_lev10.ev_count++; if (cold) return 0; /* read the limit register to clear the interrupt */ --- 133,142 ---- * a timer interrupt - if we call hardclock() at that point we'll * panic * so for now just bail when cold + * + * For MP, we defer calling hardclock() to the schedintr so + * that we call it on all cpus. */ if (cold) return 0; /* read the limit register to clear the interrupt */ *************** statintr_4m(void *cap) *** 129,136 **** struct clockframe *frame = cap; u_long newint; - cpuinfo.ci_lev14.ev_count++; - /* read the limit register to clear the interrupt */ *((volatile int *)&counterreg4m->t_limit); --- 155,160 ---- *************** statintr_4m(void *cap) *** 152,170 **** * The factor 8 is only valid for stathz==100. * See also clock.c */ ! 
if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) { if (CLKF_LOPRI(frame, IPL_SCHED)) { /* No need to schedule a soft interrupt */ spllowerschedclock(); ! schedintr(cap); } else { /* * We're interrupting a thread that may have the ! * scheduler lock; run schedintr() on this CPU later. */ raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */ } } return (1); } --- 176,198 ---- * The factor 8 is only valid for stathz==100. * See also clock.c */ ! #if !defined(MULTIPROCESSOR) ! if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) { ! #endif if (CLKF_LOPRI(frame, IPL_SCHED)) { /* No need to schedule a soft interrupt */ spllowerschedclock(); ! schedintr_4m(cap); } else { /* * We're interrupting a thread that may have the ! * scheduler lock; run schedintr_4m() on this CPU later. */ raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */ } + #if !defined(MULTIPROCESSOR) } + #endif return (1); } *************** timerattach_obio_4m(struct device *paren *** 224,236 **** cpi->counterreg_4m = (struct counter_4m *)bh; } - /* Install timer/statclock event counters, per cpu */ - for (CPU_INFO_FOREACH(n, cpi)) { - snprintf(cpi->ci_cpuname, sizeof(cpi->ci_cpuname), "cpu/%d", n); - evcnt_attach_dynamic(&cpi->ci_lev10, EVCNT_TYPE_INTR, NULL, cpi->ci_cpuname, "lev10"); - evcnt_attach_dynamic(&cpi->ci_lev14, EVCNT_TYPE_INTR, NULL, cpi->ci_cpuname, "lev14"); - } - /* Put processor counter in "timer" mode */ timerreg4m->t_cfg = 0; --- 252,257 ---- Index: sys/arch/sparc/sparc/timervar.h =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/timervar.h,v retrieving revision 1.8 diff -p -r1.8 timervar.h *** sys/arch/sparc/sparc/timervar.h 7 Jun 2006 22:38:50 -0000 1.8 --- sys/arch/sparc/sparc/timervar.h 21 Feb 2011 03:17:29 -0000 *************** void timerattach_mainbus_4c(struct devic *** 50,55 **** --- 50,56 ---- #endif /* SUN4 || SUN4C */ #if defined(SUN4M) + void schedintr_4m(void *); int clockintr_4m(void *); int statintr_4m(void *); void timer_init_4m(void); Index: sys/arch/sparc/sparc/trap.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/trap.c,v retrieving revision 1.176 diff -p -r1.176 trap.c *** sys/arch/sparc/sparc/trap.c 15 Oct 2008 06:51:18 -0000 1.176 --- sys/arch/sparc/sparc/trap.c 21 Feb 2011 03:17:29 -0000 *************** badtrap: *** 479,485 **** panic("FPU(%d): state for %p", cpi->ci_cpuid, l); #if defined(MULTIPROCESSOR) ! XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; } --- 479,485 ---- panic("FPU(%d): state for %p", cpi->ci_cpuid, l); #if defined(MULTIPROCESSOR) ! XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; } Index: sys/arch/sparc/sparc/vm_machdep.c =================================================================== RCS file: /cvsroot/src/sys/arch/sparc/sparc/vm_machdep.c,v retrieving revision 1.95.4.2 diff -p -r1.95.4.2 vm_machdep.c *** sys/arch/sparc/sparc/vm_machdep.c 9 Jan 2010 01:43:51 -0000 1.95.4.2 --- sys/arch/sparc/sparc/vm_machdep.c 21 Feb 2011 03:17:29 -0000 *************** cpu_lwp_fork(struct lwp *l1, struct lwp *** 230,236 **** savefpstate(l1->l_md.md_fpstate); #if defined(MULTIPROCESSOR) else ! XCALL1(savefpstate, l1->l_md.md_fpstate, 1 << cpi->ci_cpuid); #endif } --- 230,236 ---- savefpstate(l1->l_md.md_fpstate); #if defined(MULTIPROCESSOR) else ! 
XCALL1(ipi_savefpstate, l1->l_md.md_fpstate, 1 << cpi->ci_cpuid); #endif } *************** cpu_lwp_free(struct lwp *l, int proc) *** 308,314 **** savefpstate(fs); #if defined(MULTIPROCESSOR) else ! XCALL1(savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; } --- 308,314 ---- savefpstate(fs); #if defined(MULTIPROCESSOR) else ! XCALL1(ipi_savefpstate, fs, 1 << cpi->ci_cpuid); #endif cpi->fplwp = NULL; }
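Note on the xpmsg_mutex change above: the fallback path added to xcall() only matters when the caller is already running at or above the IPI level (PIL >= 13). Blocking on the mutex there can deadlock, because the CPU holding the lock may be waiting for us to acknowledge its cross-call, which we cannot do while we sit in mutex_spin_enter(). The fragment below is a minimal, stand-alone user-space sketch of that trylock-and-service pattern, for illustration only; the pthread mutex, pending_msg flag and service_message() merely stand in for xpmsg_mutex, cpuinfo.msg.tag and xcallintr(), and are not part of the patch.

/*
 * User-space analogue of the xpmsg_mutex fallback in xcall():
 * if the lock cannot be taken immediately and we are "above the
 * IPI level", keep polling for an incoming cross-call message and
 * service it locally, so two CPUs cross-calling each other cannot
 * deadlock.  Names here are illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t xpmsg_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending_msg;		/* stands in for cpuinfo.msg.tag */

static void
service_message(void)			/* stands in for xcallintr() */
{
	/* Drain an incoming cross-call, if one arrived while we waited. */
	if (atomic_exchange(&pending_msg, 0))
		printf("serviced incoming cross-call while waiting\n");
}

static void
send_xcall(int above_ipi_level)
{
	if (!above_ipi_level) {
		/* Safe to block: incoming IPIs can still be handled. */
		pthread_mutex_lock(&xpmsg_lock);
	} else {
		/*
		 * Blocking could deadlock with a lock holder that is
		 * waiting for us to complete its message; poll instead
		 * and service any pending message between attempts.
		 */
		while (pthread_mutex_trylock(&xpmsg_lock) != 0)
			service_message();
	}
	/* ... post the message to the target CPUs here ... */
	pthread_mutex_unlock(&xpmsg_lock);
}

int
main(void)
{
	atomic_store(&pending_msg, 1);
	send_xcall(1);
	return 0;
}

As in the patch, the key point is that the waiter never sleeps while it may itself be the target of a pending cross-call: it keeps retrying the lock and drains any incoming message in between attempts. In the kernel version, passing the address of xcallintr() itself as the argument lets the handler tell a direct call apart from a real soft interrupt when updating the event counters.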