Index: sparc/sparc/timer.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/timer.c,v
retrieving revision 1.29
diff -p -u -u -r1.29 timer.c
--- sparc/sparc/timer.c	17 Jul 2011 23:18:23 -0000	1.29
+++ sparc/sparc/timer.c	13 Jan 2018 21:11:27 -0000
@@ -83,56 +83,93 @@ static u_int timer_get_timecount(struct
  * timecounter local state
  */
 static struct counter {
-	volatile u_int *cntreg;	/* counter register */
+	__cpu_simple_lock_t lock; /* protects access to offset, reg, last* */
+	volatile u_int *cntreg;	/* counter register to read */
 	u_int limit;		/* limit we count up to */
 	u_int offset;		/* accumulated offset due to wraps */
 	u_int shift;		/* scaling for valid bits */
 	u_int mask;		/* valid bit mask */
-} cntr;
+	u_int lastcnt;		/* the last* values are used to notice */
+	u_int lastres;		/* and fix up cases where it would appear */
+	u_int lastoffset;	/* time went backwards. */
+} cntr __aligned(CACHE_LINE_SIZE);
 
 /*
  * define timecounter
  */
 static struct timecounter counter_timecounter = {
-	timer_get_timecount,	/* get_timecount */
-	0,			/* no poll_pps */
-	~0u,			/* counter_mask */
-	0,			/* frequency - set at initialisation */
-	"timer-counter",	/* name */
-	100,			/* quality */
-	&cntr			/* private reference */
+	.tc_get_timecount = timer_get_timecount,
+	.tc_poll_pps = NULL,
+	.tc_counter_mask = ~0u,
+	.tc_frequency = 0,
+	.tc_name = "timer-counter",
+	.tc_quality = 100,
+	.tc_priv = &cntr,
 };
 
 /*
  * timer_get_timecount provides the current counter value
  */
+__attribute__((__optimize__("Os")))
 static u_int
 timer_get_timecount(struct timecounter *tc)
 {
-	struct counter *ctr = (struct counter *)tc->tc_priv;
-
-	u_int c, res, r;
+	u_int cnt, res, fixup, offset;
 	int s;
-
+	/*
+	 * We use splhigh/__cpu_simple_lock here as we don't want
+	 * any mutex or lockdebug overhead.  The lock protects a
+	 * bunch of the members of cntr that are written here to
+	 * deal with the various minor races to be observed and
+	 * worked around.
+	 */
 	s = splhigh();
 
-	res = c = *ctr->cntreg;
+	__cpu_simple_lock(&cntr.lock);
+	res = cnt = *cntr.cntreg;
 
 	res &= ~TMR_LIMIT;
+	offset = cntr.offset;
 
-	if (c != res) {
-		r = ctr->limit;
+	/*
+	 * There are 3 cases here:
+	 *  - limit reached, interrupt not yet processed.
+	 *  - count reset but offset the same, race between handling
+	 *    the interrupt and tickle_tc() updating the offset.
+	 *  - normal case.
+	 *
+	 * For the first two cases, add the limit so that we avoid
+	 * time going backwards.
+	 */
+	if (cnt != res) {
+		fixup = cntr.limit;
+	} else if (res < cntr.lastcnt && offset == cntr.lastoffset) {
+		fixup = cntr.limit;
 	} else {
-		r = 0;
+		fixup = 0;
 	}
+
+	cntr.lastcnt = res;
+	cntr.lastoffset = offset;
 
-	res >>= ctr->shift;
-	res &= ctr->mask;
+	res >>= cntr.shift;
+	res &= cntr.mask;
 
-	res += r + ctr->offset;
+	res += fixup + offset;
 
+	/*
+	 * This handles early-boot cases where the counter resets twice
+	 * before the offset is updated, and we have a stupid check to
+	 * ensure overflow hasn't happened.
+	 */
+	if (res < cntr.lastres && res > (TMR_MASK+1) << 3)
+		res = cntr.lastres + 1;
+
+	cntr.lastres = res;
+
+	__cpu_simple_unlock(&cntr.lock);
 	splx(s);
 
 	return res;
@@ -141,7 +178,15 @@ timer_get_timecount(struct timecounter *
 void
 tickle_tc(void)
 {
 	if (timecounter->tc_get_timecount == timer_get_timecount) {
+		/*
+		 * This could be protected by cntr.lock/splhigh but the update
+		 * happens at IPL10 already and as a 32 bit value it should
+		 * never be seen as a partial update, so skip it here.  This
+		 * also probably slows down the actual offset update, making
+		 * one of the cases above more likely to need the workaround.
+		 */
 		cntr.offset += cntr.limit;
 	}
 }
@@ -189,6 +234,8 @@ timerattach(volatile int *cntreg, volati
 		if ((1 << t0) & prec)
 			break;
 
+	__cpu_simple_lock_init(&cntr.lock);
+
 	cntr.shift = t0;
 	cntr.mask = (1 << (31-t0))-1;
 	counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1);
@@ -219,6 +266,7 @@ timerattach(volatile int *cntreg, volati
 		cntr.limit = tmr_ustolim4m(tick);
 	}
 #endif
+
 	/* link interrupt handlers */
 	intr_establish(10, 0, &level10, NULL, true);
 	intr_establish(14, 0, &level14, NULL, true);
@@ -231,6 +279,9 @@ timerattach(volatile int *cntreg, volati
 	cntr.cntreg = cntreg;
 	cntr.limit >>= cntr.shift;
 
+	/* start at non-zero, so that cntr.lastoffset is less */
+	cntr.offset = cntr.limit;
+
 	tc_init(&counter_timecounter);
 }
Index: sparc/sparc/timer_sun4m.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/timer_sun4m.c,v
retrieving revision 1.28
diff -p -u -u -r1.28 timer_sun4m.c
--- sparc/sparc/timer_sun4m.c	1 Sep 2011 08:43:24 -0000	1.28
+++ sparc/sparc/timer_sun4m.c	13 Jan 2018 21:11:27 -0000
@@ -74,7 +74,7 @@ __KERNEL_RCSID(0, "$NetBSD: timer_sun4m.
 #include <sparc/sparc/timerreg.h>
 #include <sparc/sparc/timervar.h>
 
-struct timer_4m *timerreg4m;
+static struct timer_4m *timerreg4m;
 #define counterreg4m	cpuinfo.counterreg_4m
 
 /*
@@ -142,9 +142,13 @@ clockintr_4m(void *cap)
  */
 	if (cold)
 		return 0;
+
 	kpreempt_disable();
-	/* read the limit register to clear the interrupt */
+
+	/* Read the limit register to clear the interrupt. */
 	*((volatile int *)&timerreg4m->t_limit);
+
+	/* Update the timecounter offset. */
 	tickle_tc();
 	hardclock((struct clockframe *)cap);
 	kpreempt_enable();
Index: sparc/sparc/timerreg.h
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/timerreg.h,v
retrieving revision 1.9
diff -p -u -u -r1.9 timerreg.h
--- sparc/sparc/timerreg.h	16 Nov 2005 03:00:23 -0000	1.9
+++ sparc/sparc/timerreg.h	13 Jan 2018 21:11:27 -0000
@@ -120,9 +120,12 @@ struct counter_4m {	/* counter that int
  */
 #define tmr_ustolim(n)	(((n) + 1) << TMR_SHIFT)
 
-/*efine TMR_SHIFT4M	9	-* shift to obtain microseconds */
-/*efine tmr_ustolim4m(n)	(((2*(n)) + 1) << TMR_SHIFT4M)*/
+#define TMR_SHIFT4M	9	/* shift to obtain microseconds */
+#if 1
+#define tmr_ustolim4m(n)	(((2*(n)) + 1) << TMR_SHIFT4M)
+#else
 #define tmr_ustolim4m(n)	((n) << TMR_SHIFT)
+#endif
 
 /* The number of microseconds represented by a counter register value */
 #define tmr_cnttous(c)	((((c) >> TMR_SHIFT) & TMR_MASK) - 1)
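
For anyone who wants to convince themselves the fixup logic above is sound
without booting a sun4m, here is a small stand-alone, single-threaded C
simulation of it.  Everything in it is hypothetical and only mirrors the
patch: hw_tick()/hw_read() stand in for the hardware counter and its latched
TMR_LIMIT bit, wrap_intr() stands in for the level-10 handler plus
tickle_tc(), and get_timecount() applies the same "add the limit" fixup as
timer_get_timecount().  The lastres early-boot workaround and the
splhigh/__cpu_simple_lock protection are deliberately omitted.

/*
 * Sketch only: simulates a free-running counter that wraps at LIMIT,
 * latches an overflow bit until the (deliberately slow) interrupt
 * handler clears it, and checks that the returned time never goes
 * backwards across a wrap.  Build with: cc -o tcsim tcsim.c
 */
#include <stdio.h>
#include <stdint.h>

#define LIMIT_BIT	0x80000000u	/* stands in for TMR_LIMIT */
#define LIMIT		1000u		/* counts per wrap */
#define INTR_DELAY	7		/* ticks before the handler runs */

static uint32_t hw_count;		/* simulated counter register */
static int	limit_latched;		/* overflow bit, cleared by handler */
static uint32_t offset;			/* sum of handled wraps (tickle_tc) */
static uint32_t lastcnt, lastoffset;	/* last raw count/offset observed */

static void
hw_tick(void)
{
	if (++hw_count == LIMIT) {
		hw_count = 0;		/* counter resets at the limit... */
		limit_latched = 1;	/* ...and the interrupt goes pending */
	}
}

static uint32_t
hw_read(void)
{
	return hw_count | (limit_latched ? LIMIT_BIT : 0);
}

/* The delayed "interrupt": clear the latch and account for the wrap. */
static void
wrap_intr(void)
{
	limit_latched = 0;
	offset += LIMIT;
}

static uint32_t
get_timecount(void)
{
	uint32_t cnt = hw_read();
	uint32_t res = cnt & ~LIMIT_BIT;
	uint32_t fixup = 0;

	/*
	 * Same two race cases as the patch: the limit bit is still set
	 * (wrap seen, interrupt not yet run), or the count has reset
	 * while the offset is unchanged; either way add the limit so
	 * the result cannot move backwards.
	 */
	if (cnt != res || (res < lastcnt && offset == lastoffset))
		fixup = LIMIT;

	lastcnt = res;
	lastoffset = offset;
	return res + fixup + offset;
}

int
main(void)
{
	uint32_t prev = 0, now;
	int pending = 0;

	for (int i = 0; i < 5000; i++) {
		hw_tick();
		if (limit_latched && ++pending == INTR_DELAY) {
			wrap_intr();	/* handler finally runs */
			pending = 0;
		}
		now = get_timecount();
		if (now < prev)
			printf("backwards at tick %d: %u < %u\n", i, now, prev);
		prev = now;
	}
	printf("final timecount %u after 5000 ticks\n", prev);
	return 0;
}

Run as-is it prints only the final line; remove the fixup term from
get_timecount() and the "backwards" message fires once per wrap, which is
exactly the symptom the patch is working around.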