# HG changeset patch
# User Taylor R Campbell
# Date 1590721859 0
#      Fri May 29 03:10:59 2020 +0000
# Branch trunk
# Node ID 4d69ce2cc0d48afeeb205845cf2f7b96a11518f1
# Parent  fe321082a5d07691f34cc5e5a87d4df6c2f23b8e
# EXP-Topic riastradh-hardclockentropy
Sample cycle counter or timecounter in hardclock.

This should stay as close as possible to the time of the actual
hardclock timer interrupt, so that the oscillator driving it
determines when we sample the cycle counter or timecounter.  If we
used a callout, there would be many other influences -- such as spin
lock delays possibly synchronized with this core's cycle counter --
that might get between the timer interrupt and the sample.

diff -r fe321082a5d0 -r 4d69ce2cc0d4 sys/kern/kern_clock.c
--- a/sys/kern/kern_clock.c	Thu May 28 23:13:11 2020 +0000
+++ b/sys/kern/kern_clock.c	Fri May 29 03:10:59 2020 +0000
@@ -90,6 +90,7 @@
 #include <sys/timetc.h>
 #include <sys/cpu.h>
 #include <sys/atomic.h>
+#include <sys/rndsource.h>
 
 #ifdef GPROF
 #include <sys/gmon.h>
@@ -138,6 +139,61 @@ static int hardscheddiv; /* hard => sche
 static int	psdiv;			/* prof => stat divider */
 int	psratio;			/* ratio: prof / stat */
 
+struct clockrnd {
+	struct krndsource source;
+	unsigned needed;
+};
+
+static struct clockrnd hardclockrnd __aligned(COHERENCY_UNIT);
+static struct clockrnd statclockrnd __aligned(COHERENCY_UNIT);
+
+static void
+clockrnd_get(size_t needed, void *cookie)
+{
+	struct clockrnd *C = cookie;
+
+	/* Start sampling.  */
+	atomic_store_relaxed(&C->needed, NBBY*needed);
+}
+
+static void
+clockrnd_sample(struct clockrnd *C)
+{
+	struct cpu_info *ci = curcpu();
+
+	/* If there's nothing needed right now, stop here.  */
+	if (__predict_true(C->needed == 0))
+		return;
+
+	/*
+	 * If we're not the primary core of a package, we're probably
+	 * driven by the same clock as the primary core, so don't
+	 * bother.
+	 */
+	if (ci != ci->ci_package1st)
+		return;
+
+	/* Take a sample and enter it into the pool.  */
+	rnd_add_uint32(&C->source, 0);
+
+	/*
+	 * On the primary CPU, count down.  Using an atomic decrement
+	 * here isn't really necessary -- on every platform we care
+	 * about, stores to unsigned int are atomic, and the only other
+	 * memory operation that could happen here is for another CPU
+	 * to store a higher value for needed.  But using an atomic
+	 * decrement avoids giving the impression of data races, and is
+	 * unlikely to hurt because only one CPU will ever be writing
+	 * to the location.
+	 */
+	if (CPU_IS_PRIMARY(curcpu())) {
+		unsigned needed __diagused;
+
+		needed = atomic_dec_uint_nv(&C->needed);
+		KASSERT(needed != UINT_MAX);
+	}
+}
+
 static u_int get_intr_timecount(struct timecounter *);
 
 static struct timecounter intr_timecounter = {
@@ -213,6 +269,16 @@ initclocks(void)
 	    SYSCTL_DESCR("Number of hardclock ticks"),
 	    NULL, 0, &hardclock_ticks, sizeof(hardclock_ticks),
 	    CTL_KERN, KERN_HARDCLOCK_TICKS, CTL_EOL);
+
+	rndsource_setcb(&hardclockrnd.source, clockrnd_get, &hardclockrnd);
+	rnd_attach_source(&hardclockrnd.source, "hardclock", RND_TYPE_SKEW,
+	    RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+	if (stathz) {
+		rndsource_setcb(&statclockrnd.source, clockrnd_get,
+		    &statclockrnd);
+		rnd_attach_source(&statclockrnd.source, "statclock",
+		    RND_TYPE_SKEW, RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+	}
 }
 
 /*
@@ -224,6 +290,8 @@ hardclock(struct clockframe *frame)
 	struct lwp *l;
 	struct cpu_info *ci;
 
+	clockrnd_sample(&hardclockrnd);
+
 	ci = curcpu();
 	l = ci->ci_onproc;
 
@@ -327,6 +395,9 @@ statclock(struct clockframe *frame)
 	struct proc *p;
 	struct lwp *l;
 
+	if (stathz)
+		clockrnd_sample(&statclockrnd);
+
 	/*
 	 * Notice changes in divisor frequency, and adjust clock
 	 * frequency accordingly.
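For readers who want to see the sampling pattern in isolation, here is a
minimal userland sketch of the same idea, assuming nothing beyond C11
atomics: a callback records how many bits the pool wants, and a periodic
tick routine takes one cheap sample and counts down only while that request
is outstanding.  The names (request_samples, tick_sample, bits_needed) are
hypothetical stand-ins for the rndsource callback and clockrnd_sample();
this is an illustration, not part of the patch above.

/* Illustrative sketch only -- not part of the patch. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static atomic_uint bits_needed;		/* hypothetical stand-in for C->needed */

/* Stand-in for the rndsource callback: the pool asks for nbytes bytes. */
static void
request_samples(size_t nbytes)
{
	atomic_store_explicit(&bits_needed, (unsigned)(8 * nbytes),
	    memory_order_relaxed);
}

/* Stand-in for clockrnd_sample(): called on every periodic tick. */
static void
tick_sample(void)
{
	/* Cheap early exit when nothing has been requested. */
	if (atomic_load_explicit(&bits_needed, memory_order_relaxed) == 0)
		return;

	/* The kernel would call rnd_add_uint32() here; the entropy comes
	 * from when the tick happened, not from the value passed in. */
	printf("take one timing sample\n");

	atomic_fetch_sub_explicit(&bits_needed, 1, memory_order_relaxed);
}

int
main(void)
{
	request_samples(2);		/* ask for 16 bits' worth of samples */
	for (int i = 0; i < 20; i++)
		tick_sample();		/* only the first 16 ticks sample */
	return 0;
}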