# HG changeset patch # User Taylor R Campbell # Date 1590722312 0 # Fri May 29 03:18:32 2020 +0000 # Branch trunk # Node ID 314969a7d280643b43f1d2f00d3555f7ce425925 # Parent 4d69ce2cc0d48afeeb205845cf2f7b96a11518f1 # EXP-Topic riastradh-hardclockentropy Implement a callout rndsource (again). This is a bit simpler than before: we simply schedule a callout to sample the cycle counter or timecounter every tick for n ticks. If the cycle counter and the callout timer are driven by independent oscillators, there may be some jitter between them. By doing this in a callout, we do not take a sample exactly when the hardclock timer interrupt occurs, but this way we don't add a new dependency on the hardclock timer itself, so perhaps this is better for the glorious tickless future. diff -r 4d69ce2cc0d4 -r 314969a7d280 sys/kern/kern_entropy.c --- a/sys/kern/kern_entropy.c Fri May 29 03:10:59 2020 +0000 +++ b/sys/kern/kern_entropy.c Fri May 29 03:18:32 2020 +0000 @@ -80,6 +80,7 @@ #include #include #include +#include #include #include #include @@ -187,6 +188,9 @@ static struct lwp *entropy_lwp __read_mo int rnd_initial_entropy __read_mostly; /* XXX legacy */ static struct krndsource seed_rndsource __read_mostly; +static struct krndsource callout_rndsource __read_mostly; +static volatile unsigned callout_rnd_bitsneeded; +static struct callout callout_rnd_ch; /* * Event counters @@ -254,6 +258,9 @@ static void rndsource_entropybits_cpu(vo static void rndsource_to_user(struct krndsource *, rndsource_t *); static void rndsource_to_user_est(struct krndsource *, rndsource_est_t *); +static void callout_rnd_get(size_t, void *); +static void callout_rnd_intr(void *); + /* * entropy_timer() * @@ -425,6 +432,13 @@ entropy_init_late(void) panic("unable to create entropy housekeeping thread: %d", error); + /* Create the callout rndsource. 
*/ + callout_init(&callout_rnd_ch, CALLOUT_MPSAFE); + callout_setfunc(&callout_rnd_ch, &callout_rnd_intr, NULL); + rndsource_setcb(&callout_rndsource, &callout_rnd_get, NULL); + rnd_attach_source(&callout_rndsource, "callout", RND_TYPE_SKEW, + RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB); + /* * Wait until the per-CPU initialization has hit all CPUs * before proceeding to mark the entropy system hot. @@ -2346,3 +2360,52 @@ rnd_system_ioctl(struct file *fp, unsign return entropy_ioctl(cmd, data); } + +/* + * callout_rndsource + * + * We devise a `ring oscillator' on systems where the cycle + * counter, or selected timecounter, is driven by a clock + * independent of the clock driving the callout timer interrupts. + * + * Normally a `ring oscillator' entropy source is a free-running + * oscillator driving an odd number of inverters, sampled + * periodically by another oscillator to obtain one bit of data + * per sample; jitter between the two clocks manifests in the + * samples, with some jitter arising from dependent phenomena + * in the clocks (which we don't want) and some jitter arising + * from unpredictable thermal noise (which we do want). + * + * We do not have a nice series of inverters here, so there's no + * bit to sample; instead we sample the CPU cycle counter or + * timecounter, in the hope that they are not driven by the same + * underlying oscillator. This is a rather tenuous hope without + * more specific knowledge of the hardware than we can have in MI + * logic like this. + */ + +static void +callout_rnd_get(size_t bytesneeded, void *cookie) +{ + + /* If the callout was previously not running, start it. */ + if (atomic_swap_uint(&callout_rnd_bitsneeded, 2*NBBY*bytesneeded) == 0) + callout_schedule(&callout_rnd_ch, 1); +} + +static void +callout_rnd_intr(void *cookie) +{ + + /* Sample the cycle counter or timecounter. */ + rnd_add_uint32(&callout_rndsource, 0); + + /* + * Count down a bit. If this was the last one, stop. 
+ * Otherwise, schedule to sample the cycle counter or + * timecounter at the next timer interrupt. + */ + if (atomic_dec_uint_nv(&callout_rnd_bitsneeded) == 0) + return; + callout_schedule(&callout_rnd_ch, 1); +}