From dce35791dddc48cc39bdc9d89903ceda4087dc8e Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 17 Mar 2022 10:03:03 +0000
Subject: [PATCH 1/5] entropy(9): Create per-CPU state earlier.

This will make it possible to use it from interrupts as soon as they
start, which means the global entropy pool lock won't have to block
interrupts.
---
 sys/kern/kern_entropy.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index fc46a5d7847f..e689555ff4bb 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -382,6 +382,10 @@ entropy_init(void)
 	LIST_FOREACH(rs, &E->sources, list)
 		rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
 
+	/* Allocate and initialize the per-CPU state. */
+	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
+	    entropy_init_cpu, entropy_fini_cpu, NULL);
+
 	/* Enter the boot cycle count to get started. */
 	extra[i++] = entropy_timer();
 	KASSERT(i == __arraycount(extra));
@@ -406,10 +410,6 @@ entropy_init_late(void)
 
 	KASSERT(E->stage == ENTROPY_WARM);
 
-	/* Allocate and initialize the per-CPU state. */
-	entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
-	    entropy_init_cpu, entropy_fini_cpu, NULL);
-
 	/*
 	 * Establish the softint at the highest softint priority level.
 	 * Must happen after CPU detection.
@@ -453,8 +453,10 @@ entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 	ec->ec_pending = 0;
 	ec->ec_locked = false;
 
+	/* XXX ci_cpuname may not be initialized early enough. */
 	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
-	    ci->ci_cpuname, "entropy softint");
+	    ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname,
+	    "entropy softint");
 }
 
 /*
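[Note, not part of the series: patch 1 relies on the percpu_create(9)
pattern, in which the constructor runs on every CPU's copy of the
state, including CPUs that attach later; that is what makes it safe to
create the state this early and touch it from interrupt paths.  A
minimal sketch of the idiom follows; the example_* names are
hypothetical, not from the patch.]

#include <sys/percpu.h>
#include <sys/cpu.h>

struct example_cpu {
	unsigned ex_count;	/* per-CPU, so no lock is needed */
};

static percpu_t *example_percpu;

static void
example_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct example_cpu *ex = ptr;

	ex->ex_count = 0;
}

static void
example_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
{

	/* Nothing to tear down in this sketch. */
}

static void
example_init(void)
{

	/* Runs example_init_cpu on each CPU's copy, now and at attach. */
	example_percpu = percpu_create(sizeof(struct example_cpu),
	    example_init_cpu, example_fini_cpu, NULL);
}

[Callers then bracket access with percpu_getref/percpu_putref, as
entropy_enter_intr does in the hunks below.]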
From 8ba406975b4d298523518810c516d2e83dabf383 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 17 Mar 2022 10:40:32 +0000
Subject: [PATCH 2/5] entropy(9): Use the early-entropy path only while cold.

This way, we never take the global entropy lock from interrupt
handlers (no interrupts while cold), so the global entropy lock need
not block interrupts.

There's an annoying ordering issue here: softint_establish doesn't
work until after CPUs have been detected, which happens inside
configure(), which is also what enables interrupts.  So we have no
opportunity to softint_establish the entropy softint _before_
interrupts are enabled.

To work around this, we have to put a conditional into the interrupt
path, and go out of our way to process any queued samples after
establishing the softint.

If we just made softint_establish work early, like percpu_create does
now, this problem would go away and we could delete a bit of logic
here.

Candidate fix for PR kern/56730.
---
 sys/kern/kern_entropy.c | 56 +++++++++++++++++++++++++++-------------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index e689555ff4bb..bccf760cb857 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -396,6 +396,13 @@ entropy_init(void)
 	E->stage = ENTROPY_WARM;
 }
 
+static void
+entropy_init_late_cpu(void *a, void *b)
+{
+
+	entropy_softintr(NULL);
+}
+
 /*
  * entropy_init_late()
  *
@@ -406,6 +413,7 @@ entropy_init(void)
 static void
 entropy_init_late(void)
 {
+	void *sih;
 	int error;
 
 	KASSERT(E->stage == ENTROPY_WARM);
 
 	/*
 	 * Establish the softint at the highest softint priority level.
 	 * Must happen after CPU detection.
 	 */
-	entropy_sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
+	sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
 	    &entropy_softintr, NULL);
-	if (entropy_sih == NULL)
+	if (sih == NULL)
 		panic("unable to establish entropy softint");
 
 	/*
@@ -431,10 +439,22 @@
 	/*
 	 * Wait until the per-CPU initialization has hit all CPUs
-	 * before proceeding to mark the entropy system hot.
+	 * before proceeding to mark the entropy system hot and
+	 * enabling use of the softint.
 	 */
 	xc_barrier(XC_HIGHPRI);
 	E->stage = ENTROPY_HOT;
+	atomic_store_relaxed(&entropy_sih, sih);
+
+	/*
+	 * At this point, entering new samples from interrupt handlers
+	 * will trigger the softint to process them.  But there may be
+	 * some samples that were entered from interrupt handlers
+	 * before the softint was available.  Make sure we process
+	 * those samples on all CPUs by running the softint logic on
+	 * all CPUs.
+	 */
+	xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
 }
 
 /*
@@ -651,7 +671,7 @@ entropy_account_cpu(struct entropy_cpu *ec)
 {
 	unsigned diff;
 
-	KASSERT(E->stage == ENTROPY_HOT);
+	KASSERT(E->stage >= ENTROPY_WARM);
 
 	/*
 	 * If there's no entropy needed, and entropy has been
@@ -738,8 +758,7 @@ entropy_enter_early(const void *buf, size_t len, unsigned nbits)
 {
 	bool notify = false;
 
-	if (E->stage >= ENTROPY_WARM)
-		mutex_enter(&E->lock);
+	KASSERT(E->stage == ENTROPY_COLD);
 
 	/* Enter it into the pool. */
 	entpool_enter(&E->pool, buf, len);
@@ -758,9 +777,6 @@ entropy_enter_early(const void *buf, size_t len, unsigned nbits)
 		entropy_notify();
 		entropy_immediate_evcnt.ev_count++;
 	}
-
-	if (E->stage >= ENTROPY_WARM)
-		mutex_exit(&E->lock);
 }
 
 /*
@@ -784,7 +800,7 @@ entropy_enter(const void *buf, size_t len, unsigned nbits)
 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
 
 	/* If it's too early after boot, just use entropy_enter_early. */
-	if (__predict_false(E->stage < ENTROPY_HOT)) {
+	if (__predict_false(E->stage == ENTROPY_COLD)) {
 		entropy_enter_early(buf, len, nbits);
 		return;
 	}
@@ -839,12 +855,13 @@ entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
 	struct entropy_cpu *ec;
 	bool fullyused = false;
 	uint32_t pending;
+	void *sih;
 
 	KASSERTMSG(howmany(nbits, NBBY) <= len,
 	    "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
 
 	/* If it's too early after boot, just use entropy_enter_early. */
-	if (__predict_false(E->stage < ENTROPY_HOT)) {
+	if (__predict_false(E->stage == ENTROPY_COLD)) {
 		entropy_enter_early(buf, len, nbits);
 		return true;
 	}
@@ -865,7 +882,9 @@ entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
 	 * truncated, schedule a softint to stir the pool and stop.
 	 */
 	if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
-		softint_schedule(entropy_sih);
+		sih = atomic_load_relaxed(&entropy_sih);
+		if (__predict_true(sih != NULL))
+			softint_schedule(sih);
 		goto out1;
 	}
 	fullyused = true;
@@ -878,8 +897,11 @@ entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
 	/* Schedule a softint if we added anything and it matters. */
 	if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
 		atomic_load_relaxed(&entropy_depletion)) &&
-	    nbits != 0)
-		softint_schedule(entropy_sih);
+	    nbits != 0) {
+		sih = atomic_load_relaxed(&entropy_sih);
+		if (__predict_true(sih != NULL))
+			softint_schedule(sih);
+	}
 out1:	/* Release the per-CPU state. */
 	KASSERT(ec->ec_locked);
@@ -1873,9 +1895,7 @@ rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
 	 * contributed from this source.
 	 */
 	if (fullyused) {
-		if (E->stage < ENTROPY_HOT) {
-			if (E->stage >= ENTROPY_WARM)
-				mutex_enter(&E->lock);
+		if (__predict_false(E->stage == ENTROPY_COLD)) {
 			rs->total = add_sat(rs->total, entropybits);
 			switch (flag) {
 			case RND_FLAG_COLLECT_TIME:
@@ -1887,8 +1907,6 @@ rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
 				    add_sat(rs->value_delta.insamples, 1);
 				break;
 			}
-			if (E->stage >= ENTROPY_WARM)
-				mutex_exit(&E->lock);
 		} else {
 			struct rndsource_cpu *rc = percpu_getref(rs->state);
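[Note, not part of the series: the heart of patch 2 is a
publish-then-sweep idiom.  The softint handle starts out null;
interrupt handlers load it with atomic_load_relaxed and skip
softint_schedule while it is still null; and the xc_broadcast after
publication sweeps up whatever was queued too early.  Below is a
standalone userspace C sketch of the same shape, using C11 atomics in
place of the kernel's atomic_load/store_relaxed; all names in it are
hypothetical.]

#include <stdatomic.h>
#include <stdio.h>

static int softint;		/* stands in for the entropy softint */
static _Atomic(int *) sih;	/* like entropy_sih: null until published */
static atomic_uint pending;	/* samples queued on this "CPU" */

static void
enter_sample(void)
{
	int *h = atomic_load_explicit(&sih, memory_order_relaxed);

	atomic_fetch_add(&pending, 1);		/* queue the sample */
	if (h != NULL)
		printf("schedule softint\n");	/* softint_schedule(h) */
	/* else too early: the sample waits for the post-publication sweep */
}

int
main(void)
{

	enter_sample();		/* before publication: queued, not scheduled */

	/* Publish the handle, then sweep anything queued too early. */
	atomic_store_explicit(&sih, &softint, memory_order_relaxed);
	printf("sweep processes %u queued samples\n",
	    atomic_exchange(&pending, 0));

	enter_sample();		/* now schedules the softint directly */
	return 0;
}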
From 854f55dde1b59fe7971b08bef9917aa641845c0a Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 17 Mar 2022 10:47:23 +0000
Subject: [PATCH 3/5] entropy(9): Request entropy after the softint is enabled.

Otherwise, there is a window during which interrupts are running, but
the softint is not, so if many interrupts queue (low-entropy) samples
early at boot, they might get dropped on the floor.  This could
happen, for instance, with a PCI RNG like ubsec(4) or hifn(4) which
requests entropy and processes it in its own hard interrupt handler.
---
 sys/kern/kern_entropy.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index bccf760cb857..839749341350 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -2431,6 +2431,7 @@ rnd_init_softint(void)
 {
 
 	entropy_init_late();
+	entropy_bootrequest();
 }
 
 int

From d88a16d9a22ce9f2d9f140bece3f73fc434ea057 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 17 Mar 2022 10:51:06 +0000
Subject: [PATCH 4/5] entropy(9): Reduce global entropy lock from IPL_VM to IPL_SOFTSERIAL.

This is no longer ever taken in hard interrupt context, so there's no
longer any need to block interrupts while doing crypto operations on
the global entropy pool.
---
 sys/kern/kern_entropy.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index 839749341350..2c6680b96ebf 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -366,7 +366,7 @@ entropy_init(void)
 		       NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
 
 	/* Initialize the global state for multithreaded operation. */
-	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM);
+	mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
 	cv_init(&E->cv, "entropy");
 	selinit(&E->selq);
 	cv_init(&E->sourcelock_cv, "entsrclock");
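[Note, not part of the series: the IPL argument to mutex_init(9) must
match the highest-priority context that ever takes the lock, and a
lock initialized at IPL_VM blocks most hard interrupts for as long as
it is held.  After patches 1-3 the global entropy lock is only taken
from thread and softint context, which is what justifies patch 4.  A
sketch of the rule, with hypothetical example locks:]

#include <sys/mutex.h>

static kmutex_t example_soft_lock;	/* taken from threads/softints only */
static kmutex_t example_intr_lock;	/* also taken from hard interrupts */

static void
example_locks_init(void)
{

	/*
	 * Never taken from hard interrupt handlers, so holding it
	 * leaves hard interrupts unblocked.  This is the situation
	 * the global entropy lock is in after patches 1-3.
	 */
	mutex_init(&example_soft_lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);

	/*
	 * Taken from hard interrupt handlers too, so it must block
	 * those interrupts while held, as the entropy lock did at
	 * IPL_VM before this series.
	 */
	mutex_init(&example_intr_lock, MUTEX_DEFAULT, IPL_VM);
}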
From a0215677331af4419477770f57202da79705521d Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Thu, 17 Mar 2022 23:40:36 +0000
Subject: [PATCH 5/5] entropy(9): Count dropped or truncated interrupt samples.

---
 sys/kern/kern_entropy.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c
index 2c6680b96ebf..72d449dc5287 100644
--- a/sys/kern/kern_entropy.c
+++ b/sys/kern/kern_entropy.c
@@ -130,7 +130,11 @@ __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.35 2022/03/16 23:56:55 riastradh
  * evcnt(9) assumes it stays put in memory.
  */
 struct entropy_cpu {
-	struct evcnt *ec_softint_evcnt;
+	struct entropy_cpu_evcnt {
+		struct evcnt softint;
+		struct evcnt intrdrop;
+		struct evcnt intrtrunc;
+	} *ec_evcnt;
 	struct entpool *ec_pool;
 	unsigned ec_pending;
 	bool ec_locked;
@@ -466,17 +470,21 @@ static void
 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 {
 	struct entropy_cpu *ec = ptr;
+	const char *cpuname;
 
-	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
-	    KM_SLEEP);
+	ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
 	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
 	ec->ec_pending = 0;
 	ec->ec_locked = false;
 
 	/* XXX ci_cpuname may not be initialized early enough. */
-	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
-	    ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname,
-	    "entropy softint");
+	cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
+	evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
+	    cpuname, "entropy softint");
+	evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
+	    cpuname, "entropy intrdrop");
+	evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
+	    cpuname, "entropy intrtrunc");
 }
 
 /*
@@ -497,10 +505,12 @@ entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 	 */
 	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
 
-	evcnt_detach(ec->ec_softint_evcnt);
+	evcnt_detach(&ec->ec_evcnt->intrtrunc);
+	evcnt_detach(&ec->ec_evcnt->intrdrop);
+	evcnt_detach(&ec->ec_evcnt->softint);
 
 	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
-	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
+	kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
 }
 
 /*
@@ -872,8 +882,10 @@ entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
 	 * higher-priority interrupts will drop their samples.
 	 */
 	ec = percpu_getref(entropy_percpu);
-	if (ec->ec_locked)
+	if (ec->ec_locked) {
+		ec->ec_evcnt->intrdrop.ev_count++;
 		goto out0;
+	}
 	ec->ec_locked = true;
 	__insn_barrier();
 
@@ -885,6 +897,7 @@ entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
 		sih = atomic_load_relaxed(&entropy_sih);
 		if (__predict_true(sih != NULL))
 			softint_schedule(sih);
+		ec->ec_evcnt->intrtrunc.ev_count++;
 		goto out1;
 	}
 	fullyused = true;
@@ -936,7 +949,7 @@ entropy_softintr(void *cookie)
 	__insn_barrier();
 
 	/* Count statistics. */
-	ec->ec_softint_evcnt->ev_count++;
+	ec->ec_evcnt->softint.ev_count++;
 
 	/* Stir the pool if necessary. */
 	entpool_stir(ec->ec_pool);
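[Closing note, not part of the series: patch 5 bundles several
evcnt(9) counters into a single per-CPU allocation, keeping one
kmem_alloc per CPU, and attaches each counter with the CPU name as the
group, so the counters show up side by side in vmstat -e.  A sketch of
the attach/detach pairing, with hypothetical example_* names:]

#include <sys/evcnt.h>
#include <sys/kmem.h>

struct example_evcnt {
	struct evcnt ok;
	struct evcnt dropped;
};

static struct example_evcnt *
example_evcnt_attach(const char *cpuname)
{
	struct example_evcnt *ev;

	/*
	 * evcnt(9) assumes the counters stay put in memory, and it
	 * keeps the group/name pointers, so cpuname must outlive the
	 * counters (ci_cpuname does in the real code).
	 */
	ev = kmem_zalloc(sizeof(*ev), KM_SLEEP);
	evcnt_attach_dynamic(&ev->ok, EVCNT_TYPE_MISC, NULL,
	    cpuname, "example ok");
	evcnt_attach_dynamic(&ev->dropped, EVCNT_TYPE_MISC, NULL,
	    cpuname, "example dropped");
	return ev;
}

static void
example_evcnt_detach(struct example_evcnt *ev)
{

	/* Detach in reverse order of attachment, then free. */
	evcnt_detach(&ev->dropped);
	evcnt_detach(&ev->ok);
	kmem_free(ev, sizeof(*ev));
}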