Index: sys/kern/kern_rndq.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_rndq.c,v
retrieving revision 1.80
diff -p -u -r1.80 kern_rndq.c
--- sys/kern/kern_rndq.c	17 Feb 2016 01:23:32 -0000	1.80
+++ sys/kern/kern_rndq.c	17 Feb 2016 15:49:37 -0000
@@ -264,8 +264,10 @@ void
 rnd_getmore(size_t byteswanted)
 {
 	krndsource_t *rs, *next;
+	size_t before, after;
 
 	mutex_spin_enter(&rnd_global.lock);
+	before = rndpool_get_entropy_count(&rnd_global.pool);
 	LIST_FOREACH_SAFE(rs, &rnd_global.sources, list, next) {
 		/* Skip if there's no callback. */
 		if (!ISSET(rs->flags, RND_FLAG_HASCB))
@@ -294,18 +296,21 @@ rnd_getmore(size_t byteswanted)
 		rnd_printf_verbose("rnd: asking source %s for %zu bytes\n",
 		    rs->name, byteswanted);
 	}
+	after = rndpool_get_entropy_count(&rnd_global.pool);
 	mutex_spin_exit(&rnd_global.lock);
 
 	/*
-	 * Assume some callback is likely to have entered entropy
-	 * synchronously. In that case, we may need to distribute
-	 * entropy to waiters. Do that, if we can do it
-	 * asynchronously. (Otherwise we may end up trying to
-	 * distribute to the very rndsink that is trying to get more
-	 * entropy in the first place, leading to lock recursion in
-	 * that rndsink's callback.)
-	 */
-	if (__predict_true(rnd_process))
+	 * Check whether we got enough entropy to go over the
+	 * threshold. In that case, we may need to distribute entropy
+	 * to waiters. Do that, if we can do it asynchronously.
+	 *
+	 * - Conditionally because we don't want a softint loop.
+	 * - Asynchronously because if we did it synchronously, we may
+	 *   end up with lock recursion on rndsinks_lock.
+	 */
+	if (before < RND_ENTROPY_THRESHOLD*NBBY &&
+	    RND_ENTROPY_THRESHOLD*NBBY <= after &&
+	    rnd_process != NULL)
 		rnd_schedule_process();
 }
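
For illustration only (not part of the patch), a minimal standalone sketch of the before/after threshold-crossing check the new code performs: sample the pool's entropy count, poll the sources, sample again, and schedule distribution only on an upward crossing of the threshold. All names below (THRESHOLD_BITS, pool_entropy_bits, poll_sources, schedule_process) are hypothetical stand-ins for RND_ENTROPY_THRESHOLD*NBBY, rndpool_get_entropy_count, the callback loop over rnd_global.sources, and rnd_schedule_process.

/*
 * Hypothetical, self-contained sketch of the threshold-crossing logic:
 * only an upward crossing of the threshold schedules distribution, so
 * repeated calls while the pool stays above it do nothing.
 */
#include <stdio.h>

#define THRESHOLD_BITS	256	/* placeholder; the kernel uses RND_ENTROPY_THRESHOLD*NBBY */

static unsigned pool_bits;	/* stands in for the global pool's entropy count */

static unsigned
pool_entropy_bits(void)		/* stands in for rndpool_get_entropy_count() */
{
	return pool_bits;
}

static void
poll_sources(void)		/* stands in for the callback loop over rnd_global.sources */
{
	pool_bits += 200;	/* pretend some callback entered entropy synchronously */
}

static void
schedule_process(void)		/* stands in for rnd_schedule_process() */
{
	printf("scheduling asynchronous entropy distribution\n");
}

static void
getmore(void)
{
	unsigned before, after;

	before = pool_entropy_bits();
	poll_sources();
	after = pool_entropy_bits();

	/* Act only when the count crosses the threshold from below. */
	if (before < THRESHOLD_BITS && THRESHOLD_BITS <= after)
		schedule_process();
}

int
main(void)
{
	getmore();	/*   0 -> 200: below threshold, stays quiet */
	getmore();	/* 200 -> 400: crosses threshold, schedules once */
	getmore();	/* 400 -> 600: already above, stays quiet */
	return 0;
}

Checking the crossing rather than the absolute count is what avoids a softint loop: once the pool is already over the threshold, further calls no longer reschedule processing.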