Index: sys/arch/i386/conf/Makefile.i386 =================================================================== RCS file: /cvsroot/src/sys/arch/i386/conf/Makefile.i386,v retrieving revision 1.195 retrieving revision 1.196 diff -p -u -r1.195 -r1.196 --- sys/arch/i386/conf/Makefile.i386 25 Apr 2020 15:26:16 -0000 1.195 +++ sys/arch/i386/conf/Makefile.i386 11 May 2020 15:15:15 -0000 1.196 @@ -1,4 +1,4 @@ -# $NetBSD: Makefile.i386,v 1.195 2020/04/25 15:26:16 bouyer Exp $ +# $NetBSD: Makefile.i386,v 1.196 2020/05/11 15:15:15 joerg Exp $ # Makefile for NetBSD # @@ -40,9 +40,9 @@ CFLAGS+= -msoft-float ## no-sse implies no-sse2 but not no-avx CFLAGS+= -mno-mmx -mno-sse -mno-avx -.if ${SPECTRE_V2_GCC_MITIGATION:U0} > 0 && ${HAVE_GCC:U0} > 0 -CFLAGS+= -mindirect-branch=thunk -CFLAGS+= -mindirect-branch-register +.if ${SPECTRE_V2_GCC_MITIGATION:U0} > 0 +CFLAGS+= ${${ACTIVE_CC} == "gcc" :? -mindirect-branch=thunk :} +CFLAGS+= ${${ACTIVE_CC} == "gcc" :? -mindirect-branch-register :} .endif EXTRA_INCLUDES= -I$S/external/mit/xen-include-public/dist/ Index: sys/arch/i386/stand/lib/exec_multiboot2.c =================================================================== RCS file: /cvsroot/src/sys/arch/i386/stand/lib/exec_multiboot2.c,v retrieving revision 1.3 retrieving revision 1.4 diff -p -u -r1.3 -r1.4 --- sys/arch/i386/stand/lib/exec_multiboot2.c 18 Oct 2019 01:15:54 -0000 1.3 +++ sys/arch/i386/stand/lib/exec_multiboot2.c 14 May 2020 08:34:20 -0000 1.4 @@ -1,4 +1,4 @@ -/* $NetBSD: exec_multiboot2.c,v 1.3 2019/10/18 01:15:54 manu Exp $ */ +/* $NetBSD: exec_multiboot2.c,v 1.4 2020/05/14 08:34:20 msaitoh Exp $ */ /* * Copyright (c) 2019 The NetBSD Foundation, Inc. 
@@ -863,7 +863,7 @@ mbi_apm(struct multiboot_package *mbp, v mbt->cseg = 0; mbt->offset = 0; mbt->cseg_16 = 0; - mbt->dseg = 0;; + mbt->dseg = 0; mbt->flags = 0; mbt->cseg_len = 0; mbt->cseg_16_len = 0; Index: sys/arch/x86/include/cpu_counter.h =================================================================== RCS file: /cvsroot/src/sys/arch/x86/include/cpu_counter.h,v retrieving revision 1.5 retrieving revision 1.6 diff -p -u -r1.5 -r1.6 --- sys/arch/x86/include/cpu_counter.h 2 Feb 2011 12:26:42 -0000 1.5 +++ sys/arch/x86/include/cpu_counter.h 8 May 2020 22:01:54 -0000 1.6 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu_counter.h,v 1.5 2011/02/02 12:26:42 bouyer Exp $ */ +/* $NetBSD: cpu_counter.h,v 1.6 2020/05/08 22:01:54 ad Exp $ */ /*- * Copyright (c) 2000, 2008 The NetBSD Foundation, Inc. @@ -35,7 +35,6 @@ #ifdef _KERNEL uint64_t cpu_counter(void); -uint64_t cpu_counter_serializing(void); uint32_t cpu_counter32(void); uint64_t cpu_frequency(struct cpu_info *); int cpu_hascounter(void); Index: sys/arch/x86/include/cpu_rng.h =================================================================== RCS file: /cvsroot/src/sys/arch/x86/include/cpu_rng.h,v retrieving revision 1.3 retrieving revision 1.4 diff -p -u -r1.3 -r1.4 --- sys/arch/x86/include/cpu_rng.h 30 Apr 2020 03:29:19 -0000 1.3 +++ sys/arch/x86/include/cpu_rng.h 10 May 2020 06:30:57 -0000 1.4 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu_rng.h,v 1.3 2020/04/30 03:29:19 riastradh Exp $ */ +/* $NetBSD: cpu_rng.h,v 1.4 2020/05/10 06:30:57 maxv Exp $ */ #ifndef _X86_CPU_RNG_H_ #define _X86_CPU_RNG_H_ @@ -33,5 +33,6 @@ */ void cpu_rng_init(void); +void cpu_rng_early_sample(uint64_t *); #endif /* _X86_CPU_RNG_H_ */ Index: sys/arch/x86/x86/cpu.c =================================================================== RCS file: /cvsroot/src/sys/arch/x86/x86/cpu.c,v retrieving revision 1.189 retrieving revision 1.191 diff -p -u -r1.189 -r1.191 --- sys/arch/x86/x86/cpu.c 2 May 2020 16:44:36 -0000 1.189 +++ sys/arch/x86/x86/cpu.c 12 May 2020 06:32:05 
-0000 1.191 @@ -1,7 +1,7 @@ -/* $NetBSD: cpu.c,v 1.189 2020/05/02 16:44:36 bouyer Exp $ */ +/* $NetBSD: cpu.c,v 1.191 2020/05/12 06:32:05 msaitoh Exp $ */ /* - * Copyright (c) 2000-2012 NetBSD Foundation, Inc. + * Copyright (c) 2000-2020 NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation @@ -62,7 +62,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.189 2020/05/02 16:44:36 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.191 2020/05/12 06:32:05 msaitoh Exp $"); #include "opt_ddb.h" #include "opt_mpbios.h" /* for MPDEBUG */ @@ -73,6 +73,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.18 #include "lapic.h" #include "ioapic.h" #include "acpica.h" +#include "hpet.h" #include #include @@ -119,6 +120,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.18 #endif #include +#include #include #include @@ -433,8 +435,14 @@ cpu_attach(device_t parent, device_t sel * must be done to allow booting other processors. */ if (!again) { + /* Make sure DELAY() (likely i8254_delay()) is initialized. */ + DELAY(1); + + /* + * Basic init. Compute an approximate frequency for the TSC + * using the i8254. If there's a HPET we'll redo it later. + */ atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY); - /* Basic init. */ cpu_intr_init(ci); cpu_get_tsc_freq(ci); cpu_init(ci); @@ -451,8 +459,6 @@ cpu_attach(device_t parent, device_t sel lapic_calibrate_timer(ci); } #endif - /* Make sure DELAY() is initialized. */ - DELAY(1); kcsan_cpu_init(ci); again = true; } @@ -718,7 +724,6 @@ cpu_init(struct cpu_info *ci) if (ci != &cpu_info_primary) { /* Synchronize TSC */ - wbinvd(); atomic_or_32(&ci->ci_flags, CPUF_RUNNING); tsc_sync_ap(ci); } else { @@ -734,6 +739,14 @@ cpu_boot_secondary_processors(void) kcpuset_t *cpus; u_long i; +#if NHPET > 0 + /* Use HPET delay, and re-calibrate TSC on boot CPU using HPET. 
*/ + if (hpet_delay_p() && x86_delay == i8254_delay) { + delay_func = x86_delay = hpet_delay; + cpu_get_tsc_freq(curcpu()); + } +#endif + /* Now that we know the number of CPUs, patch the text segment. */ x86_patch(false); @@ -842,7 +855,6 @@ cpu_start_secondary(struct cpu_info *ci) */ psl = x86_read_psl(); x86_disable_intr(); - wbinvd(); tsc_sync_bp(ci); x86_write_psl(psl); } @@ -873,7 +885,6 @@ cpu_boot_secondary(struct cpu_info *ci) drift = ci->ci_data.cpu_cc_skew; psl = x86_read_psl(); x86_disable_intr(); - wbinvd(); tsc_sync_bp(ci); x86_write_psl(psl); drift -= ci->ci_data.cpu_cc_skew; @@ -919,7 +930,6 @@ cpu_hatch(void *v) * Synchronize the TSC for the first time. Note that interrupts are * off at this point. */ - wbinvd(); atomic_or_32(&ci->ci_flags, CPUF_PRESENT); tsc_sync_ap(ci); @@ -1310,21 +1320,58 @@ cpu_shutdown(device_t dv, int how) void cpu_get_tsc_freq(struct cpu_info *ci) { - uint64_t freq = 0, last_tsc; + uint64_t freq = 0, freq_from_cpuid, t0, t1; + int64_t overhead; - if (cpu_hascounter()) - freq = cpu_tsc_freq_cpuid(ci); + if ((ci->ci_flags & CPUF_PRIMARY) != 0 && cpu_hascounter()) { + /* + * If it's the first call of this function, try to get TSC + * freq from CPUID by calling cpu_tsc_freq_cpuid(). + * The function also set lapic_per_second variable if it's + * known. This is required for Intel's Comet Lake and newer + * processors to set LAPIC timer correctly. + */ + if (ci->ci_data.cpu_cc_freq == 0) + freq = freq_from_cpuid = cpu_tsc_freq_cpuid(ci); +#if NHPET > 0 + if (freq == 0) + freq = hpet_tsc_freq(); +#endif + if (freq == 0) { + /* + * Work out the approximate overhead involved below. + * Discard the result of the first go around the + * loop. + */ + overhead = 0; + for (int i = 0; i <= 8; i++) { + t0 = cpu_counter(); + x86_delay(0); + t1 = cpu_counter(); + if (i > 0) { + overhead += (t1 - t0); + } + } + overhead >>= 3; - if (freq != 0) { - /* Use TSC frequency taken from CPUID. 
*/ - ci->ci_data.cpu_cc_freq = freq; + /* Now do the calibration. */ + t0 = cpu_counter(); + x86_delay(100000); + t1 = cpu_counter(); + freq = (t1 - t0 - overhead) * 10; + } + if (ci->ci_data.cpu_cc_freq != 0) { + freq_from_cpuid = cpu_tsc_freq_cpuid(ci); + if ((freq_from_cpuid != 0) + && (freq != freq_from_cpuid)) + aprint_verbose_dev(ci->ci_dev, "TSC freq " + "calibrated %" PRIu64 " Hz\n", freq); + } } else { - /* Calibrate TSC frequency. */ - last_tsc = cpu_counter_serializing(); - delay_func(100000); - ci->ci_data.cpu_cc_freq = - (cpu_counter_serializing() - last_tsc) * 10; + freq = cpu_info_primary.ci_data.cpu_cc_freq; } + + ci->ci_data.cpu_cc_freq = freq; } void Index: sys/arch/x86/x86/cpu_rng.c =================================================================== RCS file: /cvsroot/src/sys/arch/x86/x86/cpu_rng.c,v retrieving revision 1.13 retrieving revision 1.14 diff -p -u -r1.13 -r1.14 --- sys/arch/x86/x86/cpu_rng.c 30 Apr 2020 03:40:53 -0000 1.13 +++ sys/arch/x86/x86/cpu_rng.c 10 May 2020 06:30:57 -0000 1.14 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu_rng.c,v 1.13 2020/04/30 03:40:53 riastradh Exp $ */ +/* $NetBSD: cpu_rng.c,v 1.14 2020/05/10 06:30:57 maxv Exp $ */ /*- * Copyright (c) 2015 The NetBSD Foundation, Inc. 
@@ -283,3 +283,37 @@ cpu_rng_init(void) rnd_attach_source(&cpu_rng_source, cpu_rng_name[cpu_rng_mode], RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB); } + +/* -------------------------------------------------------------------------- */ + +void +cpu_rng_early_sample(uint64_t *sample) +{ + static bool has_rdseed = false; + static bool has_rdrand = false; + static bool inited = false; + u_int descs[4]; + size_t n; + + if (!inited) { + if (cpuid_level >= 7) { + x86_cpuid(0x07, descs); + has_rdseed = (descs[1] & CPUID_SEF_RDSEED) != 0; + } + if (cpuid_level >= 1) { + x86_cpuid(0x01, descs); + has_rdrand = (descs[2] & CPUID2_RDRAND) != 0; + } + inited = true; + } + + n = 0; + if (has_rdseed && has_rdrand) + n = cpu_rng_rdseed_rdrand(sample); + else if (has_rdseed) + n = cpu_rng_rdseed(sample); + else if (has_rdrand) + n = cpu_rng_rdrand(sample); + if (n == 0) + *sample = rdtsc(); +} Index: sys/arch/x86/x86/identcpu_subr.c =================================================================== RCS file: /cvsroot/src/sys/arch/x86/x86/identcpu_subr.c,v retrieving revision 1.3 retrieving revision 1.4 diff -p -u -r1.3 -r1.4 --- sys/arch/x86/x86/identcpu_subr.c 25 Apr 2020 15:26:18 -0000 1.3 +++ sys/arch/x86/x86/identcpu_subr.c 12 May 2020 06:32:05 -0000 1.4 @@ -33,7 +33,7 @@ * See src/usr.sbin/cpuctl/{Makefile, arch/i386.c}). 
*/ #include -__KERNEL_RCSID(0, "$NetBSD: identcpu_subr.c,v 1.3 2020/04/25 15:26:18 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: identcpu_subr.c,v 1.4 2020/05/12 06:32:05 msaitoh Exp $"); #ifdef _KERNEL_OPT #include "lapic.h" @@ -139,8 +139,8 @@ cpu_tsc_freq_cpuid(struct cpu_info *ci) #endif } if (freq != 0) - aprint_verbose_dev(ci->ci_dev, "TSC freq %" PRIu64 " Hz\n", - freq); + aprint_verbose_dev(ci->ci_dev, "TSC freq CPUID %" PRIu64 + " Hz\n", freq); return freq; } Index: sys/arch/x86/x86/tsc.c =================================================================== RCS file: /cvsroot/src/sys/arch/x86/x86/tsc.c,v retrieving revision 1.43 retrieving revision 1.44 diff -p -u -r1.43 -r1.44 --- sys/arch/x86/x86/tsc.c 25 Apr 2020 15:26:18 -0000 1.43 +++ sys/arch/x86/x86/tsc.c 8 May 2020 22:01:55 -0000 1.44 @@ -1,4 +1,4 @@ -/* $NetBSD: tsc.c,v 1.43 2020/04/25 15:26:18 bouyer Exp $ */ +/* $NetBSD: tsc.c,v 1.44 2020/05/08 22:01:55 ad Exp $ */ /*- * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc. @@ -27,7 +27,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.43 2020/04/25 15:26:18 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.44 2020/05/08 22:01:55 ad Exp $"); #include #include @@ -47,12 +47,14 @@ __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.43 #include "tsc.h" +#define TSC_SYNC_ROUNDS 1000 +#define ABS(a) ((a) >= 0 ? 
(a) : -(a)) + u_int tsc_get_timecount(struct timecounter *); uint64_t tsc_freq; /* exported for sysctl */ -static int64_t tsc_drift_max = 250; /* max cycles */ +static int64_t tsc_drift_max = 1000; /* max cycles */ static int64_t tsc_drift_observed; -static bool tsc_good; int tsc_user_enabled = 1; @@ -158,9 +160,6 @@ tsc_tc_init(void) ci = curcpu(); tsc_freq = ci->ci_data.cpu_cc_freq; - tsc_good = (cpu_feature[0] & CPUID_MSR) != 0 && - (rdmsr(MSR_TSC) != 0 || rdmsr(MSR_TSC) != 0); - invariant = tsc_is_invariant(); if (!invariant) { aprint_debug("TSC not known invariant on this CPU\n"); @@ -206,13 +205,12 @@ tsc_read_bp(struct cpu_info *ci, uint64_ /* Flag it and read our TSC. */ atomic_or_uint(&ci->ci_flags, CPUF_SYNCTSC); - bptsc = (rdtsc() >> 1); /* Wait for remote to complete, and read ours again. */ while ((ci->ci_flags & CPUF_SYNCTSC) != 0) { __insn_barrier(); } - bptsc += (rdtsc() >> 1); + bptsc = rdtsc(); /* Wait for the results to come in. */ while (tsc_sync_cpu == ci) { @@ -229,17 +227,21 @@ tsc_read_bp(struct cpu_info *ci, uint64_ void tsc_sync_bp(struct cpu_info *ci) { - int64_t bptsc, aptsc, bsum = 0, asum = 0; + int64_t bptsc, aptsc, val, diff; - tsc_read_bp(ci, &bptsc, &aptsc); /* discarded - cache effects */ - for (int i = 0; i < 8; i++) { + if (!cpu_hascounter()) + return; + + val = INT64_MAX; + for (int i = 0; i < TSC_SYNC_ROUNDS; i++) { tsc_read_bp(ci, &bptsc, &aptsc); - bsum += bptsc; - asum += aptsc; + diff = bptsc - aptsc; + if (ABS(diff) < ABS(val)) { + val = diff; + } } - /* Compute final value to adjust for skew. */ - ci->ci_data.cpu_cc_skew = (bsum - asum) >> 3; + ci->ci_data.cpu_cc_skew = val; } /* @@ -255,11 +257,10 @@ tsc_post_ap(struct cpu_info *ci) while ((ci->ci_flags & CPUF_SYNCTSC) == 0) { __insn_barrier(); } - tsc = (rdtsc() >> 1); /* Instruct primary to read its counter. */ atomic_and_uint(&ci->ci_flags, ~CPUF_SYNCTSC); - tsc += (rdtsc() >> 1); + tsc = rdtsc(); /* Post result. Ensure the whole value goes out atomically. 
*/ (void)atomic_swap_64(&tsc_sync_val, tsc); @@ -273,8 +274,10 @@ void tsc_sync_ap(struct cpu_info *ci) { - tsc_post_ap(ci); - for (int i = 0; i < 8; i++) { + if (!cpu_hascounter()) + return; + + for (int i = 0; i < TSC_SYNC_ROUNDS; i++) { tsc_post_ap(ci); } } @@ -321,12 +324,3 @@ cpu_hascounter(void) return cpu_feature[0] & CPUID_TSC; } - -uint64_t -cpu_counter_serializing(void) -{ - if (tsc_good) - return rdmsr(MSR_TSC); - else - return cpu_counter(); -} Index: sys/arch/x86/x86/x86_softintr.c =================================================================== RCS file: /cvsroot/src/sys/arch/x86/x86/x86_softintr.c,v retrieving revision 1.2 retrieving revision 1.3 diff -p -u -r1.2 -r1.3 --- sys/arch/x86/x86/x86_softintr.c 25 Apr 2020 15:26:18 -0000 1.2 +++ sys/arch/x86/x86/x86_softintr.c 8 May 2020 21:43:54 -0000 1.3 @@ -1,4 +1,4 @@ -/* $NetBSD: x86_softintr.c,v 1.2 2020/04/25 15:26:18 bouyer Exp $ */ +/* $NetBSD: x86_softintr.c,v 1.3 2020/05/08 21:43:54 ad Exp $ */ /* * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. @@ -133,7 +133,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: x86_softintr.c,v 1.2 2020/04/25 15:26:18 bouyer Exp $"); +__KERNEL_RCSID(0, "$NetBSD: x86_softintr.c,v 1.3 2020/05/08 21:43:54 ad Exp $"); #include #include @@ -223,6 +223,7 @@ void x86_init_preempt(struct cpu_info *ci) { struct intrsource *isp; + isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); isp->is_recurse = Xrecurse_preempt; isp->is_resume = Xresume_preempt; Index: sys/dev/random.c =================================================================== RCS file: /cvsroot/src/sys/dev/random.c,v retrieving revision 1.3 retrieving revision 1.7 diff -p -u -r1.3 -r1.7 --- sys/dev/random.c 7 May 2020 19:05:51 -0000 1.3 +++ sys/dev/random.c 8 May 2020 16:05:36 -0000 1.7 @@ -1,4 +1,4 @@ -/* $NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $ */ +/* $NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $ */ /*- * Copyright (c) 2019 The NetBSD Foundation, Inc. 
@@ -47,7 +47,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.3 2020/05/07 19:05:51 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: random.c,v 1.7 2020/05/08 16:05:36 riastradh Exp $"); #include #include @@ -59,9 +59,9 @@ __KERNEL_RCSID(0, "$NetBSD: random.c,v 1 #include #include #include +#include #include #include -#include #include #include #include @@ -95,7 +95,6 @@ const struct cdevsw rnd_cdevsw = { }; #define RANDOM_BUFSIZE 512 /* XXX pulled from arse */ -static pool_cache_t random_buf_pc __read_mostly; /* Entropy source for writes to /dev/random and /dev/urandom */ static krndsource_t user_rndsource; @@ -104,8 +103,6 @@ void rndattach(int num) { - random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0, - "randombuf", NULL, IPL_NONE, NULL, NULL, NULL); rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN, RND_FLAG_COLLECT_VALUE); } @@ -216,11 +213,10 @@ random_read(dev_t dev, struct uio *uio, struct nist_hash_drbg drbg; uint8_t *buf; int extractflags; - bool interruptible; int error; /* Get a buffer for transfers. */ - buf = pool_cache_get(random_buf_pc, PR_WAITOK); + buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP); /* * If it's a short read from /dev/urandom, just generate the @@ -261,25 +257,16 @@ random_read(dev_t dev, struct uio *uio, /* Promptly zero the seed. */ explicit_memset(seed, 0, sizeof seed); - /* - * Generate data. Assume no error until failure. No - * interruption at this point until we've generated at least - * one block of output. - */ + /* Generate data. */ error = 0; - interruptible = false; while (uio->uio_resid) { - size_t n = uio->uio_resid; - - /* No more than one buffer's worth. */ - n = MIN(n, RANDOM_BUFSIZE); + size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE); /* - * If we're `depleting' and this is /dev/random, clamp - * to the smaller of the entropy capacity or the seed. + * Clamp /dev/random output to the entropy capacity and + * seed size. Programs can't rely on long reads. 
*/ - if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && - minor(dev) == RND_DEV_RANDOM) { + if (minor(dev) == RND_DEV_RANDOM) { n = MIN(n, ENTROPY_CAPACITY); n = MIN(n, sizeof seed); /* @@ -290,22 +277,6 @@ random_read(dev_t dev, struct uio *uio, CTASSERT(sizeof seed <= RANDOM_BUFSIZE); } - /* Yield if requested. */ - if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) - preempt(); - - /* - * Allow interruption, but only after providing a - * minimum number of bytes. - */ - CTASSERT(RANDOM_BUFSIZE >= 256); - /* Check for interruption. */ - if (__predict_false(curlwp->l_flag & LW_PENDSIG) && - interruptible && sigispending(curlwp, 0)) { - error = EINTR; /* XXX ERESTART? */ - break; - } - /* * Try to generate a block of data, but if we've hit * the DRBG reseed interval, reseed. @@ -343,28 +314,31 @@ random_read(dev_t dev, struct uio *uio, break; /* - * If we're `depleting' and this is /dev/random, stop - * here, return what we have, and force the next read - * to reseed. Could grab more from the pool if - * possible without blocking, but that's more - * work. + * If this is /dev/random, stop here, return what we + * have, and force the next read to reseed. Programs + * can't rely on /dev/random for long reads. */ - if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && - minor(dev) == RND_DEV_RANDOM) { + if (minor(dev) == RND_DEV_RANDOM) { error = 0; break; } - /* - * We have generated one block of output, so it is - * reasonable to allow interruption after this point. - */ - interruptible = true; + /* Yield if requested. */ + if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) + preempt(); + + /* Check for interruption after at least 256 bytes. */ + CTASSERT(RANDOM_BUFSIZE >= 256); + if (__predict_false(curlwp->l_flag & LW_PENDSIG) && + sigispending(curlwp, 0)) { + error = EINTR; + break; + } } -out: /* Zero the buffer and return it to the pool cache. */ +out: /* Zero the buffer and free it. 
*/ explicit_memset(buf, 0, RANDOM_BUFSIZE); - pool_cache_put(random_buf_pc, buf); + kmem_free(buf, RANDOM_BUFSIZE); return error; } @@ -404,14 +378,18 @@ random_write(dev_t dev, struct uio *uio, privileged = true; /* Get a buffer for transfers. */ - buf = pool_cache_get(random_buf_pc, PR_WAITOK); + buf = kmem_alloc(RANDOM_BUFSIZE, KM_SLEEP); /* Consume data. */ while (uio->uio_resid) { - size_t n = uio->uio_resid; + size_t n = MIN(uio->uio_resid, RANDOM_BUFSIZE); - /* No more than one buffer's worth in one step. */ - n = MIN(uio->uio_resid, RANDOM_BUFSIZE); + /* Transfer n bytes in and enter them into the pool. */ + error = uiomove(buf, n, uio); + if (error) + break; + rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0); + any = true; /* Yield if requested. */ if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) @@ -420,21 +398,14 @@ random_write(dev_t dev, struct uio *uio, /* Check for interruption. */ if (__predict_false(curlwp->l_flag & LW_PENDSIG) && sigispending(curlwp, 0)) { - error = EINTR; /* XXX ERESTART? */ + error = EINTR; break; } - - /* Transfer n bytes in and enter them into the pool. */ - error = uiomove(buf, n, uio); - if (error) - break; - rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0); - any = true; } - /* Zero the buffer and return it to the pool cache. */ + /* Zero the buffer and free it. */ explicit_memset(buf, 0, RANDOM_BUFSIZE); - pool_cache_put(random_buf_pc, buf); + kmem_free(buf, RANDOM_BUFSIZE); /* If we added anything, consolidate entropy now. 
*/ if (any) Index: sys/dev/acpi/acpi_pci.c =================================================================== RCS file: /cvsroot/src/sys/dev/acpi/acpi_pci.c,v retrieving revision 1.28 retrieving revision 1.29 diff -p -u -r1.28 -r1.29 --- sys/dev/acpi/acpi_pci.c 18 Jan 2020 12:32:57 -0000 1.28 +++ sys/dev/acpi/acpi_pci.c 8 May 2020 14:42:38 -0000 1.29 @@ -1,4 +1,4 @@ -/* $NetBSD: acpi_pci.c,v 1.28 2020/01/18 12:32:57 jmcneill Exp $ */ +/* $NetBSD: acpi_pci.c,v 1.29 2020/05/08 14:42:38 jmcneill Exp $ */ /* * Copyright (c) 2009, 2010 The NetBSD Foundation, Inc. @@ -29,7 +29,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: acpi_pci.c,v 1.28 2020/01/18 12:32:57 jmcneill Exp $"); +__KERNEL_RCSID(0, "$NetBSD: acpi_pci.c,v 1.29 2020/05/08 14:42:38 jmcneill Exp $"); #include #include @@ -53,7 +53,6 @@ ACPI_MODULE_NAME ("acpi_pci") #define ACPI_HILODWORD(x) ACPI_HIWORD(ACPI_LODWORD((x))) #define ACPI_LOLODWORD(x) ACPI_LOWORD(ACPI_LODWORD((x))) -static ACPI_STATUS acpi_pcidev_pciroot_bus(ACPI_HANDLE, uint16_t *); static ACPI_STATUS acpi_pcidev_pciroot_bus_callback(ACPI_RESOURCE *, void *); @@ -106,7 +105,7 @@ static UINT8 acpi_pci_dsm_uuid[ACPI_UUID * If successful, return AE_OK and fill *busp. Otherwise, return an * exception code and leave *busp unchanged. */ -static ACPI_STATUS +ACPI_STATUS acpi_pcidev_pciroot_bus(ACPI_HANDLE handle, uint16_t *busp) { ACPI_STATUS rv; Index: sys/dev/acpi/acpi_pci.h =================================================================== RCS file: /cvsroot/src/sys/dev/acpi/acpi_pci.h,v retrieving revision 1.11 retrieving revision 1.12 diff -p -u -r1.11 -r1.12 --- sys/dev/acpi/acpi_pci.h 17 Jan 2020 17:06:32 -0000 1.11 +++ sys/dev/acpi/acpi_pci.h 8 May 2020 14:42:38 -0000 1.12 @@ -1,4 +1,4 @@ -/* $NetBSD: acpi_pci.h,v 1.11 2020/01/17 17:06:32 jmcneill Exp $ */ +/* $NetBSD: acpi_pci.h,v 1.12 2020/05/08 14:42:38 jmcneill Exp $ */ /* * Copyright (c) 2009 The NetBSD Foundation, Inc. 
@@ -32,6 +32,7 @@ #define _SYS_DEV_ACPI_ACPI_PCI_H ACPI_STATUS acpi_pcidev_scan(struct acpi_devnode *); +ACPI_STATUS acpi_pcidev_pciroot_bus(ACPI_HANDLE, uint16_t *); ACPI_STATUS acpi_pcidev_ppb_downbus(pci_chipset_tag_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t *); Index: sys/dev/ic/hpet.c =================================================================== RCS file: /cvsroot/src/sys/dev/ic/hpet.c,v retrieving revision 1.15 retrieving revision 1.16 diff -p -u -r1.15 -r1.16 --- sys/dev/ic/hpet.c 24 Apr 2020 22:25:07 -0000 1.15 +++ sys/dev/ic/hpet.c 8 May 2020 22:01:54 -0000 1.16 @@ -1,4 +1,4 @@ -/* $NetBSD: hpet.c,v 1.15 2020/04/24 22:25:07 ad Exp $ */ +/* $NetBSD: hpet.c,v 1.16 2020/05/08 22:01:54 ad Exp $ */ /* * Copyright (c) 2006 Nicolas Joly @@ -33,7 +33,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: hpet.c,v 1.15 2020/04/24 22:25:07 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: hpet.c,v 1.16 2020/05/08 22:01:54 ad Exp $"); #include #include @@ -45,6 +45,8 @@ __KERNEL_RCSID(0, "$NetBSD: hpet.c,v 1.1 #include #include +#include + #include #include @@ -52,6 +54,8 @@ static u_int hpet_get_timecount(struct t static bool hpet_resume(device_t, const pmf_qual_t *); static struct hpet_softc *hpet0 __read_mostly; +static uint32_t hpet_attach_val; +static uint64_t hpet_attach_tsc; int hpet_detach(device_t dv, int flags) @@ -143,6 +147,14 @@ hpet_attach_subr(device_t dv) eval = bus_space_read_4(sc->sc_memt, sc->sc_memh, HPET_MCOUNT_LO); val = eval - sval; sc->sc_adj = (int64_t)val * sc->sc_period / 1000; + + /* Store attach-time values for computing TSC frequency later. 
*/ + if (cpu_hascounter()) { + (void)cpu_counter(); + val = bus_space_read_4(sc->sc_memt, sc->sc_memh, HPET_MCOUNT_LO); + hpet_attach_tsc = cpu_counter(); + hpet_attach_val = val; + } } static u_int @@ -198,6 +210,40 @@ hpet_delay(unsigned int us) } } +uint64_t +hpet_tsc_freq(void) +{ + struct hpet_softc *sc; + uint64_t td, val, freq; + uint32_t hd; + int s; + + if (hpet0 == NULL || !cpu_hascounter()) + return 0; + + /* Slow down if we got here from attach in under 0.1s. */ + sc = hpet0; + hd = bus_space_read_4(sc->sc_memt, sc->sc_memh, HPET_MCOUNT_LO); + hd -= hpet_attach_val; + if (hd < (uint64_t)100000 * 1000000000 / sc->sc_period) + hpet_delay(100000); + + /* + * Determine TSC freq by comparing how far the TSC and HPET have + * advanced since attach time. Take the cost of reading HPET + * register into account and round result to the nearest 1000. + */ + s = splhigh(); + (void)cpu_counter(); + hd = bus_space_read_4(sc->sc_memt, sc->sc_memh, HPET_MCOUNT_LO); + td = cpu_counter(); + splx(s); + hd -= hpet_attach_val; + val = ((uint64_t)hd * sc->sc_period - sc->sc_adj) / 100000000; + freq = (td - hpet_attach_tsc) * 10000000 / val; + return rounddown(freq + 500, 1000); +} + MODULE(MODULE_CLASS_DRIVER, hpet, NULL); #ifdef _MODULE Index: sys/dev/ic/hpetvar.h =================================================================== RCS file: /cvsroot/src/sys/dev/ic/hpetvar.h,v retrieving revision 1.6 retrieving revision 1.7 diff -p -u -r1.6 -r1.7 --- sys/dev/ic/hpetvar.h 24 Apr 2020 23:29:17 -0000 1.6 +++ sys/dev/ic/hpetvar.h 8 May 2020 22:01:54 -0000 1.7 @@ -1,4 +1,4 @@ -/* $NetBSD: hpetvar.h,v 1.6 2020/04/24 23:29:17 ad Exp $ */ +/* $NetBSD: hpetvar.h,v 1.7 2020/05/08 22:01:54 ad Exp $ */ /* * Copyright (c) 2006 Nicolas Joly @@ -49,5 +49,6 @@ void hpet_attach_subr(device_t); int hpet_detach(device_t, int flags); void hpet_delay(unsigned int); bool hpet_delay_p(void); +uint64_t hpet_tsc_freq(void); #endif /* _DEV_IC_HPETVAR_H_ */ Index: sys/dev/ic/w83l518d.c 
=================================================================== RCS file: /cvsroot/src/sys/dev/ic/w83l518d.c,v retrieving revision 1.2 retrieving revision 1.3 diff -p -u -r1.2 -r1.3 --- sys/dev/ic/w83l518d.c 19 Aug 2010 14:58:22 -0000 1.2 +++ sys/dev/ic/w83l518d.c 11 May 2020 14:55:20 -0000 1.3 @@ -1,4 +1,4 @@ -/* $NetBSD: w83l518d.c,v 1.2 2010/08/19 14:58:22 jmcneill Exp $ */ +/* $NetBSD: w83l518d.c,v 1.3 2020/05/11 14:55:20 jdc Exp $ */ /* * Copyright (c) 2009 Jared D. McNeill @@ -26,7 +26,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: w83l518d.c,v 1.2 2010/08/19 14:58:22 jmcneill Exp $"); +__KERNEL_RCSID(0, "$NetBSD: w83l518d.c,v 1.3 2020/05/11 14:55:20 jdc Exp $"); #include #include @@ -39,9 +39,6 @@ __KERNEL_RCSID(0, "$NetBSD: w83l518d.c,v #include -#include -#include - #include #include #include Index: sys/dev/ic/w83l518d_sdmmc.c =================================================================== RCS file: /cvsroot/src/sys/dev/ic/w83l518d_sdmmc.c,v retrieving revision 1.3 retrieving revision 1.4 diff -p -u -r1.3 -r1.4 --- sys/dev/ic/w83l518d_sdmmc.c 7 Oct 2010 12:06:09 -0000 1.3 +++ sys/dev/ic/w83l518d_sdmmc.c 11 May 2020 14:55:20 -0000 1.4 @@ -1,4 +1,4 @@ -/* $NetBSD: w83l518d_sdmmc.c,v 1.3 2010/10/07 12:06:09 kiyohara Exp $ */ +/* $NetBSD: w83l518d_sdmmc.c,v 1.4 2020/05/11 14:55:20 jdc Exp $ */ /* * Copyright (c) 2009 Jared D. 
McNeill @@ -26,7 +26,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: w83l518d_sdmmc.c,v 1.3 2010/10/07 12:06:09 kiyohara Exp $"); +__KERNEL_RCSID(0, "$NetBSD: w83l518d_sdmmc.c,v 1.4 2020/05/11 14:55:20 jdc Exp $"); #include #include @@ -43,9 +43,6 @@ __KERNEL_RCSID(0, "$NetBSD: w83l518d_sdm #include #include -#include -#include - #include #include #include @@ -185,7 +182,8 @@ wb_sdmmc_attach(struct wb_softc *wb) saa.saa_sch = wb; saa.saa_clkmin = 375; saa.saa_clkmax = 24000; - saa.saa_caps = SMC_CAPS_4BIT_MODE; + if (!ISSET(wb->wb_quirks, WB_QUIRK_1BIT)) + saa.saa_caps = SMC_CAPS_4BIT_MODE; wb->wb_sdmmc_dev = config_found(wb->wb_dev, &saa, NULL); } @@ -268,7 +266,7 @@ wb_sdmmc_write_protect(sdmmc_chipset_han static int wb_sdmmc_bus_power(sdmmc_chipset_handle_t sch, uint32_t ocr) { - REPORT(sch, "TRACE: sdmmc/bus_power(wb, ocr=%d)\n", ocr); + REPORT(sch, "TRACE: sdmmc/bus_power(wb, ocr=%x)\n", ocr); return 0; } @@ -334,10 +332,19 @@ wb_sdmmc_rsp_read_long(struct wb_softc * } for (i = 12; i >= 0; i -= 4) { +#if BYTE_ORDER == LITTLE_ENDIAN p[3] = wb_idx_read(wb, WB_INDEX_RESP(i + 0)); p[2] = wb_idx_read(wb, WB_INDEX_RESP(i + 1)); p[1] = wb_idx_read(wb, WB_INDEX_RESP(i + 2)); p[0] = wb_idx_read(wb, WB_INDEX_RESP(i + 3)); +#else + p[0] = wb_idx_read(wb, WB_INDEX_RESP(i + 0)); + p[1] = wb_idx_read(wb, WB_INDEX_RESP(i + 1)); + p[2] = wb_idx_read(wb, WB_INDEX_RESP(i + 2)); + p[3] = wb_idx_read(wb, WB_INDEX_RESP(i + 3)); +#endif + REPORT(wb, "TRACE: sdmmc/read_long (%d) 0x%08x\n", + (12 - i) / 4, cmd->c_resp[(12 - i) / 4]); p += 4; } } @@ -352,10 +359,19 @@ wb_sdmmc_rsp_read_short(struct wb_softc return; } +#if BYTE_ORDER == LITTLE_ENDIAN p[3] = wb_idx_read(wb, WB_INDEX_RESP(12)); p[2] = wb_idx_read(wb, WB_INDEX_RESP(13)); p[1] = wb_idx_read(wb, WB_INDEX_RESP(14)); p[0] = wb_idx_read(wb, WB_INDEX_RESP(15)); +#else + p[0] = wb_idx_read(wb, WB_INDEX_RESP(12)); + p[1] = wb_idx_read(wb, WB_INDEX_RESP(13)); + p[2] = wb_idx_read(wb, WB_INDEX_RESP(14)); + p[3] = wb_idx_read(wb, 
WB_INDEX_RESP(15)); +#endif + REPORT(wb, "TRACE: sdmmc/read_short 0x%08x\n", + cmd->c_resp[0]); } static int @@ -430,8 +446,9 @@ wb_sdmmc_exec_command(sdmmc_chipset_hand int s; REPORT(wb, "TRACE: sdmmc/exec_command(wb, cmd) " - "opcode %d flags 0x%x data %p datalen %d\n", - cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen); + "opcode %d flags 0x%x data %p datalen %d arg 0x%08x\n", + cmd->c_opcode, cmd->c_flags, cmd->c_data, cmd->c_datalen, + cmd->c_arg); if (cmd->c_datalen > 0) { /* controller only supports a select number of data opcodes */ @@ -439,7 +456,9 @@ wb_sdmmc_exec_command(sdmmc_chipset_hand if (opcodes[i] == cmd->c_opcode) break; if (i == __arraycount(opcodes)) { - cmd->c_error = EINVAL; + cmd->c_error = ENOTSUP; + aprint_debug_dev(wb->wb_dev, + "unsupported opcode %d\n", cmd->c_opcode); goto done; } Index: sys/dev/ic/w83l518dvar.h =================================================================== RCS file: /cvsroot/src/sys/dev/ic/w83l518dvar.h,v retrieving revision 1.2 retrieving revision 1.3 diff -p -u -r1.2 -r1.3 --- sys/dev/ic/w83l518dvar.h 19 Aug 2010 14:58:22 -0000 1.2 +++ sys/dev/ic/w83l518dvar.h 11 May 2020 14:55:20 -0000 1.3 @@ -1,4 +1,4 @@ -/* $NetBSD: w83l518dvar.h,v 1.2 2010/08/19 14:58:22 jmcneill Exp $ */ +/* $NetBSD: w83l518dvar.h,v 1.3 2020/05/11 14:55:20 jdc Exp $ */ /* * Copyright (c) 2009 Jared D. 
McNeill @@ -44,6 +44,10 @@ struct wb_softc { uint8_t wb_sdmmc_clk; uint8_t wb_sdmmc_intsts; callout_t wb_sdmmc_callout; + + /* quirks */ +#define WB_QUIRK_1BIT (1U << 0) + int wb_quirks; }; void wb_attach(struct wb_softc *); Index: sys/dev/pckbport/synaptics.c =================================================================== RCS file: /cvsroot/src/sys/dev/pckbport/synaptics.c,v retrieving revision 1.66 retrieving revision 1.67 diff -p -u -r1.66 -r1.67 --- sys/dev/pckbport/synaptics.c 28 Apr 2020 19:22:58 -0000 1.66 +++ sys/dev/pckbport/synaptics.c 14 May 2020 18:06:58 -0000 1.67 @@ -1,4 +1,4 @@ -/* $NetBSD: synaptics.c,v 1.66 2020/04/28 19:22:58 jmcneill Exp $ */ +/* $NetBSD: synaptics.c,v 1.67 2020/05/14 18:06:58 nia Exp $ */ /* * Copyright (c) 2005, Steve C. Woodford @@ -48,7 +48,7 @@ #include "opt_pms.h" #include -__KERNEL_RCSID(0, "$NetBSD: synaptics.c,v 1.66 2020/04/28 19:22:58 jmcneill Exp $"); +__KERNEL_RCSID(0, "$NetBSD: synaptics.c,v 1.67 2020/05/14 18:06:58 nia Exp $"); #include #include @@ -1192,7 +1192,7 @@ pms_synaptics_input(void *vsc, int data) getmicrouptime(&psc->current); - if (psc->inputstate != 0) { + if (psc->inputstate > 0) { timersub(&psc->current, &psc->last, &diff); if (diff.tv_sec > 0 || diff.tv_usec >= 40000) { aprint_debug_dev(psc->sc_dev, Index: sys/dev/usb/ehci.c =================================================================== RCS file: /cvsroot/src/sys/dev/usb/ehci.c,v retrieving revision 1.278 retrieving revision 1.279 diff -p -u -r1.278 -r1.279 --- sys/dev/usb/ehci.c 5 Apr 2020 20:59:38 -0000 1.278 +++ sys/dev/usb/ehci.c 15 May 2020 06:15:42 -0000 1.279 @@ -1,4 +1,4 @@ -/* $NetBSD: ehci.c,v 1.278 2020/04/05 20:59:38 skrll Exp $ */ +/* $NetBSD: ehci.c,v 1.279 2020/05/15 06:15:42 skrll Exp $ */ /* * Copyright (c) 2004-2012 The NetBSD Foundation, Inc. 
@@ -53,7 +53,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: ehci.c,v 1.278 2020/04/05 20:59:38 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ehci.c,v 1.279 2020/05/15 06:15:42 skrll Exp $"); #include "ohci.h" #include "uhci.h" @@ -4301,14 +4301,13 @@ ehci_device_fs_isoc_transfer(struct usbd ehci_soft_sitd_t *sitd; usb_dma_t *dma_buf; int i, j, k, frames; - int offs, total_length; + int offs; int frindex; u_int dir; EHCIHIST_FUNC(); EHCIHIST_CALLED(); sitd = NULL; - total_length = 0; DPRINTF("xfer %#jx len %jd flags %jd", (uintptr_t)xfer, xfer->ux_length, xfer->ux_flags, 0); @@ -4354,7 +4353,6 @@ ehci_device_fs_isoc_transfer(struct usbd /* Set page0 index and offset - TP and T-offset are set below */ sitd->sitd.sitd_buffer[0] = htole32(DMAADDR(dma_buf, offs)); - total_length += xfer->ux_frlengths[i]; offs += xfer->ux_frlengths[i]; sitd->sitd.sitd_buffer[1] = @@ -4432,8 +4430,8 @@ ehci_device_fs_isoc_transfer(struct usbd sizeof(sitd->sitd.sitd_trans), BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); - if (total_length) - usb_syncmem(&exfer->ex_xfer.ux_dmabuf, 0, total_length, + if (xfer->ux_length) + usb_syncmem(&exfer->ex_xfer.ux_dmabuf, 0, xfer->ux_length, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* @@ -4672,7 +4670,7 @@ ehci_device_isoc_transfer(struct usbd_xf usb_dma_t *dma_buf; int i, j; int frames, uframes, ufrperframe; - int trans_count, offs, total_length; + int trans_count, offs; int frindex; EHCIHIST_FUNC(); EHCIHIST_CALLED(); @@ -4680,7 +4678,6 @@ ehci_device_isoc_transfer(struct usbd_xf prev = NULL; itd = NULL; trans_count = 0; - total_length = 0; DPRINTF("xfer %#jx flags %jd", (uintptr_t)xfer, xfer->ux_flags, 0, 0); @@ -4765,7 +4762,6 @@ ehci_device_isoc_transfer(struct usbd_xf EHCI_ITD_SET_PG(addr) | EHCI_ITD_SET_OFFS(EHCI_PAGE_OFFSET(DMAADDR(dma_buf,offs)))); - total_length += xfer->ux_frlengths[trans_count]; offs += xfer->ux_frlengths[trans_count]; trans_count++; @@ -4820,8 +4816,8 @@ ehci_device_isoc_transfer(struct usbd_xf prev = itd; } /* End of frame 
*/ - if (total_length) - usb_syncmem(&exfer->ex_xfer.ux_dmabuf, 0, total_length, + if (xfer->ux_length) + usb_syncmem(&exfer->ex_xfer.ux_dmabuf, 0, xfer->ux_length, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Index: sys/dev/usb/uhci.c =================================================================== RCS file: /cvsroot/src/sys/dev/usb/uhci.c,v retrieving revision 1.300 retrieving revision 1.301 diff -p -u -r1.300 -r1.301 --- sys/dev/usb/uhci.c 5 Apr 2020 20:59:38 -0000 1.300 +++ sys/dev/usb/uhci.c 15 May 2020 06:15:42 -0000 1.301 @@ -1,4 +1,4 @@ -/* $NetBSD: uhci.c,v 1.300 2020/04/05 20:59:38 skrll Exp $ */ +/* $NetBSD: uhci.c,v 1.301 2020/05/15 06:15:42 skrll Exp $ */ /* * Copyright (c) 1998, 2004, 2011, 2012 The NetBSD Foundation, Inc. @@ -42,7 +42,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.300 2020/04/05 20:59:38 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.301 2020/05/15 06:15:42 skrll Exp $"); #ifdef _KERNEL_OPT #include "opt_usb.h" @@ -2918,8 +2918,9 @@ uhci_device_isoc_transfer(struct usbd_xf KASSERT(xfer->ux_nframes != 0); - usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, - rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); + if (xfer->ux_length) + usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, + rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); mutex_enter(&sc->sc_lock); next = isoc->next; Index: sys/dev/usb/usb_mem.c =================================================================== RCS file: /cvsroot/src/sys/dev/usb/usb_mem.c,v retrieving revision 1.76 retrieving revision 1.77 diff -p -u -r1.76 -r1.77 --- sys/dev/usb/usb_mem.c 5 Apr 2020 20:59:38 -0000 1.76 +++ sys/dev/usb/usb_mem.c 15 May 2020 06:26:44 -0000 1.77 @@ -1,4 +1,4 @@ -/* $NetBSD: usb_mem.c,v 1.76 2020/04/05 20:59:38 skrll Exp $ */ +/* $NetBSD: usb_mem.c,v 1.77 2020/05/15 06:26:44 skrll Exp $ */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. 
@@ -38,7 +38,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: usb_mem.c,v 1.76 2020/04/05 20:59:38 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: usb_mem.c,v 1.77 2020/05/15 06:26:44 skrll Exp $"); #ifdef _KERNEL_OPT #include "opt_usb.h" @@ -152,7 +152,7 @@ usb_block_allocmem(bus_dma_tag_t tag, si /* Caller wants one segment */ b->nsegs = 1; else - b->nsegs = (size + (PAGE_SIZE-1)) / PAGE_SIZE; + b->nsegs = howmany(size, PAGE_SIZE); b->segs = kmem_alloc(b->nsegs * sizeof(*b->segs), KM_SLEEP); b->nsegs_alloc = b->nsegs; Index: sys/dev/usb/usbdevices.config =================================================================== RCS file: /cvsroot/src/sys/dev/usb/usbdevices.config,v retrieving revision 1.38 retrieving revision 1.39 diff -p -u -r1.38 -r1.39 --- sys/dev/usb/usbdevices.config 12 Apr 2020 01:10:54 -0000 1.38 +++ sys/dev/usb/usbdevices.config 13 May 2020 10:34:05 -0000 1.39 @@ -1,4 +1,4 @@ -# $NetBSD: usbdevices.config,v 1.38 2020/04/12 01:10:54 simonb Exp $ +# $NetBSD: usbdevices.config,v 1.39 2020/05/13 10:34:05 martin Exp $ # # This file contains all USB related configuration. # It is suitable for inclusion in a kernel config(5) file. @@ -255,4 +255,4 @@ aubtfwl* at uhub? port ? # load Atheros bthub* at ubt? # Araneus Alea I/II TRNG -#ualea* at uhub? port ? configuration ? interface ? +ualea* at uhub? port ? configuration ? interface ? Index: sys/dev/usb/usbdi.c =================================================================== RCS file: /cvsroot/src/sys/dev/usb/usbdi.c,v retrieving revision 1.200 retrieving revision 1.201 diff -p -u -r1.200 -r1.201 --- sys/dev/usb/usbdi.c 5 Apr 2020 20:59:38 -0000 1.200 +++ sys/dev/usb/usbdi.c 15 May 2020 06:15:42 -0000 1.201 @@ -1,4 +1,4 @@ -/* $NetBSD: usbdi.c,v 1.200 2020/04/05 20:59:38 skrll Exp $ */ +/* $NetBSD: usbdi.c,v 1.201 2020/05/15 06:15:42 skrll Exp $ */ /* * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc. 
@@ -32,7 +32,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.200 2020/04/05 20:59:38 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.201 2020/05/15 06:15:42 skrll Exp $"); #ifdef _KERNEL_OPT #include "opt_usb.h" @@ -685,6 +685,9 @@ usbd_setup_isoc_xfer(struct usbd_xfer *x xfer->ux_rqflags &= ~URQ_REQUEST; xfer->ux_frlengths = frlengths; xfer->ux_nframes = nframes; + + for (size_t i = 0; i < xfer->ux_nframes; i++) + xfer->ux_length += xfer->ux_frlengths[i]; } void Index: sys/fs/tmpfs/tmpfs_subr.c =================================================================== RCS file: /cvsroot/src/sys/fs/tmpfs/tmpfs_subr.c,v retrieving revision 1.109 retrieving revision 1.110 diff -p -u -r1.109 -r1.110 --- sys/fs/tmpfs/tmpfs_subr.c 23 Apr 2020 21:47:08 -0000 1.109 +++ sys/fs/tmpfs/tmpfs_subr.c 12 May 2020 23:17:41 -0000 1.110 @@ -1,4 +1,4 @@ -/* $NetBSD: tmpfs_subr.c,v 1.109 2020/04/23 21:47:08 ad Exp $ */ +/* $NetBSD: tmpfs_subr.c,v 1.110 2020/05/12 23:17:41 ad Exp $ */ /* * Copyright (c) 2005-2013 The NetBSD Foundation, Inc. 
@@ -73,7 +73,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: tmpfs_subr.c,v 1.109 2020/04/23 21:47:08 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: tmpfs_subr.c,v 1.110 2020/05/12 23:17:41 ad Exp $"); #include #include @@ -148,7 +148,7 @@ tmpfs_init_vnode(struct vnode *vp, tmpfs node->tn_vnode = vp; uvm_vnp_setsize(vp, node->tn_size); KASSERT(node->tn_mode != VNOVAL); - cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid); + cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid, true); } /* @@ -1041,7 +1041,7 @@ tmpfs_chmod(vnode_t *vp, mode_t mode, ka node->tn_mode = (mode & ALLPERMS); tmpfs_update(vp, TMPFS_UPDATE_CTIME); VN_KNOTE(vp, NOTE_ATTRIB); - cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid); + cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid, true); return 0; } @@ -1086,7 +1086,7 @@ tmpfs_chown(vnode_t *vp, uid_t uid, gid_ node->tn_gid = gid; tmpfs_update(vp, TMPFS_UPDATE_CTIME); VN_KNOTE(vp, NOTE_ATTRIB); - cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid); + cache_enter_id(vp, node->tn_mode, node->tn_uid, node->tn_gid, true); return 0; } Index: sys/kern/init_main.c =================================================================== RCS file: /cvsroot/src/sys/kern/init_main.c,v retrieving revision 1.524 retrieving revision 1.525 diff -p -u -r1.524 -r1.525 --- sys/kern/init_main.c 30 Apr 2020 03:28:18 -0000 1.524 +++ sys/kern/init_main.c 11 May 2020 21:38:54 -0000 1.525 @@ -1,4 +1,4 @@ -/* $NetBSD: init_main.c,v 1.524 2020/04/30 03:28:18 riastradh Exp $ */ +/* $NetBSD: init_main.c,v 1.525 2020/05/11 21:38:54 riastradh Exp $ */ /*- * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc. 
@@ -97,7 +97,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.524 2020/04/30 03:28:18 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.525 2020/05/11 21:38:54 riastradh Exp $"); #include "opt_ddb.h" #include "opt_inet.h" @@ -395,6 +395,8 @@ main(void) */ rnd_init(); /* initialize entropy pool */ + cprng_init(); /* initialize cryptographic PRNG */ + /* Initialize process and pgrp structures. */ procinit(); lwpinit(); @@ -529,8 +531,6 @@ main(void) /* Configure the system hardware. This will enable interrupts. */ configure(); - cprng_init(); /* initialize cryptographic PRNG */ - /* Once all CPUs are detected, initialize the per-CPU cprng_fast. */ cprng_fast_init(); Index: sys/kern/kern_clock.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_clock.c,v retrieving revision 1.140 retrieving revision 1.141 diff -p -u -r1.140 -r1.141 --- sys/kern/kern_clock.c 2 Apr 2020 16:29:30 -0000 1.140 +++ sys/kern/kern_clock.c 8 May 2020 22:10:09 -0000 1.141 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_clock.c,v 1.140 2020/04/02 16:29:30 maxv Exp $ */ +/* $NetBSD: kern_clock.c,v 1.141 2020/05/08 22:10:09 ad Exp $ */ /*- * Copyright (c) 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc. @@ -69,7 +69,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.140 2020/04/02 16:29:30 maxv Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.141 2020/05/08 22:10:09 ad Exp $"); #ifdef _KERNEL_OPT #include "opt_dtrace.h" @@ -257,13 +257,6 @@ hardclock(struct clockframe *frame) * Update real-time timeout queue. 
*/ callout_hardclock(); - -#ifdef KDTRACE_HOOKS - cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)]; - if (func) { - (*func)((struct clockframe *)frame); - } -#endif } /* @@ -431,6 +424,13 @@ statclock(struct clockframe *frame) atomic_inc_uint(&l->l_cpticks); mutex_spin_exit(&p->p_stmutex); } + +#ifdef KDTRACE_HOOKS + cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)]; + if (func) { + (*func)((struct clockframe *)frame); + } +#endif } /* Index: sys/kern/kern_condvar.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_condvar.c,v retrieving revision 1.51 retrieving revision 1.52 diff -p -u -r1.51 -r1.52 --- sys/kern/kern_condvar.c 4 May 2020 18:23:37 -0000 1.51 +++ sys/kern/kern_condvar.c 11 May 2020 03:59:33 -0000 1.52 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_condvar.c,v 1.51 2020/05/04 18:23:37 riastradh Exp $ */ +/* $NetBSD: kern_condvar.c,v 1.52 2020/05/11 03:59:33 riastradh Exp $ */ /*- * Copyright (c) 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc. @@ -34,7 +34,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.51 2020/05/04 18:23:37 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.52 2020/05/11 03:59:33 riastradh Exp $"); #include #include @@ -245,75 +245,6 @@ cv_timedwait_sig(kcondvar_t *cv, kmutex_ } /* - * cv_timedwaitclock: - * - * Wait on a condition variable until awoken normally, or the - * specified timeout expires according to the provided clock. - * Returns zero if awoken normally or EWOULDBLOCK if the timeout - * expired. For relative timeouts ((flags & TIMER_ABSTIME) == 0), - * updates timeout with the time left. - * - * timeout == NULL specifies an infinite timeout. epsilon is a - * requested maximum error in timeout (excluding spurious - * wakeups). 
- */ -int -cv_timedwaitclock(kcondvar_t *cv, kmutex_t *mtx, struct timespec *timeout, - clockid_t clockid, int flags, const struct bintime *epsilon) -{ - struct timedwaitclock T; - int timo; - int error; - - if (timeout == NULL) { - cv_wait(cv, mtx); - return 0; - } - - timedwaitclock_setup(&T, timeout, clockid, flags, epsilon); - error = timedwaitclock_begin(&T, &timo); - if (error) - return error; - error = cv_timedwait(cv, mtx, timo); - timedwaitclock_end(&T); - return error; -} - -/* - * cv_timedwaitclock_sig: - * - * Wait on a condition variable until awoken normally, interrupted - * by a signal, or the specified timeout expires according to the - * provided clock. Returns zero if awoken normally, - * EINTR/ERESTART if interrupted by a signal, or EWOULDBLOCK if - * the timeout expired. For relative timeouts ((flags & - * TIMER_ABSTIME) == 0), updates timeout with the time left. - * - * timeout == NULL specifies an infinite timeout. epsilon is a - * requested maximum error in timeout (excluding spurious - * wakeups). 
- */ -int -cv_timedwaitclock_sig(kcondvar_t *cv, kmutex_t *mtx, struct timespec *timeout, - clockid_t clockid, int flags, const struct bintime *epsilon) -{ - struct timedwaitclock T; - int timo; - int error; - - if (timeout == NULL) - return cv_wait_sig(cv, mtx); - - timedwaitclock_setup(&T, timeout, clockid, flags, epsilon); - error = timedwaitclock_begin(&T, &timo); - if (error) - return error; - error = cv_timedwait_sig(cv, mtx, timo); - timedwaitclock_end(&T); - return error; -} - -/* * Given a number of seconds, sec, and 2^64ths of a second, frac, we * want a number of ticks for a timeout: * Index: sys/kern/kern_entropy.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_entropy.c,v retrieving revision 1.16 retrieving revision 1.22 diff -p -u -r1.16 -r1.22 --- sys/kern/kern_entropy.c 8 May 2020 00:54:44 -0000 1.16 +++ sys/kern/kern_entropy.c 12 May 2020 20:50:17 -0000 1.22 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_entropy.c,v 1.16 2020/05/08 00:54:44 riastradh Exp $ */ +/* $NetBSD: kern_entropy.c,v 1.22 2020/05/12 20:50:17 riastradh Exp $ */ /*- * Copyright (c) 2019 The NetBSD Foundation, Inc. @@ -75,7 +75,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.16 2020/05/08 00:54:44 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.22 2020/05/12 20:50:17 riastradh Exp $"); #include #include @@ -223,8 +223,8 @@ static struct evcnt entropy_notify_evcnt EVCNT_ATTACH_STATIC(entropy_notify_evcnt); /* Sysctl knobs */ -bool entropy_collection = 1; -bool entropy_depletion = 0; /* Silly! */ +static bool entropy_collection = 1; +static bool entropy_depletion = 0; /* Silly! */ static const struct sysctlnode *entropy_sysctlroot; static struct sysctllog *entropy_sysctllog; @@ -670,7 +670,7 @@ entropy_account_cpu(struct entropy_cpu * /* Notify waiters that we now have full entropy. 
*/ entropy_notify(); entropy_immediate_evcnt.ev_count++; - } else if (ec->ec_pending) { + } else { /* Record how much we can add to the global pool. */ diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending); E->pending += diff; @@ -984,11 +984,14 @@ entropy_do_consolidate(void) { static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0}; static struct timeval lasttime; /* serialized by E->lock */ + struct entpool pool; + uint8_t buf[ENTPOOL_CAPACITY]; unsigned diff; uint64_t ticket; - /* Gather entropy on all CPUs. */ - ticket = xc_broadcast(0, &entropy_consolidate_xc, NULL, NULL); + /* Gather entropy on all CPUs into a temporary pool. */ + memset(&pool, 0, sizeof pool); + ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL); xc_wait(ticket); /* Acquire the lock to notify waiters. */ @@ -1000,6 +1003,11 @@ entropy_do_consolidate(void) /* Note when we last consolidated, i.e. now. */ E->timestamp = time_uptime; + /* Mix what we gathered into the global pool. */ + entpool_extract(&pool, buf, sizeof buf); + entpool_enter(&E->pool, buf, sizeof buf); + explicit_memset(&pool, 0, sizeof pool); + /* Count the entropy that was gathered. */ diff = MIN(E->needed, E->pending); atomic_store_relaxed(&E->needed, E->needed - diff); @@ -1018,14 +1026,15 @@ entropy_do_consolidate(void) } /* - * entropy_consolidate_xc(arg1, arg2) + * entropy_consolidate_xc(vpool, arg2) * * Extract output from the local CPU's input pool and enter it - * into the global pool. + * into a temporary pool passed as vpool. */ static void -entropy_consolidate_xc(void *arg1 __unused, void *arg2 __unused) +entropy_consolidate_xc(void *vpool, void *arg2 __unused) { + struct entpool *pool = vpool; struct entropy_cpu *ec; uint8_t buf[ENTPOOL_CAPACITY]; uint32_t extra[7]; @@ -1063,15 +1072,15 @@ entropy_consolidate_xc(void *arg1 __unus /* * Copy over statistics, and enter the per-CPU extract and the - * extra timing into the global pool, under the global lock. 
+ * extra timing into the temporary pool, under the global lock. */ mutex_enter(&E->lock); extra[i++] = entropy_timer(); - entpool_enter(&E->pool, buf, sizeof buf); + entpool_enter(pool, buf, sizeof buf); explicit_memset(buf, 0, sizeof buf); extra[i++] = entropy_timer(); KASSERT(i == __arraycount(extra)); - entpool_enter(&E->pool, extra, sizeof extra); + entpool_enter(pool, extra, sizeof extra); explicit_memset(extra, 0, sizeof extra); mutex_exit(&E->lock); } @@ -1678,6 +1687,13 @@ entropy_request(size_t nbytes) if (!ISSET(rs->flags, RND_FLAG_HASCB)) continue; + /* + * Skip sources that are disabled altogether -- we + * would just ignore their samples anyway. + */ + if (ISSET(rs->flags, RND_FLAG_NO_COLLECT)) + continue; + /* Drop the lock while we call the callback. */ if (E->stage >= ENTROPY_WARM) mutex_exit(&E->lock); @@ -1923,6 +1939,40 @@ rndsource_to_user_est(struct krndsource } /* + * entropy_reset_xc(arg1, arg2) + * + * Reset the current CPU's pending entropy to zero. + */ +static void +entropy_reset_xc(void *arg1 __unused, void *arg2 __unused) +{ + uint32_t extra = entropy_timer(); + struct entropy_cpu *ec; + int s; + + /* + * Acquire the per-CPU state, blocking soft interrupts and + * causing hard interrupts to drop samples on the floor. + */ + ec = percpu_getref(entropy_percpu); + s = splsoftserial(); + KASSERT(!ec->ec_locked); + ec->ec_locked = true; + __insn_barrier(); + + /* Zero the pending count and enter a cycle count for fun. */ + ec->ec_pending = 0; + entpool_enter(ec->ec_pool, &extra, sizeof extra); + + /* Release the per-CPU state. */ + KASSERT(ec->ec_locked); + __insn_barrier(); + ec->ec_locked = false; + splx(s); + percpu_putref(entropy_percpu); +} + +/* * entropy_ioctl(cmd, data) * * Handle various /dev/random ioctl queries. @@ -2156,7 +2206,9 @@ entropy_ioctl(unsigned long cmd, void *d case RNDCTL: { /* Modify entropy source flags. 
*/ rndctl_t *rndctl = data; const size_t n = sizeof(rs->name); + uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT; uint32_t flags; + bool reset = false, request = false; CTASSERT(sizeof(rs->name) == sizeof(rndctl->name)); @@ -2178,9 +2230,39 @@ entropy_ioctl(unsigned long cmd, void *d } flags = rs->flags & ~rndctl->mask; flags |= rndctl->flags & rndctl->mask; + if ((rs->flags & resetflags) == 0 && + (flags & resetflags) != 0) + reset = true; + if ((rs->flags ^ flags) & resetflags) + request = true; atomic_store_relaxed(&rs->flags, flags); } mutex_exit(&E->lock); + + /* + * If we disabled estimation or collection, nix all the + * pending entropy and set needed to the maximum. + */ + if (reset) { + xc_broadcast(0, &entropy_reset_xc, NULL, NULL); + mutex_enter(&E->lock); + E->pending = 0; + atomic_store_relaxed(&E->needed, + ENTROPY_CAPACITY*NBBY); + mutex_exit(&E->lock); + } + + /* + * If we changed any of the estimation or collection + * flags, request new samples from everyone -- either + * to make up for what we just lost, or to get new + * samples from what we just added. + */ + if (request) { + mutex_enter(&E->lock); + entropy_request(ENTROPY_CAPACITY); + mutex_exit(&E->lock); + } break; } case RNDADDDATA: { /* Enter seed into entropy pool. 
*/ Index: sys/kern/kern_fork.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_fork.c,v retrieving revision 1.224 retrieving revision 1.225 diff -p -u -r1.224 -r1.225 --- sys/kern/kern_fork.c 7 May 2020 20:02:34 -0000 1.224 +++ sys/kern/kern_fork.c 12 May 2020 11:21:09 -0000 1.225 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_fork.c,v 1.224 2020/05/07 20:02:34 kamil Exp $ */ +/* $NetBSD: kern_fork.c,v 1.225 2020/05/12 11:21:09 kamil Exp $ */ /*- * Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008, 2019 @@ -68,7 +68,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.224 2020/05/07 20:02:34 kamil Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.225 2020/05/12 11:21:09 kamil Exp $"); #include "opt_ktrace.h" #include "opt_dtrace.h" @@ -158,9 +158,9 @@ sys___clone(struct lwp *l, const struct int flags, sig; /* - * We don't support the CLONE_PID or CLONE_PTRACE flags. + * We don't support the CLONE_PTRACE flag. */ - if (SCARG(uap, flags) & (CLONE_PID|CLONE_PTRACE)) + if (SCARG(uap, flags) & (CLONE_PTRACE)) return EINVAL; /* Index: sys/kern/kern_mutex.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_mutex.c,v retrieving revision 1.90 retrieving revision 1.92 diff -p -u -r1.90 -r1.92 --- sys/kern/kern_mutex.c 8 Mar 2020 00:26:06 -0000 1.90 +++ sys/kern/kern_mutex.c 12 May 2020 21:56:17 -0000 1.92 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_mutex.c,v 1.90 2020/03/08 00:26:06 chs Exp $ */ +/* $NetBSD: kern_mutex.c,v 1.92 2020/05/12 21:56:17 ad Exp $ */ /*- * Copyright (c) 2002, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc. @@ -40,7 +40,7 @@ #define __MUTEX_PRIVATE #include -__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.90 2020/03/08 00:26:06 chs Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.92 2020/05/12 21:56:17 ad Exp $"); #include #include @@ -456,11 +456,13 @@ mutex_vector_enter(kmutex_t *mtx) /* * Handle spin mutexes. 
*/ + KPREEMPT_DISABLE(curlwp); owner = mtx->mtx_owner; if (MUTEX_SPIN_P(owner)) { #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR) u_int spins = 0; #endif + KPREEMPT_ENABLE(curlwp); MUTEX_SPIN_SPLRAISE(mtx); MUTEX_WANTLOCK(mtx); #ifdef FULL @@ -521,7 +523,6 @@ mutex_vector_enter(kmutex_t *mtx) * determine that the owner is not running on a processor, * then we stop spinning, and sleep instead. */ - KPREEMPT_DISABLE(curlwp); for (;;) { if (!MUTEX_OWNED(owner)) { /* Index: sys/kern/kern_pmf.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_pmf.c,v retrieving revision 1.42 retrieving revision 1.43 diff -p -u -r1.42 -r1.43 --- sys/kern/kern_pmf.c 20 Apr 2020 21:39:05 -0000 1.42 +++ sys/kern/kern_pmf.c 12 May 2020 10:02:56 -0000 1.43 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_pmf.c,v 1.42 2020/04/20 21:39:05 ad Exp $ */ +/* $NetBSD: kern_pmf.c,v 1.43 2020/05/12 10:02:56 jdolecek Exp $ */ /*- * Copyright (c) 2007 Jared D. McNeill @@ -27,7 +27,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_pmf.c,v 1.42 2020/04/20 21:39:05 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_pmf.c,v 1.43 2020/05/12 10:02:56 jdolecek Exp $"); #include #include @@ -893,7 +893,9 @@ pmf_class_network_suspend(device_t dev, int s; s = splnet(); + IFNET_LOCK(ifp); (*ifp->if_stop)(ifp, 0); + IFNET_UNLOCK(ifp); splx(s); return true; @@ -904,14 +906,21 @@ pmf_class_network_resume(device_t dev, c { struct ifnet *ifp = device_pmf_class_private(dev); int s; + bool restart = false; s = splnet(); + IFNET_LOCK(ifp); if (ifp->if_flags & IFF_UP) { ifp->if_flags &= ~IFF_RUNNING; if ((*ifp->if_init)(ifp) != 0) aprint_normal_ifnet(ifp, "resume failed\n"); - if_start_lock(ifp); + restart = true; } + IFNET_UNLOCK(ifp); + + if (restart) + if_start_lock(ifp); + splx(s); return true; Index: sys/kern/kern_sig.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_sig.c,v retrieving revision 1.388 retrieving revision 
1.389 diff -p -u -r1.388 -r1.389 --- sys/kern/kern_sig.c 7 May 2020 20:02:34 -0000 1.388 +++ sys/kern/kern_sig.c 14 May 2020 13:32:15 -0000 1.389 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_sig.c,v 1.388 2020/05/07 20:02:34 kamil Exp $ */ +/* $NetBSD: kern_sig.c,v 1.389 2020/05/14 13:32:15 kamil Exp $ */ /*- * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc. @@ -70,7 +70,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.388 2020/05/07 20:02:34 kamil Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.389 2020/05/14 13:32:15 kamil Exp $"); #include "opt_ptrace.h" #include "opt_dtrace.h" @@ -1302,6 +1302,7 @@ kpsignal2(struct proc *p, ksiginfo_t *ks lwpid_t lid; sig_t action; bool toall; + bool traced; int error = 0; KASSERT(!cpu_intr_p()); @@ -1329,11 +1330,13 @@ kpsignal2(struct proc *p, ksiginfo_t *ks prop = sigprop[signo]; toall = ((prop & SA_TOALL) != 0); lid = toall ? 0 : ksi->ksi_lid; + traced = ISSET(p->p_slflag, PSL_TRACED) && + !sigismember(&p->p_sigctx.ps_sigpass, signo); /* * If proc is traced, always give parent a chance. */ - if (p->p_slflag & PSL_TRACED) { + if (traced) { action = SIG_DFL; if (lid == 0) { @@ -1428,7 +1431,7 @@ kpsignal2(struct proc *p, ksiginfo_t *ks * or for an SA process. */ if (p->p_stat == SACTIVE && (p->p_sflag & PS_STOPPING) == 0) { - if ((p->p_slflag & PSL_TRACED) != 0) + if (traced) goto deliver; /* @@ -1444,7 +1447,7 @@ kpsignal2(struct proc *p, ksiginfo_t *ks * - If traced, then no action is needed, unless killing. * - Run the process only if sending SIGCONT or SIGKILL. 
*/ - if ((p->p_slflag & PSL_TRACED) != 0 && signo != SIGKILL) { + if (traced && signo != SIGKILL) { goto out; } if ((prop & SA_CONT) != 0 || signo == SIGKILL) { @@ -1456,7 +1459,7 @@ kpsignal2(struct proc *p, ksiginfo_t *ks p->p_pptr->p_nstopchild--; p->p_stat = SACTIVE; p->p_sflag &= ~PS_STOPPING; - if (p->p_slflag & PSL_TRACED) { + if (traced) { KASSERT(signo == SIGKILL); goto deliver; } @@ -1487,7 +1490,7 @@ kpsignal2(struct proc *p, ksiginfo_t *ks /* * Make signal pending. */ - KASSERT((p->p_slflag & PSL_TRACED) == 0); + KASSERT(!traced); if ((error = sigput(&p->p_sigpend, p, kp)) != 0) goto out; deliver: @@ -1844,6 +1847,7 @@ issignal(struct lwp *l) int siglwp, signo, prop; sigpend_t *sp; sigset_t ss; + bool traced; p = l->l_proc; sp = NULL; @@ -1910,6 +1914,9 @@ issignal(struct lwp *l) } } + traced = ISSET(p->p_slflag, PSL_TRACED) && + !sigismember(&p->p_sigctx.ps_sigpass, signo); + if (sp) { /* Overwrite process' signal context to correspond * to the currently reported LWP. This is necessary @@ -1937,7 +1944,7 @@ issignal(struct lwp *l) * we are being traced. */ if (sigismember(&p->p_sigctx.ps_sigignore, signo) && - (p->p_slflag & PSL_TRACED) == 0) { + !traced) { /* Discard the signal. */ continue; } @@ -1947,7 +1954,7 @@ issignal(struct lwp *l) * by the debugger. If the our parent is our debugger waiting * for us and we vforked, don't hang as we could deadlock. */ - if (ISSET(p->p_slflag, PSL_TRACED) && signo != SIGKILL && + if (traced && signo != SIGKILL && !(ISSET(p->p_lflag, PL_PPWAIT) && (p->p_pptr == p->p_opptr))) { /* @@ -2004,7 +2011,7 @@ issignal(struct lwp *l) * XXX Don't hold proc_lock for p_lflag, * but it's not a big deal. */ - if ((ISSET(p->p_slflag, PSL_TRACED) && + if ((traced && !(ISSET(p->p_lflag, PL_PPWAIT) && (p->p_pptr == p->p_opptr))) || ((p->p_lflag & PL_ORPHANPG) != 0 && @@ -2035,8 +2042,7 @@ issignal(struct lwp *l) * to take action on an ignored signal other * than SIGCONT, unless process is traced. 
*/ - if ((prop & SA_CONT) == 0 && - (p->p_slflag & PSL_TRACED) == 0) + if ((prop & SA_CONT) == 0 && !traced) printf_nolog("issignal\n"); #endif continue; Index: sys/kern/kern_time.c =================================================================== RCS file: /cvsroot/src/sys/kern/kern_time.c,v retrieving revision 1.203 retrieving revision 1.204 diff -p -u -r1.203 -r1.204 --- sys/kern/kern_time.c 2 Jan 2020 15:42:27 -0000 1.203 +++ sys/kern/kern_time.c 14 May 2020 18:18:24 -0000 1.204 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_time.c,v 1.203 2020/01/02 15:42:27 thorpej Exp $ */ +/* $NetBSD: kern_time.c,v 1.204 2020/05/14 18:18:24 maxv Exp $ */ /*- * Copyright (c) 2000, 2004, 2005, 2007, 2008, 2009 The NetBSD Foundation, Inc. @@ -61,7 +61,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.203 2020/01/02 15:42:27 thorpej Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_time.c,v 1.204 2020/05/14 18:18:24 maxv Exp $"); #include #include @@ -352,8 +352,12 @@ again: struct timespec rmtend; struct timespec t0; struct timespec *t; + int err; + + err = clock_gettime1(clock_id, &rmtend); + if (err != 0) + return err; - (void)clock_gettime1(clock_id, &rmtend); t = (rmt != NULL) ? rmt : &t0; if (flags & TIMER_ABSTIME) { timespecsub(rqt, &rmtend, t); Index: sys/kern/subr_cprng.c =================================================================== RCS file: /cvsroot/src/sys/kern/subr_cprng.c,v retrieving revision 1.37 retrieving revision 1.40 diff -p -u -r1.37 -r1.40 --- sys/kern/subr_cprng.c 30 Apr 2020 17:36:06 -0000 1.37 +++ sys/kern/subr_cprng.c 11 May 2020 21:40:12 -0000 1.40 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_cprng.c,v 1.37 2020/04/30 17:36:06 nia Exp $ */ +/* $NetBSD: subr_cprng.c,v 1.40 2020/05/11 21:40:12 riastradh Exp $ */ /*- * Copyright (c) 2019 The NetBSD Foundation, Inc. 
@@ -52,7 +52,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.37 2020/04/30 17:36:06 nia Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.40 2020/05/11 21:40:12 riastradh Exp $"); #include #include @@ -213,6 +213,7 @@ cprng_init_cpu(void *ptr, void *cookie, { struct cprng_cpu *cc = ptr; const char *name = cookie; + const char *cpuname; uint8_t zero[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0}; char namebuf[64]; /* XXX size? */ @@ -242,10 +243,12 @@ cprng_init_cpu(void *ptr, void *cookie, panic("nist_hash_drbg_instantiate"); /* Attach the event counters. */ + /* XXX ci_cpuname may not be initialized early enough. */ + cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname; evcnt_attach_dynamic(&cc->cc_evcnt->intr, EVCNT_TYPE_MISC, NULL, - ci->ci_cpuname, "cprng_strong intr"); + cpuname, "cprng_strong intr"); evcnt_attach_dynamic(&cc->cc_evcnt->reseed, EVCNT_TYPE_MISC, NULL, - ci->ci_cpuname, "cprng_strong reseed"); + cpuname, "cprng_strong reseed"); /* Set the epoch uninitialized so we reseed on first use. */ cc->cc_epoch = 0; Index: sys/kern/subr_kmem.c =================================================================== RCS file: /cvsroot/src/sys/kern/subr_kmem.c,v retrieving revision 1.79 retrieving revision 1.80 diff -p -u -r1.79 -r1.80 --- sys/kern/subr_kmem.c 8 Mar 2020 00:31:19 -0000 1.79 +++ sys/kern/subr_kmem.c 14 May 2020 17:01:34 -0000 1.80 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_kmem.c,v 1.79 2020/03/08 00:31:19 ad Exp $ */ +/* $NetBSD: subr_kmem.c,v 1.80 2020/05/14 17:01:34 maxv Exp $ */ /* * Copyright (c) 2009-2020 The NetBSD Foundation, Inc. 
@@ -78,7 +78,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.79 2020/03/08 00:31:19 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.80 2020/05/14 17:01:34 maxv Exp $"); #ifdef _KERNEL_OPT #include "opt_kmem.h" @@ -258,7 +258,9 @@ kmem_intr_free(void *p, size_t requested pool_cache_t pc; KASSERT(p != NULL); - KASSERT(requested_size > 0); + if (__predict_false(requested_size == 0)) { + panic("%s: zero size with pointer %p", __func__, p); + } kasan_add_redzone(&requested_size); size = kmem_roundup_size(requested_size); Index: sys/kern/subr_percpu.c =================================================================== RCS file: /cvsroot/src/sys/kern/subr_percpu.c,v retrieving revision 1.24 retrieving revision 1.25 diff -p -u -r1.24 -r1.25 --- sys/kern/subr_percpu.c 7 Feb 2020 11:55:22 -0000 1.24 +++ sys/kern/subr_percpu.c 11 May 2020 21:37:31 -0000 1.25 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_percpu.c,v 1.24 2020/02/07 11:55:22 thorpej Exp $ */ +/* $NetBSD: subr_percpu.c,v 1.25 2020/05/11 21:37:31 riastradh Exp $ */ /*- * Copyright (c)2007,2008 YAMAMOTO Takashi, @@ -31,7 +31,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.24 2020/02/07 11:55:22 thorpej Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.25 2020/05/11 21:37:31 riastradh Exp $"); #include #include @@ -50,8 +50,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_percpu. 
struct percpu { unsigned pc_offset; size_t pc_size; + percpu_callback_t pc_ctor; percpu_callback_t pc_dtor; void *pc_cookie; + LIST_ENTRY(percpu) pc_list; }; static krwlock_t percpu_swap_lock __cacheline_aligned; @@ -59,6 +61,9 @@ static vmem_t * percpu_offset_arena __r static struct { kmutex_t lock; unsigned int nextoff; + LIST_HEAD(, percpu) ctor_list; + struct lwp *busy; + kcondvar_t cv; } percpu_allocation __cacheline_aligned; static percpu_cpu_t * @@ -217,6 +222,9 @@ percpu_init(void) rw_init(&percpu_swap_lock); mutex_init(&percpu_allocation.lock, MUTEX_DEFAULT, IPL_NONE); percpu_allocation.nextoff = PERCPU_QUANTUM_SIZE; + LIST_INIT(&percpu_allocation.ctor_list); + percpu_allocation.busy = NULL; + cv_init(&percpu_allocation.cv, "percpu"); percpu_offset_arena = vmem_xcreate("percpu", 0, 0, PERCPU_QUANTUM_SIZE, percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP, @@ -227,18 +235,50 @@ percpu_init(void) * percpu_init_cpu: cpu initialization * * => should be called before the cpu appears on the list for CPU_INFO_FOREACH. + * => may be called for static CPUs afterward (typically just primary CPU) */ void percpu_init_cpu(struct cpu_info *ci) { percpu_cpu_t * const pcc = cpu_percpu(ci); + struct percpu *pc; size_t size = percpu_allocation.nextoff; /* XXX racy */ ASSERT_SLEEPABLE(); + + /* + * For the primary CPU, prior percpu_create may have already + * triggered allocation, so there's nothing more for us to do + * here. + */ + if (pcc->pcc_size) + return; + KASSERT(pcc->pcc_data == NULL); + + /* + * Otherwise, allocate storage and, while the constructor list + * is locked, run constructors for all percpus on this CPU. 
+ */ pcc->pcc_size = size; if (size) { pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP); + mutex_enter(&percpu_allocation.lock); + while (percpu_allocation.busy) + cv_wait(&percpu_allocation.cv, + &percpu_allocation.lock); + percpu_allocation.busy = curlwp; + LIST_FOREACH(pc, &percpu_allocation.ctor_list, pc_list) { + KASSERT(pc->pc_ctor); + mutex_exit(&percpu_allocation.lock); + (*pc->pc_ctor)((char *)pcc->pcc_data + pc->pc_offset, + pc->pc_cookie, ci); + mutex_enter(&percpu_allocation.lock); + } + KASSERT(percpu_allocation.busy == curlwp); + percpu_allocation.busy = NULL; + cv_broadcast(&percpu_allocation.cv); + mutex_exit(&percpu_allocation.lock); } } @@ -281,6 +321,7 @@ percpu_create(size_t size, percpu_callba pc = kmem_alloc(sizeof(*pc), KM_SLEEP); pc->pc_offset = offset; pc->pc_size = size; + pc->pc_ctor = ctor; pc->pc_dtor = dtor; pc->pc_cookie = cookie; @@ -289,6 +330,22 @@ percpu_create(size_t size, percpu_callba struct cpu_info *ci; void *buf; + /* + * Wait until nobody is using the list of percpus with + * constructors. + */ + mutex_enter(&percpu_allocation.lock); + while (percpu_allocation.busy) + cv_wait(&percpu_allocation.cv, + &percpu_allocation.lock); + percpu_allocation.busy = curlwp; + mutex_exit(&percpu_allocation.lock); + + /* + * Run the constructor for all CPUs. We use a + * temporary buffer wo that we need not hold the + * percpu_swap_lock while running the constructor. + */ buf = kmem_alloc(size, KM_SLEEP); for (CPU_INFO_FOREACH(cii, ci)) { memset(buf, 0, size); @@ -299,6 +356,19 @@ percpu_create(size_t size, percpu_callba } explicit_memset(buf, 0, size); kmem_free(buf, size); + + /* + * Insert the percpu into the list of percpus with + * constructors. We are now done using the list, so it + * is safe for concurrent percpu_create or concurrent + * percpu_init_cpu to run. 
+ */ + mutex_enter(&percpu_allocation.lock); + KASSERT(percpu_allocation.busy == curlwp); + percpu_allocation.busy = NULL; + cv_broadcast(&percpu_allocation.cv); + LIST_INSERT_HEAD(&percpu_allocation.ctor_list, pc, pc_list); + mutex_exit(&percpu_allocation.lock); } else { percpu_zero(pc, size); } @@ -320,6 +390,21 @@ percpu_free(percpu_t *pc, size_t size) ASSERT_SLEEPABLE(); KASSERT(size == pc->pc_size); + /* + * If there's a constructor, take the percpu off the list of + * percpus with constructors, but first wait until nobody is + * using the list. + */ + if (pc->pc_ctor) { + mutex_enter(&percpu_allocation.lock); + while (percpu_allocation.busy) + cv_wait(&percpu_allocation.cv, + &percpu_allocation.lock); + LIST_REMOVE(pc, pc_list); + mutex_exit(&percpu_allocation.lock); + } + + /* If there's a destructor, run it now for all CPUs. */ if (pc->pc_dtor) { CPU_INFO_ITERATOR cii; struct cpu_info *ci; Index: sys/kern/subr_time.c =================================================================== RCS file: /cvsroot/src/sys/kern/subr_time.c,v retrieving revision 1.23 retrieving revision 1.24 diff -p -u -r1.23 -r1.24 --- sys/kern/subr_time.c 4 May 2020 18:23:37 -0000 1.23 +++ sys/kern/subr_time.c 11 May 2020 03:59:33 -0000 1.24 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_time.c,v 1.23 2020/05/04 18:23:37 riastradh Exp $ */ +/* $NetBSD: subr_time.c,v 1.24 2020/05/11 03:59:33 riastradh Exp $ */ /* * Copyright (c) 1982, 1986, 1989, 1993 @@ -33,7 +33,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.23 2020/05/04 18:23:37 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.24 2020/05/11 03:59:33 riastradh Exp $"); #include #include @@ -354,141 +354,3 @@ ts2timo(clockid_t clock_id, int flags, s return 0; } - -/* - * timedwaitclock_setup(T, timeout, clockid, flags, epsilon) - * - * Initialize state for a timedwaitclock, to be used subsequently - * with timedwaitclock_begin/end, possibly many times in a row. 
- * - * No cleanup action required at the end; the caller-allocated - * (typically stack-allocated) timedwaitclock just holds - * parameters and a little state for timedwaitclock_begin/end. - */ -void -timedwaitclock_setup(struct timedwaitclock *T, struct timespec *timeout, - clockid_t clockid, int flags, const struct bintime *epsilon) -{ - - memset(T, 0, sizeof(*T)); - T->timeout = timeout; - T->clockid = clockid; - T->flags = flags; - T->epsilon = epsilon; - T->starttime = (struct timespec){0,0}; -} - -/* - * timedwaitclock_begin(T, timo) - * - * Decide how many ticks to wait for the timedwaitclock T and - * store it in *timo. Keep state for timedwaitclock_end. May - * fail with EINVAL if the specified timeout is invalid, or if the - * specified clock fails. Fails with ETIMEDOUT if there is no - * time left to wait. - */ -int -timedwaitclock_begin(struct timedwaitclock *T, int *timo) -{ - struct timespec delta; - const struct timespec *deltap; - int error; - - /* Sanity-check timeout -- may have come from userland. */ - if (T->timeout->tv_nsec < 0 || T->timeout->tv_nsec >= 1000000000L) - return EINVAL; - - /* - * Compute the time delta. - */ - if ((T->flags & TIMER_ABSTIME) == TIMER_ABSTIME) { - /* Check our watch. */ - error = clock_gettime1(T->clockid, &T->starttime); - if (error) - return error; - - /* If the deadline has passed, we're done. */ - if (timespeccmp(T->timeout, &T->starttime, <=)) - return ETIMEDOUT; - - /* Count how much time is left. */ - timespecsub(T->timeout, &T->starttime, &delta); - deltap = &delta; - } else { - /* The user specified how much time is left. */ - deltap = T->timeout; - - /* If there's none left, we've timed out. */ - if (deltap->tv_sec == 0 && deltap->tv_nsec == 0) - return ETIMEDOUT; - } - - /* - * Convert to ticks, but clamp to be >=1. - * - * XXX In the tickless future, use a high-resolution timer if - * timo would round to zero.
- */ - *timo = tstohz(deltap); - KASSERTMSG(*timo >= 0, "negative ticks: %d", *timo); - if (*timo == 0) - *timo = 1; - - /* Success! */ - return 0; -} - -/* - * timedwaitclock_end(T) - * - * If the timedwaitclock T was relative, update the caller's - * original timeout to reflect how much time is left, or zero if - * there is no time left or if the clock has gone bad, so that the - * next timedwaitclock_begin will immediately time out. - */ -void -timedwaitclock_end(struct timedwaitclock *T) -{ - struct timespec endtime, delta; - - /* If the timeout is absolute, nothing to do. */ - if ((T->flags & TIMER_ABSTIME) == TIMER_ABSTIME) - return; - - /* - * Check our watch. If anything goes wrong with it, make sure - * that the next time we immediately time out rather than fail - * to deduct the time elapsed. - */ - if (clock_gettime1(T->clockid, &endtime)) { - T->timeout->tv_sec = 0; - T->timeout->tv_nsec = 0; - return; - } - - /* Find how much time elapsed while we waited. */ - timespecsub(&endtime, &T->starttime, &delta); - - /* - * Paranoia: If the clock went backwards, treat it as if no - * time elapsed at all rather than adding anything. - */ - if (delta.tv_sec < 0 || - (delta.tv_sec == 0 && delta.tv_nsec < 0)) { - delta.tv_sec = 0; - delta.tv_nsec = 0; - } - - /* - * Set it to the time left, or zero, whichever is larger. We - * do not fail with EWOULDBLOCK here because this may have been - * an explicit wakeup, so the caller needs to check before they - * give up or else cv_signal would be lost. 
- */ - if (timespeccmp(T->timeout, &delta, <=)) { - T->timeout->tv_sec = 0; - T->timeout->tv_nsec = 0; - } else { - timespecsub(T->timeout, &delta, T->timeout); - } -} Index: sys/kern/sys_ptrace_common.c =================================================================== RCS file: /cvsroot/src/sys/kern/sys_ptrace_common.c,v retrieving revision 1.78 retrieving revision 1.80 diff -p -u -r1.78 -r1.80 --- sys/kern/sys_ptrace_common.c 22 Feb 2020 09:24:05 -0000 1.78 +++ sys/kern/sys_ptrace_common.c 14 May 2020 13:32:15 -0000 1.80 @@ -1,4 +1,4 @@ -/* $NetBSD: sys_ptrace_common.c,v 1.78 2020/02/22 09:24:05 maxv Exp $ */ +/* $NetBSD: sys_ptrace_common.c,v 1.80 2020/05/14 13:32:15 kamil Exp $ */ /*- * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. @@ -118,7 +118,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: sys_ptrace_common.c,v 1.78 2020/02/22 09:24:05 maxv Exp $"); +__KERNEL_RCSID(0, "$NetBSD: sys_ptrace_common.c,v 1.80 2020/05/14 13:32:15 kamil Exp $"); #ifdef _KERNEL_OPT #include "opt_ptrace.h" @@ -290,6 +290,8 @@ ptrace_listener_cb(kauth_cred_t cred, ka case PT_STOP: case PT_LWPSTATUS: case PT_LWPNEXT: + case PT_SET_SIGPASS: + case PT_GET_SIGPASS: result = KAUTH_RESULT_ALLOW; break; @@ -500,6 +502,8 @@ ptrace_allowed(struct lwp *l, int req, s case PT_STOP: case PT_LWPSTATUS: case PT_LWPNEXT: + case PT_SET_SIGPASS: + case PT_GET_SIGPASS: /* * You can't do what you want to the process if: * (1) It's not being traced at all, @@ -622,6 +626,47 @@ ptrace_set_siginfo(struct proc *t, struc } static int +ptrace_get_sigpass(struct proc *t, void *addr, size_t data) +{ + sigset_t set; + + if (data > sizeof(set) || data <= 0) { + DPRINTF(("%s: invalid data: %zu < %zu <= 0\n", + __func__, sizeof(set), data)); + return EINVAL; + } + + set = t->p_sigctx.ps_sigpass; + + return copyout(&set, addr, data); +} + +static int +ptrace_set_sigpass(struct proc *t, void *addr, size_t data) +{ + sigset_t set; + int error; + + if (data > sizeof(set) || data <= 0) { + DPRINTF(("%s: invalid 
data: %zu < %zu <= 0\n", + __func__, sizeof(set), data)); + return EINVAL; + } + + memset(&set, 0, sizeof(set)); + + if ((error = copyin(addr, &set, data))) + return error; + + /* We catch SIGSTOP and cannot intercept SIGKILL. */ + sigminusset(&sigcantmask, &set); + + t->p_sigctx.ps_sigpass = set; + + return 0; +} + +static int ptrace_get_event_mask(struct proc *t, void *addr, size_t data) { struct ptrace_event pe; @@ -1392,7 +1437,11 @@ do_ptrace(struct ptrace_methods *ptm, st break; #endif if (req == PT_DETACH) { - CLR(t->p_slflag, PSL_TRACED|PSL_SYSCALL); + CLR(t->p_slflag, + PSL_TRACED|PSL_TRACEDCHILD|PSL_SYSCALL); + + /* clear sigpass mask */ + sigemptyset(&t->p_sigctx.ps_sigpass); /* give process back to original parent or init */ if (t->p_opptr != t->p_pptr) { @@ -1498,6 +1547,14 @@ do_ptrace(struct ptrace_methods *ptm, st error = ptrace_lwpstatus(t, ptm, <, addr, data, true); break; + case PT_SET_SIGPASS: + error = ptrace_set_sigpass(t, addr, data); + break; + + case PT_GET_SIGPASS: + error = ptrace_get_sigpass(t, addr, data); + break; + #ifdef PT_REGISTERS case_PT_SETREGS case_PT_GETREGS Index: sys/kern/vfs_cache.c =================================================================== RCS file: /cvsroot/src/sys/kern/vfs_cache.c,v retrieving revision 1.141 retrieving revision 1.142 diff -p -u -r1.141 -r1.142 --- sys/kern/vfs_cache.c 23 Apr 2020 22:58:36 -0000 1.141 +++ sys/kern/vfs_cache.c 12 May 2020 23:17:41 -0000 1.142 @@ -1,4 +1,4 @@ -/* $NetBSD: vfs_cache.c,v 1.141 2020/04/23 22:58:36 ad Exp $ */ +/* $NetBSD: vfs_cache.c,v 1.142 2020/05/12 23:17:41 ad Exp $ */ /*- * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc. 
@@ -172,7 +172,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.141 2020/04/23 22:58:36 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.142 2020/05/12 23:17:41 ad Exp $"); #define __NAMECACHE_PRIVATE #ifdef _KERNEL_OPT @@ -683,8 +683,10 @@ cache_lookup_linked(struct vnode *dvp, c * First up check if the user is allowed to look up files in this * directory. */ - KASSERT(dvi->vi_nc_mode != VNOVAL && dvi->vi_nc_uid != VNOVAL && - dvi->vi_nc_gid != VNOVAL); + if (dvi->vi_nc_mode == VNOVAL) { + return false; + } + KASSERT(dvi->vi_nc_uid != VNOVAL && dvi->vi_nc_gid != VNOVAL); error = kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(VEXEC, dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL, genfs_can_access(dvp->v_type, dvi->vi_nc_mode & ALLPERMS, @@ -763,8 +765,11 @@ cache_revlookup(struct vnode *vp, struct * * I don't like it, I didn't come up with it, don't blame me! */ - KASSERT(vi->vi_nc_mode != VNOVAL && vi->vi_nc_uid != VNOVAL && - vi->vi_nc_gid != VNOVAL); + if (vi->vi_nc_mode == VNOVAL) { + rw_exit(&vi->vi_nc_listlock); + return -1; + } + KASSERT(vi->vi_nc_uid != VNOVAL && vi->vi_nc_gid != VNOVAL); error = kauth_authorize_vnode(curlwp->l_cred, KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode & ALLPERMS), vp, NULL, genfs_can_access(vp->v_type, @@ -941,10 +946,11 @@ cache_enter(struct vnode *dvp, struct vn /* * Set identity info in cache for a vnode. We only care about directories - * so ignore other updates. + * so ignore other updates. The cached info may be marked invalid if the + * inode has an ACL. */ void -cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid) +cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid) { vnode_impl_t *vi = VNODE_TO_VIMPL(vp); @@ -952,9 +958,15 @@ cache_enter_id(struct vnode *vp, mode_t /* Grab both locks, for forward & reverse lookup. 
*/ rw_enter(&vi->vi_nc_lock, RW_WRITER); rw_enter(&vi->vi_nc_listlock, RW_WRITER); - vi->vi_nc_mode = mode; - vi->vi_nc_uid = uid; - vi->vi_nc_gid = gid; + if (valid) { + vi->vi_nc_mode = mode; + vi->vi_nc_uid = uid; + vi->vi_nc_gid = gid; + } else { + vi->vi_nc_mode = VNOVAL; + vi->vi_nc_uid = VNOVAL; + vi->vi_nc_gid = VNOVAL; + } rw_exit(&vi->vi_nc_listlock); rw_exit(&vi->vi_nc_lock); } @@ -965,18 +977,15 @@ cache_enter_id(struct vnode *vp, mode_t * opportunity to confirm that everything squares up. * * Because of shared code, some file systems could provide partial - * information, missing some updates, so always check the mount flag - * instead of looking for !VNOVAL. + * information, missing some updates, so check the mount flag too. */ bool cache_have_id(struct vnode *vp) { if (vp->v_type == VDIR && - (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0) { - KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_mode != VNOVAL); - KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_uid != VNOVAL); - KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_gid != VNOVAL); + (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 && + atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) { return true; } else { return false; Index: sys/kern/vfs_trans.c =================================================================== RCS file: /cvsroot/src/sys/kern/vfs_trans.c,v retrieving revision 1.61 retrieving revision 1.62 diff -p -u -r1.61 -r1.62 --- sys/kern/vfs_trans.c 17 Jun 2019 08:07:27 -0000 1.61 +++ sys/kern/vfs_trans.c 13 May 2020 09:21:30 -0000 1.62 @@ -1,4 +1,4 @@ -/* $NetBSD: vfs_trans.c,v 1.61 2019/06/17 08:07:27 hannken Exp $ */ +/* $NetBSD: vfs_trans.c,v 1.62 2020/05/13 09:21:30 hannken Exp $ */ /*- * Copyright (c) 2007 The NetBSD Foundation, Inc. @@ -30,7 +30,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.61 2019/06/17 08:07:27 hannken Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.62 2020/05/13 09:21:30 hannken Exp $"); /* * File system transaction operations. 
@@ -569,6 +569,23 @@ fstrans_done(struct mount *mp) } /* + * Check if we hold an lock. + */ +int +fstrans_held(struct mount *mp) +{ + struct fstrans_lwp_info *fli; + struct fstrans_mount_info *fmi; + + KASSERT(mp != dead_rootmount); + + fli = fstrans_get_lwp_info(mp, true); + fmi = fli->fli_mountinfo; + + return (fli->fli_trans_cnt > 0 || fmi->fmi_owner == curlwp); +} + +/* * Check if this thread has an exclusive lock. */ int Index: sys/sys/condvar.h =================================================================== RCS file: /cvsroot/src/sys/sys/condvar.h,v retrieving revision 1.16 retrieving revision 1.17 diff -p -u -r1.16 -r1.17 --- sys/sys/condvar.h 3 May 2020 01:24:37 -0000 1.16 +++ sys/sys/condvar.h 11 May 2020 03:59:33 -0000 1.17 @@ -1,4 +1,4 @@ -/* $NetBSD: condvar.h,v 1.16 2020/05/03 01:24:37 riastradh Exp $ */ +/* $NetBSD: condvar.h,v 1.17 2020/05/11 03:59:33 riastradh Exp $ */ /*- * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc. @@ -49,10 +49,6 @@ void cv_wait(kcondvar_t *, struct kmutex int cv_wait_sig(kcondvar_t *, struct kmutex *); int cv_timedwait(kcondvar_t *, struct kmutex *, int); int cv_timedwait_sig(kcondvar_t *, struct kmutex *, int); -int cv_timedwaitclock(kcondvar_t *, struct kmutex *, struct timespec *, - clockid_t, int, const struct bintime *); -int cv_timedwaitclock_sig(kcondvar_t *, struct kmutex *, struct timespec *, - clockid_t, int, const struct bintime *); int cv_timedwaitbt(kcondvar_t *, struct kmutex *, struct bintime *, const struct bintime *); int cv_timedwaitbt_sig(kcondvar_t *, struct kmutex *, struct bintime *, Index: sys/sys/entropy.h =================================================================== RCS file: /cvsroot/src/sys/sys/entropy.h,v retrieving revision 1.2 retrieving revision 1.3 diff -p -u -r1.2 -r1.3 --- sys/sys/entropy.h 7 May 2020 19:05:51 -0000 1.2 +++ sys/sys/entropy.h 8 May 2020 15:54:11 -0000 1.3 @@ -1,4 +1,4 @@ -/* $NetBSD: entropy.h,v 1.2 2020/05/07 19:05:51 riastradh Exp $ */ +/* 
$NetBSD: entropy.h,v 1.3 2020/05/08 15:54:11 riastradh Exp $ */ /*- * Copyright (c) 2019 The NetBSD Foundation, Inc. @@ -55,6 +55,4 @@ int entropy_poll(int); int entropy_kqfilter(struct knote *); int entropy_ioctl(unsigned long, void *); -extern bool entropy_depletion; - #endif /* _SYS_ENTROPY_H */ Index: sys/sys/fstrans.h =================================================================== RCS file: /cvsroot/src/sys/sys/fstrans.h,v retrieving revision 1.13 retrieving revision 1.14 diff -p -u -r1.13 -r1.14 --- sys/sys/fstrans.h 1 Mar 2019 09:02:03 -0000 1.13 +++ sys/sys/fstrans.h 13 May 2020 09:21:30 -0000 1.14 @@ -1,4 +1,4 @@ -/* $NetBSD: fstrans.h,v 1.13 2019/03/01 09:02:03 hannken Exp $ */ +/* $NetBSD: fstrans.h,v 1.14 2020/05/13 09:21:30 hannken Exp $ */ /*- * Copyright (c) 2007 The NetBSD Foundation, Inc. @@ -53,6 +53,7 @@ void fstrans_start(struct mount *); int fstrans_start_nowait(struct mount *); void fstrans_start_lazy(struct mount *); void fstrans_done(struct mount *); +int fstrans_held(struct mount *); int fstrans_is_owner(struct mount *); int fstrans_mount(struct mount *); void fstrans_unmount(struct mount *); Index: sys/sys/namei.h =================================================================== RCS file: /cvsroot/src/sys/sys/namei.h,v retrieving revision 1.108 retrieving revision 1.109 diff -p -u -r1.108 -r1.109 --- sys/sys/namei.h 4 Apr 2020 20:52:18 -0000 1.108 +++ sys/sys/namei.h 12 May 2020 23:18:03 -0000 1.109 @@ -1,11 +1,11 @@ -/* $NetBSD: namei.h,v 1.108 2020/04/04 20:52:18 ad Exp $ */ +/* $NetBSD: namei.h,v 1.109 2020/05/12 23:18:03 ad Exp $ */ /* * WARNING: GENERATED FILE. 
DO NOT EDIT * (edit namei.src and run make namei in src/sys/sys) * by: NetBSD: gennameih.awk,v 1.5 2009/12/23 14:17:19 pooka Exp - * from: NetBSD: namei.src,v 1.53 2020/04/04 20:49:31 ad Exp + * from: NetBSD: namei.src,v 1.54 2020/05/12 23:17:41 ad Exp */ /* @@ -302,7 +302,7 @@ int cache_revlookup(struct vnode *, stru int cache_diraccess(struct vnode *, int); void cache_enter(struct vnode *, struct vnode *, const char *, size_t, uint32_t); -void cache_enter_id(struct vnode *, mode_t, uid_t, gid_t); +void cache_enter_id(struct vnode *, mode_t, uid_t, gid_t, bool); bool cache_have_id(struct vnode *); void cache_vnode_init(struct vnode * ); void cache_vnode_fini(struct vnode * ); Index: sys/sys/namei.src =================================================================== RCS file: /cvsroot/src/sys/sys/namei.src,v retrieving revision 1.53 retrieving revision 1.54 diff -p -u -r1.53 -r1.54 --- sys/sys/namei.src 4 Apr 2020 20:49:31 -0000 1.53 +++ sys/sys/namei.src 12 May 2020 23:17:41 -0000 1.54 @@ -1,4 +1,4 @@ -/* $NetBSD: namei.src,v 1.53 2020/04/04 20:49:31 ad Exp $ */ +/* $NetBSD: namei.src,v 1.54 2020/05/12 23:17:41 ad Exp $ */ /* * Copyright (c) 1985, 1989, 1991, 1993 @@ -294,7 +294,7 @@ int cache_revlookup(struct vnode *, stru int cache_diraccess(struct vnode *, int); void cache_enter(struct vnode *, struct vnode *, const char *, size_t, uint32_t); -void cache_enter_id(struct vnode *, mode_t, uid_t, gid_t); +void cache_enter_id(struct vnode *, mode_t, uid_t, gid_t, bool); bool cache_have_id(struct vnode *); void cache_vnode_init(struct vnode * ); void cache_vnode_fini(struct vnode * ); Index: sys/sys/param.h =================================================================== RCS file: /cvsroot/src/sys/sys/param.h,v retrieving revision 1.663 retrieving revision 1.665 diff -p -u -r1.663 -r1.665 --- sys/sys/param.h 5 May 2020 08:05:44 -0000 1.663 +++ sys/sys/param.h 14 May 2020 13:34:46 -0000 1.665 @@ -1,4 +1,4 @@ -/* $NetBSD: param.h,v 1.663 2020/05/05 08:05:44 
jdolecek Exp $ */ +/* $NetBSD: param.h,v 1.665 2020/05/14 13:34:46 kamil Exp $ */ /*- * Copyright (c) 1982, 1986, 1989, 1993 @@ -67,7 +67,7 @@ * 2.99.9 (299000900) */ -#define __NetBSD_Version__ 999006000 /* NetBSD 9.99.60 */ +#define __NetBSD_Version__ 999006200 /* NetBSD 9.99.62 */ #define __NetBSD_Prereq__(M,m,p) (((((M) * 100000000) + \ (m) * 1000000) + (p) * 100) <= __NetBSD_Version__) Index: sys/sys/ptrace.h =================================================================== RCS file: /cvsroot/src/sys/sys/ptrace.h,v retrieving revision 1.69 retrieving revision 1.70 diff -p -u -r1.69 -r1.70 --- sys/sys/ptrace.h 26 Dec 2019 08:52:38 -0000 1.69 +++ sys/sys/ptrace.h 14 May 2020 13:32:15 -0000 1.70 @@ -1,4 +1,4 @@ -/* $NetBSD: ptrace.h,v 1.69 2019/12/26 08:52:38 kamil Exp $ */ +/* $NetBSD: ptrace.h,v 1.70 2020/05/14 13:32:15 kamil Exp $ */ /*- * Copyright (c) 1984, 1993 @@ -63,6 +63,8 @@ #define PT_STOP 23 /* stop the child process */ #define PT_LWPSTATUS 24 /* get info about the LWP */ #define PT_LWPNEXT 25 /* get info about next LWP */ +#define PT_SET_SIGPASS 26 /* set signals to pass to debuggee */ +#define PT_GET_SIGPASS 27 /* get signals to pass to debuggee */ #define PT_FIRSTMACH 32 /* for machine-specific requests */ #include /* machine-specific requests, if any */ @@ -93,7 +95,9 @@ /* 22 */ "PT_SUSPEND", \ /* 23 */ "PT_STOP", \ /* 24 */ "PT_LWPSTATUS", \ -/* 25 */ "PT_LWPNEXT" +/* 25 */ "PT_LWPNEXT", \ +/* 26 */ "PT_SET_SIGPASS", \ +/* 27 */ "PT_GET_SIGPASS" /* PT_{G,S}EVENT_MASK */ typedef struct ptrace_event { Index: sys/sys/sched.h =================================================================== RCS file: /cvsroot/src/sys/sys/sched.h,v retrieving revision 1.88 retrieving revision 1.89 diff -p -u -r1.88 -r1.89 --- sys/sys/sched.h 14 Mar 2020 18:08:39 -0000 1.88 +++ sys/sys/sched.h 12 May 2020 11:21:09 -0000 1.89 @@ -1,4 +1,4 @@ -/* $NetBSD: sched.h,v 1.88 2020/03/14 18:08:39 ad Exp $ */ +/* $NetBSD: sched.h,v 1.89 2020/05/12 11:21:09 kamil Exp $ */ 
/*- * Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008, 2019, 2020 @@ -200,7 +200,6 @@ struct schedstate_percpu { #define CLONE_FS 0x00000200 /* share "file system" info */ #define CLONE_FILES 0x00000400 /* share file descriptors */ #define CLONE_SIGHAND 0x00000800 /* share signal actions */ -#define CLONE_PID 0x00001000 /* share process ID */ #define CLONE_PTRACE 0x00002000 /* ptrace(2) continues on child */ #define CLONE_VFORK 0x00004000 /* parent blocks until child Index: sys/sys/signalvar.h =================================================================== RCS file: /cvsroot/src/sys/sys/signalvar.h,v retrieving revision 1.101 retrieving revision 1.102 diff -p -u -r1.101 -r1.102 --- sys/sys/signalvar.h 5 Apr 2020 20:53:17 -0000 1.101 +++ sys/sys/signalvar.h 14 May 2020 13:32:15 -0000 1.102 @@ -1,4 +1,4 @@ -/* $NetBSD: signalvar.h,v 1.101 2020/04/05 20:53:17 christos Exp $ */ +/* $NetBSD: signalvar.h,v 1.102 2020/05/14 13:32:15 kamil Exp $ */ /* * Copyright (c) 1991, 1993 @@ -85,6 +85,7 @@ struct sigctx { void *ps_sigcode; /* address of signal trampoline */ sigset_t ps_sigignore; /* Signals being ignored. */ sigset_t ps_sigcatch; /* Signals being caught by user. */ + sigset_t ps_sigpass; /* Signals evading the debugger. */ }; /* additional signal action values, used only temporarily/internally */ Index: sys/sys/timevar.h =================================================================== RCS file: /cvsroot/src/sys/sys/timevar.h,v retrieving revision 1.43 retrieving revision 1.44 diff -p -u -r1.43 -r1.44 --- sys/sys/timevar.h 4 May 2020 18:23:37 -0000 1.43 +++ sys/sys/timevar.h 11 May 2020 03:59:33 -0000 1.44 @@ -1,4 +1,4 @@ -/* $NetBSD: timevar.h,v 1.43 2020/05/04 18:23:37 riastradh Exp $ */ +/* $NetBSD: timevar.h,v 1.44 2020/05/11 03:59:33 riastradh Exp $ */ /* * Copyright (c) 2005, 2008, The NetBSD Foundation. 
@@ -107,14 +107,6 @@ struct ptimers { struct ptimer *pts_timers[TIMER_MAX]; }; -struct timedwaitclock { - struct timespec *timeout; - clockid_t clockid; - int flags; - const struct bintime *epsilon; - struct timespec starttime; -}; - /* * Functions for looking at our clock: [get]{bin,nano,micro}[up]time() * @@ -200,11 +192,6 @@ void time_init(void); void time_init2(void); bool time_wraps(struct timespec *, struct timespec *); -void timedwaitclock_setup(struct timedwaitclock *, struct timespec *, - clockid_t, int, const struct bintime *); -int timedwaitclock_begin(struct timedwaitclock *, int *); -void timedwaitclock_end(struct timedwaitclock *); - extern volatile time_t time_second; /* current second in the epoch */ extern volatile time_t time_uptime; /* system uptime in seconds */ Index: sys/ufs/ffs/ffs_vfsops.c =================================================================== RCS file: /cvsroot/src/sys/ufs/ffs/ffs_vfsops.c,v retrieving revision 1.367 retrieving revision 1.368 diff -p -u -r1.367 -r1.368 --- sys/ufs/ffs/ffs_vfsops.c 4 Apr 2020 20:49:31 -0000 1.367 +++ sys/ufs/ffs/ffs_vfsops.c 12 May 2020 23:17:41 -0000 1.368 @@ -1,4 +1,4 @@ -/* $NetBSD: ffs_vfsops.c,v 1.367 2020/04/04 20:49:31 ad Exp $ */ +/* $NetBSD: ffs_vfsops.c,v 1.368 2020/05/12 23:17:41 ad Exp $ */ /*- * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. 
@@ -61,7 +61,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.367 2020/04/04 20:49:31 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.368 2020/05/12 23:17:41 ad Exp $"); #if defined(_KERNEL_OPT) #include "opt_ffs.h" @@ -2084,7 +2084,7 @@ ffs_loadvnode(struct mount *mp, struct v ip->i_gid = ip->i_ffs1_ogid; /* XXX */ } /* XXX */ uvm_vnp_setsize(vp, ip->i_size); - cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid); + cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, true); *new_key = &ip->i_number; return 0; } @@ -2206,7 +2206,7 @@ ffs_newvnode(struct mount *mp, struct vn } uvm_vnp_setsize(vp, ip->i_size); - cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid); + cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, true); *new_key = &ip->i_number; return 0; } Index: sys/ufs/ufs/ufs_vnops.c =================================================================== RCS file: /cvsroot/src/sys/ufs/ufs/ufs_vnops.c,v retrieving revision 1.252 retrieving revision 1.253 diff -p -u -r1.252 -r1.253 --- sys/ufs/ufs/ufs_vnops.c 18 Apr 2020 19:18:34 -0000 1.252 +++ sys/ufs/ufs/ufs_vnops.c 12 May 2020 23:17:41 -0000 1.253 @@ -1,4 +1,4 @@ -/* $NetBSD: ufs_vnops.c,v 1.252 2020/04/18 19:18:34 christos Exp $ */ +/* $NetBSD: ufs_vnops.c,v 1.253 2020/05/12 23:17:41 ad Exp $ */ /*- * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc. 
@@ -66,7 +66,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: ufs_vnops.c,v 1.252 2020/04/18 19:18:34 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ufs_vnops.c,v 1.253 2020/05/12 23:17:41 ad Exp $"); #if defined(_KERNEL_OPT) #include "opt_ffs.h" @@ -621,7 +621,7 @@ ufs_setattr(void *v) } VN_KNOTE(vp, NOTE_ATTRIB); out: - cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid); + cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, true); return (error); } @@ -649,7 +649,7 @@ ufs_chmod(struct vnode *vp, int mode, ka ip->i_flag |= IN_CHANGE; DIP_ASSIGN(ip, mode, ip->i_mode); UFS_WAPBL_UPDATE(vp, NULL, NULL, 0); - cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid); + cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, true); return (0); } @@ -710,7 +710,7 @@ ufs_chown(struct vnode *vp, uid_t uid, g #endif /* QUOTA || QUOTA2 */ ip->i_flag |= IN_CHANGE; UFS_WAPBL_UPDATE(vp, NULL, NULL, 0); - cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid); + cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, true); return (0); } Index: sys/uvm/files.uvm =================================================================== RCS file: /cvsroot/src/sys/uvm/files.uvm,v retrieving revision 1.33 retrieving revision 1.34 diff -p -u -r1.33 -r1.34 --- sys/uvm/files.uvm 15 Jan 2020 17:55:45 -0000 1.33 +++ sys/uvm/files.uvm 10 May 2020 22:28:09 -0000 1.34 @@ -1,4 +1,4 @@ -# $NetBSD: files.uvm,v 1.33 2020/01/15 17:55:45 ad Exp $ +# $NetBSD: files.uvm,v 1.34 2020/05/10 22:28:09 pgoyette Exp $ # # UVM options @@ -8,7 +8,7 @@ defflag opt_uvmhist.h UVMHIST_PRINT: KE defparam opt_uvmhist.h UVMHIST_MAPHIST_SIZE UVMHIST_PDHIST_SIZE defflag opt_uvm.h USE_TOPDOWN_VM UVMMAP_COUNTERS defparam opt_uvm.h UVM_RESERVED_PAGES_PER_CPU -defflag opt_vmswap.h VMSWAP +defflag opt_vmswap.h VMSWAP : rijndael defflag opt_readahead.h READAHEAD_STATS defflag opt_ubc.h UBC_STATS defparam opt_pagermap.h PAGER_MAP_SIZE Index: sys/uvm/uvm_extern.h =================================================================== RCS file: 
/cvsroot/src/sys/uvm/uvm_extern.h,v retrieving revision 1.225 retrieving revision 1.226 diff -p -u -r1.225 -r1.226 --- sys/uvm/uvm_extern.h 27 Apr 2020 02:47:26 -0000 1.225 +++ sys/uvm/uvm_extern.h 9 May 2020 15:13:19 -0000 1.226 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_extern.h,v 1.225 2020/04/27 02:47:26 rin Exp $ */ +/* $NetBSD: uvm_extern.h,v 1.226 2020/05/09 15:13:19 thorpej Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -613,8 +613,7 @@ extern struct vm_map *phys_map; * * This structure encapsulates UVM's unique virtual object address * for an individual byte inside a pageable page. Pageable pages can - * be owned by either a uvm_object (UVM_VOADDR_TYPE_OBJECT) or a - * vm_anon (UVM_VOADDR_TYPE_ANON). + * be owned by either a uvm_object or a vm_anon. * * In each case, the byte offset into the owning object * (uvm_object or vm_anon) is included in the ID, so that @@ -631,14 +630,7 @@ extern struct vm_map *phys_map; * use. */ struct uvm_voaddr { - enum { - UVM_VOADDR_TYPE_OBJECT = 1, - UVM_VOADDR_TYPE_ANON = 2, - } type; - union { - struct uvm_object *uobj; - struct vm_anon *anon; - }; + uintptr_t object; voff_t offset; }; Index: sys/uvm/uvm_map.c =================================================================== RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v retrieving revision 1.382 retrieving revision 1.383 diff -p -u -r1.382 -r1.383 --- sys/uvm/uvm_map.c 30 Apr 2020 04:18:07 -0000 1.382 +++ sys/uvm/uvm_map.c 9 May 2020 15:13:19 -0000 1.383 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_map.c,v 1.382 2020/04/30 04:18:07 thorpej Exp $ */ +/* $NetBSD: uvm_map.c,v 1.383 2020/05/09 15:13:19 thorpej Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -66,7 +66,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.382 2020/04/30 04:18:07 thorpej Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.383 2020/05/09 15:13:19 thorpej Exp $"); #include "opt_ddb.h" #include "opt_pax.h" @@ -4781,6 +4781,31 @@ uvm_map_unlock_entry(struct vm_map_entry } } +#define UVM_VOADDR_TYPE_MASK 0x3UL +#define UVM_VOADDR_TYPE_UOBJ 0x1UL +#define UVM_VOADDR_TYPE_ANON 0x2UL +#define UVM_VOADDR_OBJECT_MASK ~UVM_VOADDR_TYPE_MASK + +#define UVM_VOADDR_GET_TYPE(voa) \ + ((voa)->object & UVM_VOADDR_TYPE_MASK) +#define UVM_VOADDR_GET_OBJECT(voa) \ + ((voa)->object & UVM_VOADDR_OBJECT_MASK) +#define UVM_VOADDR_SET_OBJECT(voa, obj, type) \ +do { \ + KASSERT(((uintptr_t)(obj) & UVM_VOADDR_TYPE_MASK) == 0); \ + (voa)->object = ((uintptr_t)(obj)) | (type); \ +} while (/*CONSTCOND*/0) + +#define UVM_VOADDR_GET_UOBJ(voa) \ + ((struct uvm_object *)UVM_VOADDR_GET_OBJECT(voa)) +#define UVM_VOADDR_SET_UOBJ(voa, uobj) \ + UVM_VOADDR_SET_OBJECT(voa, uobj, UVM_VOADDR_TYPE_UOBJ) + +#define UVM_VOADDR_GET_ANON(voa) \ + ((struct vm_anon *)UVM_VOADDR_GET_OBJECT(voa)) +#define UVM_VOADDR_SET_ANON(voa, anon) \ + UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON) + /* * uvm_voaddr_acquire: returns the virtual object address corresponding * to the specified virtual address. 
@@ -4936,8 +4961,7 @@ uvm_voaddr_acquire(struct vm_map * const anon->an_ref++; rw_obj_hold(anon->an_lock); KASSERT(anon->an_ref != 0); - voaddr->type = UVM_VOADDR_TYPE_ANON; - voaddr->anon = anon; + UVM_VOADDR_SET_ANON(voaddr, anon); voaddr->offset = va & PAGE_MASK; result = true; } @@ -4950,8 +4974,7 @@ uvm_voaddr_acquire(struct vm_map * const KASSERT(uobj != NULL); (*uobj->pgops->pgo_reference)(uobj); - voaddr->type = UVM_VOADDR_TYPE_OBJECT; - voaddr->uobj = uobj; + UVM_VOADDR_SET_UOBJ(voaddr, uobj); voaddr->offset = entry->offset + (va - entry->start); result = true; } @@ -4961,7 +4984,9 @@ uvm_voaddr_acquire(struct vm_map * const if (result) { UVMHIST_LOG(maphist, "<- done OK (type=%jd,owner=#%jx,offset=%jx)", - voaddr->type, (uintptr_t)voaddr->uobj, voaddr->offset, 0); + UVM_VOADDR_GET_TYPE(voaddr), + UVM_VOADDR_GET_OBJECT(voaddr), + voaddr->offset, 0); } else { UVMHIST_LOG(maphist,"<- done (failed)",0,0,0,0); } @@ -4977,9 +5002,9 @@ void uvm_voaddr_release(struct uvm_voaddr * const voaddr) { - switch (voaddr->type) { - case UVM_VOADDR_TYPE_OBJECT: { - struct uvm_object * const uobj = voaddr->uobj; + switch (UVM_VOADDR_GET_TYPE(voaddr)) { + case UVM_VOADDR_TYPE_UOBJ: { + struct uvm_object * const uobj = UVM_VOADDR_GET_UOBJ(voaddr); KASSERT(uobj != NULL); KASSERT(uobj->pgops->pgo_detach != NULL); @@ -4987,7 +5012,7 @@ uvm_voaddr_release(struct uvm_voaddr * c break; } case UVM_VOADDR_TYPE_ANON: { - struct vm_anon * const anon = voaddr->anon; + struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr); krwlock_t *lock; KASSERT(anon != NULL); @@ -5015,23 +5040,22 @@ int uvm_voaddr_compare(const struct uvm_voaddr * const voaddr1, const struct uvm_voaddr * const voaddr2) { + const uintptr_t type1 = UVM_VOADDR_GET_TYPE(voaddr1); + const uintptr_t type2 = UVM_VOADDR_GET_TYPE(voaddr2); - KASSERT(voaddr1->type == UVM_VOADDR_TYPE_OBJECT || - voaddr1->type == UVM_VOADDR_TYPE_ANON); + KASSERT(type1 == UVM_VOADDR_TYPE_UOBJ || + type1 == UVM_VOADDR_TYPE_ANON); - 
KASSERT(voaddr2->type == UVM_VOADDR_TYPE_OBJECT || - voaddr2->type == UVM_VOADDR_TYPE_ANON); + KASSERT(type2 == UVM_VOADDR_TYPE_UOBJ || + type2 == UVM_VOADDR_TYPE_ANON); - if (voaddr1->type < voaddr2->type) + if (type1 < type2) return -1; - if (voaddr1->type > voaddr2->type) + if (type1 > type2) return 1; - /* These fields are unioned together. */ - CTASSERT(offsetof(struct uvm_voaddr, uobj) == - offsetof(struct uvm_voaddr, anon)); - const uintptr_t addr1 = (uintptr_t)voaddr1->uobj; - const uintptr_t addr2 = (uintptr_t)voaddr2->uobj; + const uintptr_t addr1 = UVM_VOADDR_GET_OBJECT(voaddr1); + const uintptr_t addr2 = UVM_VOADDR_GET_OBJECT(voaddr2); if (addr1 < addr2) return -1; Index: sys/uvm/uvm_swap.c =================================================================== RCS file: /cvsroot/src/sys/uvm/uvm_swap.c,v retrieving revision 1.186 retrieving revision 1.189 diff -p -u -r1.186 -r1.189 --- sys/uvm/uvm_swap.c 18 Feb 2020 20:23:17 -0000 1.186 +++ sys/uvm/uvm_swap.c 10 May 2020 02:38:10 -0000 1.189 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_swap.c,v 1.186 2020/02/18 20:23:17 chs Exp $ */ +/* $NetBSD: uvm_swap.c,v 1.189 2020/05/10 02:38:10 riastradh Exp $ */ /* * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green @@ -30,7 +30,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.186 2020/02/18 20:23:17 chs Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.189 2020/05/10 02:38:10 riastradh Exp $"); #include "opt_uvmhist.h" #include "opt_compat_netbsd.h" @@ -42,6 +42,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v #include #include #include +#include #include #include #include @@ -64,6 +65,8 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v #include +#include + /* * uvm_swap.c: manage configuration and i/o to swap space. 
*/ @@ -143,6 +146,11 @@ struct swapdev { int swd_maxactive; /* max active i/o reqs */ struct bufq_state *swd_tab; /* buffer list */ int swd_active; /* number of active buffers */ + + uint8_t *swd_encmap; /* bitmap of encrypted slots */ + keyInstance swd_enckey; /* AES key expanded for enc */ + keyInstance swd_deckey; /* AES key expanded for dec */ + bool swd_encinit; /* true if keys initialized */ }; /* @@ -200,6 +208,7 @@ static struct workqueue *sw_reg_workqueu /* tuneables */ u_int uvm_swapisfull_factor = 99; +bool uvm_swap_encrypt = false; /* * prototypes @@ -221,6 +230,10 @@ static void sw_reg_start(struct swapdev static int uvm_swap_io(struct vm_page **, int, int, int); +static void uvm_swap_genkey(struct swapdev *); +static void uvm_swap_encryptpage(struct swapdev *, void *, int); +static void uvm_swap_decryptpage(struct swapdev *, void *, int); + /* * uvm_swap_init: init the swap system data structures and locks * @@ -888,6 +901,13 @@ swap_on(struct lwp *l, struct swapdev *s blist_free(sdp->swd_blist, addr, size); /* + * allocate space to for swap encryption state and mark the + * keys uninitialized so we generate them lazily + */ + sdp->swd_encmap = kmem_zalloc(howmany(npages, NBBY), KM_SLEEP); + sdp->swd_encinit = false; + + /* * if the vnode we are swapping to is the root vnode * (i.e. we are swapping to the miniroot) then we want * to make sure we don't overwrite it. 
do a statfs to @@ -1059,6 +1079,9 @@ swap_off(struct lwp *l, struct swapdev * vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize); blist_destroy(sdp->swd_blist); bufq_free(sdp->swd_tab); + kmem_free(sdp->swd_encmap, howmany(sdp->swd_npages, NBBY)); + explicit_memset(&sdp->swd_enckey, 0, sizeof sdp->swd_enckey); + explicit_memset(&sdp->swd_deckey, 0, sizeof sdp->swd_deckey); kmem_free(sdp, sizeof(*sdp)); return (0); } @@ -1769,7 +1792,7 @@ uvm_swap_io(struct vm_page **pps, int st struct buf *bp; vaddr_t kva; int error, mapinflags; - bool write, async; + bool write, async, swap_encrypt; UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist); UVMHIST_LOG(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%jd", @@ -1777,6 +1800,7 @@ uvm_swap_io(struct vm_page **pps, int st write = (flags & B_READ) == 0; async = (flags & B_ASYNC) != 0; + swap_encrypt = atomic_load_relaxed(&uvm_swap_encrypt); /* * allocate a buf for the i/o. @@ -1802,9 +1826,68 @@ uvm_swap_io(struct vm_page **pps, int st mapinflags = !write ? UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WAITOK|UVMPAGER_MAPIN_WRITE; + if (write && swap_encrypt) /* need to encrypt in-place */ + mapinflags |= UVMPAGER_MAPIN_READ; kva = uvm_pagermapin(pps, npages, mapinflags); /* + * encrypt writes in place if requested + */ + + if (write) do { + struct swapdev *sdp; + int i; + + /* + * Get the swapdev so we can discriminate on the + * encryption state. There may or may not be an + * encryption key generated; we may or may not be asked + * to encrypt swap. + * + * 1. NO KEY, NO ENCRYPTION: Nothing to do. + * + * 2. NO KEY, BUT ENCRYPTION: Generate a key, encrypt, + * and mark the slots encrypted. + * + * 3. KEY, BUT NO ENCRYPTION: The slots may already be + * marked encrypted from a past life. Mark them not + * encrypted. + * + * 4. KEY, ENCRYPTION: Encrypt and mark the slots + * encrypted. 
+ */ + sdp = swapdrum_getsdp(startslot); + if (!sdp->swd_encinit) { + if (!swap_encrypt) + break; + uvm_swap_genkey(sdp); + } + KASSERT(sdp->swd_encinit); + + if (swap_encrypt) { + for (i = 0; i < npages; i++) { + int s = startslot + i; + KDASSERT(swapdrum_getsdp(s) == sdp); + KASSERT(s >= sdp->swd_drumoffset); + s -= sdp->swd_drumoffset; + KASSERT(s < sdp->swd_drumsize); + uvm_swap_encryptpage(sdp, + (void *)(kva + (vsize_t)i*PAGE_SIZE), s); + setbit(sdp->swd_encmap, s); + } + } else { + for (i = 0; i < npages; i++) { + int s = startslot + i; + KDASSERT(swapdrum_getsdp(s) == sdp); + KASSERT(s >= sdp->swd_drumoffset); + s -= sdp->swd_drumoffset; + KASSERT(s < sdp->swd_drumsize); + clrbit(sdp->swd_encmap, s); + } + } + } while (0); + + /* * fill in the bp/sbp. we currently route our i/o through * /dev/drum's vnode [swapdev_vp]. */ @@ -1861,6 +1944,35 @@ uvm_swap_io(struct vm_page **pps, int st error = biowait(bp); /* + * decrypt reads in place if needed + */ + + if (!write) do { + struct swapdev *sdp; + int i; + + sdp = swapdrum_getsdp(startslot); + if (!sdp->swd_encinit) + /* + * If there's no encryption key, there's no way + * any of these slots can be encrypted, so + * nothing to do here. + */ + break; + for (i = 0; i < npages; i++) { + int s = startslot + i; + KDASSERT(swapdrum_getsdp(s) == sdp); + KASSERT(s >= sdp->swd_drumoffset); + s -= sdp->swd_drumoffset; + KASSERT(s < sdp->swd_drumsize); + if (isclr(sdp->swd_encmap, s)) + continue; + uvm_swap_decryptpage(sdp, + (void *)(kva + (vsize_t)i*PAGE_SIZE), s); + } + } while (0); + + /* * kill the pager mapping */ @@ -1880,3 +1992,98 @@ uvm_swap_io(struct vm_page **pps, int st return (error); } + +/* + * uvm_swap_genkey(sdp) + * + * Generate a key for swap encryption. 
+ */ +static void +uvm_swap_genkey(struct swapdev *sdp) +{ + uint8_t key[32]; + + KASSERT(!sdp->swd_encinit); + + cprng_strong(kern_cprng, key, sizeof key, 0); + rijndael_makeKey(&sdp->swd_enckey, DIR_ENCRYPT, 256, key); + rijndael_makeKey(&sdp->swd_deckey, DIR_DECRYPT, 256, key); + explicit_memset(key, 0, sizeof key); + + sdp->swd_encinit = true; +} + +/* + * uvm_swap_encryptpage(sdp, kva, slot) + * + * Encrypt one page of data at kva for the specified slot number + * in the swap device. + */ +static void +uvm_swap_encryptpage(struct swapdev *sdp, void *kva, int slot) +{ + cipherInstance aes; + uint8_t preiv[16] = {0}, iv[16]; + int ok __diagused, nbits __diagused; + + /* iv := AES_k(le32enc(slot) || 0^96) */ + le32enc(preiv, slot); + ok = rijndael_cipherInit(&aes, MODE_ECB, NULL); + KASSERT(ok); + nbits = rijndael_blockEncrypt(&aes, &sdp->swd_enckey, preiv, + /*length in bits*/128, iv); + KASSERT(nbits == 128); + + /* *kva := AES-CBC_k(iv, *kva) */ + ok = rijndael_cipherInit(&aes, MODE_CBC, iv); + KASSERT(ok); + nbits = rijndael_blockEncrypt(&aes, &sdp->swd_enckey, kva, + /*length in bits*/PAGE_SIZE*NBBY, kva); + KASSERT(nbits == PAGE_SIZE*NBBY); + + explicit_memset(&iv, 0, sizeof iv); + explicit_memset(&aes, 0, sizeof aes); +} + +/* + * uvm_swap_decryptpage(sdp, kva, slot) + * + * Decrypt one page of data at kva for the specified slot number + * in the swap device. 
+ */ +static void +uvm_swap_decryptpage(struct swapdev *sdp, void *kva, int slot) +{ + cipherInstance aes; + uint8_t preiv[16] = {0}, iv[16]; + int ok __diagused, nbits __diagused; + + /* iv := AES_k(le32enc(slot) || 0^96) */ + le32enc(preiv, slot); + ok = rijndael_cipherInit(&aes, MODE_ECB, NULL); + KASSERT(ok); + nbits = rijndael_blockEncrypt(&aes, &sdp->swd_enckey, preiv, + /*length in bits*/128, iv); + KASSERTMSG(nbits == 128, "nbits=%d expected %d\n", nbits, 128); + + /* *kva := AES-CBC^{-1}_k(iv, *kva) */ + ok = rijndael_cipherInit(&aes, MODE_CBC, iv); + KASSERT(ok); + nbits = rijndael_blockDecrypt(&aes, &sdp->swd_deckey, kva, + /*length in bits*/PAGE_SIZE*NBBY, kva); + KASSERTMSG(nbits == PAGE_SIZE*NBBY, + "nbits=%d expected %d\n", nbits, PAGE_SIZE*NBBY); + + explicit_memset(&iv, 0, sizeof iv); + explicit_memset(&aes, 0, sizeof aes); +} + +SYSCTL_SETUP(sysctl_uvmswap_setup, "sysctl uvmswap setup") +{ + + sysctl_createv(clog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "swap_encrypt", + SYSCTL_DESCR("Encrypt data when swapped out to disk"), + NULL, 0, &uvm_swap_encrypt, 0, + CTL_VM, CTL_CREATE, CTL_EOL); +}