Index: arch/amd64/amd64/trap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/trap.c,v
retrieving revision 1.80
diff -p -u -r1.80 trap.c
--- arch/amd64/amd64/trap.c	27 Feb 2015 15:35:10 -0000	1.80
+++ arch/amd64/amd64/trap.c	27 Jul 2015 23:12:51 -0000
@@ -246,6 +246,9 @@ trap(struct trapframe *frame)
 	}
 	type = frame->tf_trapno;
 
+	if (!KERNELMODE(frame->tf_cs, frame->tf_rflags))
+		KASSERTMSG((l->l_exlocks == 0), "exclusive locks held: %hu", l->l_exlocks);
+
 #ifdef DEBUG
 	if (trapdebug) {
 		trap_print(frame, l);
Index: external/bsd/drm2/linux/linux_ww_mutex.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/linux/linux_ww_mutex.c,v
retrieving revision 1.2
diff -p -u -r1.2 linux_ww_mutex.c
--- external/bsd/drm2/linux/linux_ww_mutex.c	21 May 2015 21:55:55 -0000	1.2
+++ external/bsd/drm2/linux/linux_ww_mutex.c	27 Jul 2015 23:12:52 -0000
@@ -49,7 +49,7 @@ __KERNEL_RCSID(0, "$NetBSD: linux_ww_mut
 	LOCKDEBUG_LOCKED((WW)->wwm_debug, (WW), NULL,			\
 	    (uintptr_t)__builtin_return_address(0), 0)
 #define	WW_UNLOCKED(WW)							\
-	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW),			\
+	LOCKDEBUG_UNLOCKED((WW)->wwm_debug, (WW), false,		\
 	    (uintptr_t)__builtin_return_address(0), 0)
 
 static int
Index: kern/kern_condvar.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_condvar.c,v
retrieving revision 1.34
diff -p -u -r1.34 kern_condvar.c
--- kern/kern_condvar.c	25 Oct 2013 15:51:36 -0000	1.34
+++ kern/kern_condvar.c	27 Jul 2015 23:12:52 -0000
@@ -173,7 +173,7 @@ cv_exit(kcondvar_t *cv, kmutex_t *mtx, l
 	if (__predict_false(error != 0))
 		cv_signal(cv);
 
-	LOCKDEBUG_UNLOCKED(CV_DEBUG_P(cv), cv, CV_RA, 0);
+	LOCKDEBUG_UNLOCKED(CV_DEBUG_P(cv), cv, true, CV_RA, 0);
 	KASSERT(cv_is_valid(cv));
 
 	return error;
Index: kern/kern_lock.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_lock.c,v
retrieving revision 1.157
diff -p -u -r1.157 kern_lock.c
--- kern/kern_lock.c	11 Apr 2015 15:24:25 -0000	1.157
+++ kern/kern_lock.c	27 Jul 2015 23:12:52 -0000
@@ -290,7 +290,7 @@ _kernel_unlock(int nlocks, int *countp)
 	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
 	if (ci->ci_biglock_count == nlocks) {
 		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
-		    RETURN_ADDRESS, 0);
+		    false, RETURN_ADDRESS, 0);
 		ci->ci_biglock_count = 0;
 		__cpu_simple_unlock(kernel_lock);
 		l->l_blcnt -= nlocks;
Index: kern/kern_mutex.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_mutex.c,v
retrieving revision 1.62
diff -p -u -r1.62 kern_mutex.c
--- kern/kern_mutex.c	25 May 2015 21:02:37 -0000	1.62
+++ kern/kern_mutex.c	27 Jul 2015 23:12:52 -0000
@@ -79,7 +79,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
 	LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
 	    (uintptr_t)__builtin_return_address(0), 0)
 #define	MUTEX_UNLOCKED(mtx)						\
-	LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),			\
+	LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), false,		\
 	    (uintptr_t)__builtin_return_address(0), 0)
 #define	MUTEX_ABORT(mtx, msg)						\
 	mutex_abort(mtx, __func__, msg)
Index: kern/kern_rwlock.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_rwlock.c,v
retrieving revision 1.45
diff -p -u -r1.45 kern_rwlock.c
--- kern/kern_rwlock.c	28 Nov 2014 08:28:17 -0000	1.45
+++ kern/kern_rwlock.c	27 Jul 2015 23:12:52 -0000
@@ -61,15 +61,6 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock.
 
 #if defined(LOCKDEBUG)
 
-#define	RW_WANTLOCK(rw, op)						\
-	LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),			\
-	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
-#define	RW_LOCKED(rw, op)						\
-	LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,			\
-	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
-#define	RW_UNLOCKED(rw, op)						\
-	LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw),			\
-	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
 #define	RW_DASSERT(rw, cond)						\
 do {									\
 	if (!(cond))							\
@@ -78,13 +69,20 @@ do {									\
 
 #else	/* LOCKDEBUG */
 
-#define	RW_WANTLOCK(rw, op)	/* nothing */
-#define	RW_LOCKED(rw, op)	/* nothing */
-#define	RW_UNLOCKED(rw, op)	/* nothing */
 #define	RW_DASSERT(rw, cond)	/* nothing */
 
 #endif	/* LOCKDEBUG */
 
+#define	RW_WANTLOCK(rw, op)						\
+	LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw),			\
+	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
+#define	RW_LOCKED(rw, op)						\
+	LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL,			\
+	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
+#define	RW_UNLOCKED(rw, op)						\
+	LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw), false,			\
+	    (uintptr_t)__builtin_return_address(0), op == RW_READER);
+
 /*
  * DIAGNOSTIC
  */
Index: kern/subr_lockdebug.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_lockdebug.c,v
retrieving revision 1.53
diff -p -u -r1.53 subr_lockdebug.c
--- kern/subr_lockdebug.c	15 Apr 2015 14:41:17 -0000	1.53
+++ kern/subr_lockdebug.c	27 Jul 2015 23:12:52 -0000
@@ -524,7 +524,7 @@ lockdebug_locked(volatile void *lock, vo
  * Process a lock release operation.
  */
 void
-lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
+lockdebug_unlocked(volatile void *lock, bool iscv, uintptr_t where, int shared)
 {
 	struct lwp *l = curlwp;
 	lockdebug_t *ld;
@@ -539,12 +539,14 @@ lockdebug_unlocked(volatile void *lock,
 		return;
 	}
 	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
+		KASSERT(iscv);
 		if (lock == (void *)&lbolt) {
 			/* nothing */
 		} else {
 			ld->ld_shares--;
 		}
 	} else if (shared) {
+		KASSERT(!iscv);
 		if (l->l_shlocks == 0) {
 			lockdebug_abort1(ld, s, __func__,
 			    "no shared locks held by LWP", true);
@@ -564,6 +566,7 @@ lockdebug_unlocked(volatile void *lock,
 		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
 			ld->ld_cpu = (uint16_t)-1;
 	} else {
+		KASSERT(!iscv);
 		if ((ld->ld_flags & LD_LOCKED) == 0) {
 			lockdebug_abort1(ld, s, __func__, "not locked", true);
 			return;
Index: sys/lockdebug.h
===================================================================
RCS file: /cvsroot/src/sys/sys/lockdebug.h,v
retrieving revision 1.14
diff -p -u -r1.14 lockdebug.h
--- sys/lockdebug.h	27 Apr 2013 08:12:34 -0000	1.14
+++ sys/lockdebug.h	27 Jul 2015 23:12:52 -0000
@@ -76,8 +76,8 @@ void	lockdebug_wakeup(volatile void *, u
 	if (dodebug) lockdebug_wantlock(lock, where, s)
 #define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s)			\
 	if (dodebug) lockdebug_locked(lock, al, where, s)
-#define	LOCKDEBUG_UNLOCKED(dodebug, lock, where, s)			\
-	if (dodebug) lockdebug_unlocked(lock, where, s)
+#define	LOCKDEBUG_UNLOCKED(dodebug, lock, iscv, where, s)		\
+	if (dodebug) lockdebug_unlocked(lock, iscv, where, s)
 #define	LOCKDEBUG_BARRIER(lock, slp)	lockdebug_barrier(lock, slp)
 #define	LOCKDEBUG_MEM_CHECK(base, sz)					\
 	lockdebug_mem_check(__func__, base, sz)
@@ -86,11 +86,46 @@ void	lockdebug_wakeup(volatile void *, u
 
 #else /* LOCKDEBUG */
 
+#include 
+
+static __inline__ void
+lockdebug_locked_cheap(volatile void *lock, void *cvlock, uintptr_t where, int shared)
+{
+	if (lock == (void *)&lbolt)
+		return;
+	if (cvlock)
+		return;
+	if (shared)
+		curlwp->l_shlocks++;
+	else
+		curlwp->l_exlocks++;
+}
+
+static __inline__ void
+lockdebug_unlocked_cheap(volatile void *lock, bool iscv, uintptr_t where, int shared)
+{
+	if (lock == (void *)&lbolt)
+		return;
+	if (iscv)
+		return;
+	if (shared)
+		curlwp->l_shlocks--;
+	else
+		curlwp->l_exlocks--;
+}
+
 #define	LOCKDEBUG_ALLOC(lock, ops, addr)	false
 #define	LOCKDEBUG_FREE(dodebug, lock)		/* nothing */
 #define	LOCKDEBUG_WANTLOCK(dodebug, lock, where, s)	/* nothing */
+#ifdef DIAGNOSTIC
+#define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s)			\
+	/*if (dodebug)*/ lockdebug_locked_cheap(lock, al, where, s)
+#define	LOCKDEBUG_UNLOCKED(dodebug, lock, iscv, where, s)		\
+	/*if (dodebug)*/ lockdebug_unlocked_cheap(lock, iscv, where, s)
+#else
 #define	LOCKDEBUG_LOCKED(dodebug, lock, al, where, s)	/* nothing */
-#define	LOCKDEBUG_UNLOCKED(dodebug, lock, where, s)	/* nothing */
+#define	LOCKDEBUG_UNLOCKED(dodebug, lock, iscv, where, s)	/* nothing */
+#endif
 #define	LOCKDEBUG_BARRIER(lock, slp)		/* nothing */
 #define	LOCKDEBUG_MEM_CHECK(base, sz)		/* nothing */
 #define	LOCKDEBUG_WAKEUP(dodebug, lock, where)	/* nothing */
Index: sys/userret.h
===================================================================
RCS file: /cvsroot/src/sys/sys/userret.h,v
retrieving revision 1.26
diff -p -u -r1.26 userret.h
--- sys/userret.h	7 Apr 2013 07:54:53 -0000	1.26
+++ sys/userret.h	27 Jul 2015 23:12:52 -0000
@@ -83,6 +83,10 @@ mi_userret(struct lwp *l)
 #ifndef __HAVE_PREEMPTION
 	KASSERT(curcpu()->ci_biglock_count == 0);
 #endif
+#ifndef LOCKDEBUG
+	KASSERTMSG(l->l_exlocks == 0, "%hu exlocks held", l->l_exlocks);
+	KASSERTMSG(l->l_shlocks == 0, "%hu shlocks held", l->l_shlocks);
+#endif
 
 	/*
 	 * Handle "exceptional" events: pending signals, stop/exit actions,
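
The idea behind the !LOCKDEBUG path can be exercised outside the kernel. The sketch below is only a user-space model, not kernel code: struct fake_lwp, cheap_lock(), cheap_unlock() and fake_userret() are invented stand-ins for struct lwp, the LOCKDEBUG_LOCKED/LOCKDEBUG_UNLOCKED hooks and mi_userret(); only the counting and assertion logic mirrors the patch (the lbolt and cvlock special cases are folded into a single iscv flag here).

/*
 * User-space model of the "cheap" per-LWP lock counters added above
 * for the !LOCKDEBUG case.  All names below are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_lwp {
	unsigned short l_shlocks;	/* shared (reader) locks held */
	unsigned short l_exlocks;	/* exclusive (writer) locks held */
};

static struct fake_lwp curlwp;		/* stands in for the kernel's curlwp */

/* Simplified counterpart of lockdebug_locked_cheap(): count acquisitions. */
static void
cheap_lock(bool iscv, bool shared)
{
	if (iscv)			/* condvars are not owned by an LWP */
		return;
	if (shared)
		curlwp.l_shlocks++;
	else
		curlwp.l_exlocks++;
}

/* Simplified counterpart of lockdebug_unlocked_cheap(): count releases. */
static void
cheap_unlock(bool iscv, bool shared)
{
	if (iscv)
		return;
	if (shared)
		curlwp.l_shlocks--;
	else
		curlwp.l_exlocks--;
}

/* Mirrors the KASSERTMSGs added to mi_userret(): no locks may be held. */
static void
fake_userret(void)
{
	assert(curlwp.l_exlocks == 0 && "exclusive lock leaked");
	assert(curlwp.l_shlocks == 0 && "shared lock leaked");
}

int
main(void)
{
	/* Balanced use: acquire and release an exclusive lock. */
	cheap_lock(false, false);
	cheap_unlock(false, false);
	fake_userret();			/* passes */

	/* A leaked exclusive lock now trips the assertion. */
	cheap_lock(false, false);
	fake_userret();			/* aborts, pinpointing the leak */
	printf("not reached\n");
	return 0;
}

Built without NDEBUG, the second fake_userret() aborts immediately, which is the point of checking the counters on every trap and return to user space: a lock leak is reported close to where it happened rather than showing up later as a hang.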
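
The new iscv argument also lets lockdebug_unlocked() cross-check the caller against the lock's registered class under LOCKDEBUG: cv_exit() now passes true, while mutex, rwlock, ww_mutex and kernel_lock releases pass false. The fragment below is a minimal user-space model of just that check; enum fake_lockops_type, struct fake_lockdebug and release() are invented stand-ins for lo_type, lockdebug_t and lockdebug_unlocked().

/*
 * User-space model of the iscv cross-check added to lockdebug_unlocked().
 * All names below are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>

enum fake_lockops_type { OPS_CV, OPS_SLEEP, OPS_SPIN };

struct fake_lockdebug {
	enum fake_lockops_type ld_type;	/* class registered at init time */
	unsigned ld_shares;		/* shared holds / CV references */
	bool ld_locked;			/* exclusively held */
};

/* Mirrors the shape of the three branches in the patched function. */
static void
release(struct fake_lockdebug *ld, bool iscv, bool shared)
{
	if (ld->ld_type == OPS_CV) {
		assert(iscv);		/* cv_exit() must pass true */
		ld->ld_shares--;
	} else if (shared) {
		assert(!iscv);		/* e.g. rw_exit() of a read hold */
		ld->ld_shares--;
	} else {
		assert(!iscv);		/* e.g. mutex_exit(), kernel_unlock() */
		ld->ld_locked = false;
	}
}

int
main(void)
{
	struct fake_lockdebug mtx = { OPS_SLEEP, 0, true };

	release(&mtx, false, false);	/* correct: not a condvar */
	release(&mtx, true, false);	/* wrong iscv flag: assertion fires */
	return 0;
}

A failed assertion here corresponds to a call site passing the wrong flag, for example a condvar being released through a non-CV unlock path, which the unmodified code could not distinguish.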