Index: sys/kern/vfs_cache.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_cache.c,v
retrieving revision 1.137
diff -u -p -r1.137 vfs_cache.c
--- sys/kern/vfs_cache.c	4 Apr 2020 20:49:30 -0000	1.137
+++ sys/kern/vfs_cache.c	4 Apr 2020 21:16:03 -0000
@@ -582,13 +582,12 @@ cache_lookup(struct vnode *dvp, const ch
 		return hit;
 	}
 	vp = ncp->nc_vp;
-	mutex_enter(vp->v_interlock);
-	rw_exit(&dvi->vi_nc_lock);
 
 	/*
-	 * Unlocked except for the vnode interlock. Call vcache_tryvget().
+	 * Try to grab a hold on the vnode, then release the directory lock.
 	 */
 	error = vcache_tryvget(vp);
+	rw_exit(&dvi->vi_nc_lock);
 	if (error) {
 		KASSERT(error == EBUSY);
 		/*
@@ -673,7 +672,7 @@ cache_lookup_linked(struct vnode *dvp, c
 		}
 		*plock = &dvi->vi_nc_lock;
 	} else if (*plock == NULL) {
-		KASSERT(dvp->v_usecount > 0);
+		KASSERT(vusecount(dvp) > 0);
 	}
 
 	/*
@@ -813,9 +812,8 @@ cache_revlookup(struct vnode *vp, struct
 		}
 
 		dvp = ncp->nc_dvp;
-		mutex_enter(dvp->v_interlock);
-		rw_exit(&vi->vi_nc_listlock);
 		error = vcache_tryvget(dvp);
+		rw_exit(&vi->vi_nc_listlock);
 		if (error) {
 			KASSERT(error == EBUSY);
 			if (bufp)
Index: sys/kern/vfs_mount.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_mount.c,v
retrieving revision 1.75
diff -u -p -r1.75 vfs_mount.c
--- sys/kern/vfs_mount.c	23 Feb 2020 22:14:03 -0000	1.75
+++ sys/kern/vfs_mount.c	4 Apr 2020 21:16:04 -0000
@@ -381,7 +381,7 @@ vfs_vnode_iterator_init(struct mount *mp
 
 	mutex_enter(mp->mnt_vnodelock);
 	TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vip, vi_mntvnodes);
-	vp->v_usecount = 1;
+	vip->vi_state = VS_MARKER | 1;
 	mutex_exit(mp->mnt_vnodelock);
 
 	*vnip = (struct vnode_iterator *)vip;
@@ -395,11 +395,11 @@ vfs_vnode_iterator_destroy(struct vnode_
 	kmutex_t *lock;
 
 	KASSERT(vnis_marker(mvp));
-	if (mvp->v_usecount != 0) {
+	if (vusecount(mvp) != 0) {
 		lock = mvp->v_mount->mnt_vnodelock;
 		mutex_enter(lock);
 		TAILQ_REMOVE(&mvp->v_mount->mnt_vnodelist, mvip, vi_mntvnodes);
-		mvp->v_usecount = 0;
+		mvip->vi_state = VS_MARKER | 0;
 		mutex_exit(lock);
 	}
 	vnfree_marker(mvp);
@@ -423,7 +423,7 @@ vfs_vnode_iterator_next1(struct vnode_it
 	mutex_enter(lock);
 	vip = TAILQ_NEXT(mvip, vi_mntvnodes);
 	TAILQ_REMOVE(&mp->mnt_vnodelist, mvip, vi_mntvnodes);
-	VIMPL_TO_VNODE(mvip)->v_usecount = 0;
+	mvip->vi_state = VS_MARKER | 0;
 again:
 	if (vip == NULL) {
 		mutex_exit(lock);
@@ -441,7 +441,7 @@ again:
 		}
 		TAILQ_INSERT_AFTER(&mp->mnt_vnodelist, vip, mvip,
 		    vi_mntvnodes);
-		VIMPL_TO_VNODE(mvip)->v_usecount = 1;
+		mvip->vi_state = VS_MARKER | 1;
 		mutex_exit(lock);
 		error = vcache_vget(vp);
 		KASSERT(error == 0 || error == ENOENT);
@@ -581,7 +581,7 @@ vflush_one(vnode_t *vp, vnode_t *skipvp,
 	 * kill them.
 	 */
 	if (flags & FORCECLOSE) {
-		if (vp->v_usecount > 1 &&
+		if (vusecount(vp) > 1 &&
 		    (vp->v_type == VBLK || vp->v_type == VCHR))
 			vcache_make_anon(vp);
 		else
@@ -651,7 +651,7 @@ mount_checkdirs(vnode_t *olddp)
 	struct proc *p;
 	bool retry;
 
-	if (olddp->v_usecount == 1) {
+	if (vusecount(olddp) == 1) {
 		return;
 	}
 	if (VFS_ROOT(olddp->v_mountedhere, LK_EXCLUSIVE, &newdp))
Index: sys/kern/vfs_subr.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_subr.c,v
retrieving revision 1.484
diff -u -p -r1.484 vfs_subr.c
--- sys/kern/vfs_subr.c	14 Mar 2020 20:45:23 -0000	1.484
+++ sys/kern/vfs_subr.c	4 Apr 2020 21:16:04 -0000
@@ -730,18 +730,15 @@ lazy_sync_vnode(struct vnode *vp)
 	KASSERT(mutex_owned(&syncer_data_lock));
 
 	synced = false;
-	/* We are locking in the wrong direction. */
-	if (mutex_tryenter(vp->v_interlock)) {
+	if (vcache_tryvget(vp) == 0) {
 		mutex_exit(&syncer_data_lock);
-		if (vcache_tryvget(vp) == 0) {
-			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
-				synced = true;
-				(void) VOP_FSYNC(vp, curlwp->l_cred,
-				    FSYNC_LAZY, 0, 0);
-				vput(vp);
-			} else
-				vrele(vp);
-		}
+		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
+			synced = true;
+			(void) VOP_FSYNC(vp, curlwp->l_cred,
+			    FSYNC_LAZY, 0, 0);
+			vput(vp);
+		} else
+			vrele(vp);
 		mutex_enter(&syncer_data_lock);
 	}
 	return synced;
@@ -1104,7 +1101,7 @@ vprint_common(struct vnode *vp, const ch
 	    ARRAY_PRINT(vp->v_type, vnode_types), vp->v_type,
 	    vp->v_mount, vp->v_mountedhere);
 	(*pr)("%susecount %d writecount %d holdcount %d\n", prefix,
-	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
+	    vusecount(vp), vp->v_writecount, vp->v_holdcnt);
 	(*pr)("%ssize %" PRIx64 " writesize %" PRIx64 " numoutput %d\n",
 	    prefix, vp->v_size, vp->v_writesize, vp->v_numoutput);
 	(*pr)("%sdata %p lock %p\n", prefix, vp->v_data, &vip->vi_lock);
Index: sys/kern/vfs_vnode.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_vnode.c,v
retrieving revision 1.118
diff -u -p -r1.118 vfs_vnode.c
--- sys/kern/vfs_vnode.c	4 Apr 2020 20:54:42 -0000	1.118
+++ sys/kern/vfs_vnode.c	4 Apr 2020 21:16:05 -0000
@@ -138,20 +138,16 @@
  *
  * Reference counting
  *
- *	Vnode is considered active, if reference count (vnode_t::v_usecount)
- *	is non-zero. It is maintained using: vref(9) and vrele(9), as well
- *	as vput(9), routines. Common points holding references are e.g.
- *	file openings, current working directory, mount points, etc.
+ *	Vnode is considered active, if usecount is non-zero. It is
+ *	maintained using: vref(9) and vrele(9), as well as vput(9),
+ *	routines. Common points holding references are e.g. file openings,
+ *	current working directory, mount points, etc.
  *
- * Note on v_usecount and its locking
+ * Note on usecount and its locking
  *
- *	At nearly all points it is known that v_usecount could be zero,
- *	the vnode_t::v_interlock will be held. To change the count away
- *	from zero, the interlock must be held. To change from a non-zero
- *	value to zero, again the interlock must be held.
- *
- *	Changing the usecount from a non-zero value to a non-zero value can
- *	safely be done using atomic operations, without the interlock held.
+ *	At most points it is known that the usecount could be zero, the
+ *	vnode_t::v_interlock will be held. To change from a non-zero value
+ *	to zero, the interlock must be held.
  */
 
 #include <sys/cdefs.h>
@@ -236,6 +232,48 @@ extern int (**dead_vnodeop_p)(void *);
 extern int (**spec_vnodeop_p)(void *);
 extern struct vfsops dead_vfsops;
 
+/*
+ * Atomically set the current state of a vnode_impl_t without disturbing the
+ * usecount.
+ */
+static void
+vstate_store(struct vnode_impl *vi, enum vnode_state vs)
+{
+	u_int newval, cur, next;
+
+	KASSERT(mutex_owned(VIMPL_TO_VNODE(vi)->v_interlock));
+	KASSERT((vs & VS_USECOUNT_MASK) == 0);
+
+	for (cur = atomic_load_relaxed(&vi->vi_state);; cur = next) {
+		newval = (cur & VS_USECOUNT_MASK) | vs;
+		next = atomic_cas_uint(&vi->vi_state, cur, newval);
+		if (__predict_true(cur == next)) {
+			return;
+		}
+	}
+}
+
+/*
+ * Fetch the current state of a vnode_impl_t.
+ */
+static inline enum vnode_state
+vstate_fetch(struct vnode_impl *vi)
+{
+
+	return atomic_load_relaxed(&vi->vi_state) & VS_STATE_MASK;
+}
+
+/*
+ * Return the current usecount of a vnode.
+ */
+inline int
+vusecount(struct vnode *vp)
+{
+	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+
+	return atomic_load_relaxed(&vip->vi_state) & VS_USECOUNT_MASK;
+}
+
 /* Vnode state operations and diagnostics. */
 
 #if defined(DIAGNOSTIC)
@@ -254,6 +292,8 @@ _vstate_assert(vnode_t *vp, enum vnode_s
     bool has_lock)
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	enum vnode_state cur;
+	int usecount;
 
 	if (!has_lock) {
 		/*
@@ -261,25 +301,28 @@ _vstate_assert(vnode_t *vp, enum vnode_s
 		 * without loooking first.
 		 */
 		membar_enter();
-		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
-		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
+		cur = vstate_fetch(vip);
+		if (state == VS_ACTIVE && vusecount(vp) > 0 &&
+		    (cur == VS_LOADED || cur == VS_BLOCKED))
 			return;
-		if (vip->vi_state == state)
+		if (cur == state)
 			return;
 		mutex_enter((vp)->v_interlock);
 	}
 
 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
 
-	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
-	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
-	    vip->vi_state == state) {
+	cur = vstate_fetch(vip);
+	usecount = vusecount(vp);
+	if ((state == VS_ACTIVE && usecount > 0 &&
+	    (cur == VS_LOADED || cur == VS_BLOCKED)) ||
+	    cur == state) {
 		if (!has_lock)
 			mutex_exit((vp)->v_interlock);
 		return;
 	}
 
 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
-	    vstate_name(vip->vi_state), vp->v_usecount,
+	    vstate_name(cur), usecount,
 	    vstate_name(state), func, line);
 }
 
@@ -287,31 +330,36 @@ static enum vnode_state
 vstate_assert_get(vnode_t *vp, const char *func, int line)
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	enum vnode_state cur = vstate_fetch(vip);
 
 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
-	if (! VSTATE_VALID(vip->vi_state))
+	cur = vstate_fetch(vip);
+	if (!VSTATE_VALID(cur))
 		vnpanic(vp, "state is %s at %s:%d",
-		    vstate_name(vip->vi_state), func, line);
+		    vstate_name(cur), func, line);
 
-	return vip->vi_state;
+	return cur;
}
 
 static void
 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	enum vnode_state cur = vstate_fetch(vip);
 
 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
-	if (! VSTATE_VALID(vip->vi_state))
+	if (!VSTATE_VALID(cur))
 		vnpanic(vp, "state is %s at %s:%d",
 		    vstate_name(vip->vi_state), func, line);
 
-	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
+	while (cur != VS_LOADED && cur != VS_RECLAIMED) {
 		cv_wait(&vp->v_cv, vp->v_interlock);
+		cur = vstate_fetch(vip);
+	}
 
-	if (! VSTATE_VALID(vip->vi_state))
+	if (!VSTATE_VALID(cur))
 		vnpanic(vp, "state is %s at %s:%d",
-		    vstate_name(vip->vi_state), func, line);
+		    vstate_name(cur), func, line);
 }
 
 static void
@@ -319,6 +367,8 @@ vstate_assert_change(vnode_t *vp, enum v
     const char *func, int line)
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	enum vnode_state cur = vstate_fetch(vip);
+	int usecount = vusecount(vp);
 
 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
 	if (from == VS_LOADING)
@@ -330,15 +380,15 @@ vstate_assert_change(vnode_t *vp, enum v
 	if (! VSTATE_VALID(to))
 		vnpanic(vp, "to is %s at %s:%d",
 		    vstate_name(to), func, line);
-	if (vip->vi_state != from)
+	if (cur != from)
 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
-		    vstate_name(vip->vi_state), vstate_name(from), func, line);
-	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
+		    vstate_name(cur), vstate_name(from), func, line);
+	if ((from == VS_BLOCKED || to == VS_BLOCKED) && usecount != 1)
 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
-		    vstate_name(from), vstate_name(to), vp->v_usecount,
+		    vstate_name(from), vstate_name(to), usecount,
 		    func, line);
 
-	vip->vi_state = to;
+	vstate_store(vip, to);
 	if (from == VS_LOADING)
 		cv_broadcast(&vcache_cv);
 	if (to == VS_LOADED || to == VS_RECLAIMED)
@@ -348,7 +398,7 @@ vstate_assert_change(vnode_t *vp, enum v
 #else /* defined(DIAGNOSTIC) */
 
 #define VSTATE_GET(vp) \
-	(VNODE_TO_VIMPL((vp))->vi_state)
+	(vstate_fetch(VNODE_TO_VIMPL((vp))))
 #define VSTATE_CHANGE(vp, from, to) \
 	vstate_change((vp), (from), (to))
 #define VSTATE_WAIT_STABLE(vp) \
@@ -364,9 +414,12 @@ static void
 vstate_wait_stable(vnode_t *vp)
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	enum vnode_state cur = vstate_fetch(vip);
 
-	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
+	while (cur != VS_LOADED && cur != VS_RECLAIMED) {
 		cv_wait(&vp->v_cv, vp->v_interlock);
+		cur = vstate_fetch(vip);
+	}
 }
 
 static void
@@ -374,7 +427,7 @@ vstate_change(vnode_t *vp, enum vnode_st
 {
 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
 
-	vip->vi_state = to;
+	vstate_store(vip, to);
 	if (from == VS_LOADING)
 		cv_broadcast(&vcache_cv);
 	if (to == VS_LOADED || to == VS_RECLAIMED)
@@ -448,7 +501,7 @@ bool
 vnis_marker(vnode_t *vp)
 {
 
-	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
+	return (vstate_fetch(VNODE_TO_VIMPL(vp)) == VS_MARKER);
 }
 
 /*
@@ -568,13 +621,13 @@ vdrain_remove(vnode_t *vp)
 	KASSERT(mutex_owned(&vdrain_lock));
 
 	/* Probe usecount (unlocked). */
-	if (vp->v_usecount > 0)
+	if (vusecount(vp) > 0)
 		return;
 	/* Try v_interlock -- we lock the wrong direction! */
 	if (!mutex_tryenter(vp->v_interlock))
 		return;
 	/* Probe usecount and state. */
-	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
+	if (vusecount(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
 		mutex_exit(vp->v_interlock);
 		return;
 	}
@@ -617,7 +670,7 @@ vdrain_vrele(vnode_t *vp)
 	 * First remove the vnode from the vrele list.
 	 * Put it on the last lru list, the last vrele()
 	 * will put it back onto the right list before
-	 * its v_usecount reaches zero.
+	 * its usecount reaches zero.
 	 */
 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
@@ -692,15 +745,17 @@ vdrain_thread(void *cookie)
 static bool
 vtryrele(vnode_t *vp)
 {
-	u_int use, next;
+	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
+	u_int val, next;
 
-	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
-		if (__predict_false(use == 1)) {
+	for (val = atomic_load_relaxed(&vip->vi_state);; val = next) {
+		if (__predict_false((val & VS_USECOUNT_MASK) == 1)) {
 			return false;
 		}
-		KASSERT(use > 1);
-		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
-		if (__predict_true(next == use)) {
+		KASSERTMSG((val & VS_USECOUNT_MASK) > 1,
+		    "vnode %p has zero usecount", vp);
+		next = atomic_cas_uint(&vip->vi_state, val, val - 1);
+		if (__predict_true(next == val)) {
 			return true;
 		}
 	}
@@ -715,13 +770,13 @@ vput(vnode_t *vp)
 	int lktype;
 
 	/*
-	 * Do an unlocked check of v_usecount. If it looks like we're not
+	 * Do an unlocked check of the usecount. If it looks like we're not
 	 * about to drop the last reference, then unlock the vnode and try
 	 * to drop the reference. If it ends up being the last reference
 	 * after all, vrelel() can fix it all up. Most of the time this
 	 * will all go to plan.
 	 */
-	if (atomic_load_relaxed(&vp->v_usecount) > 1) {
+	if (vusecount(vp) > 1) {
 		VOP_UNLOCK(vp);
 		if (vtryrele(vp)) {
 			return;
@@ -745,6 +800,7 @@ static void
 vrelel(vnode_t *vp, int flags, int lktype)
 {
 	const bool async = ((flags & VRELEL_ASYNC) != 0);
+	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
 	bool recycle, defer;
 	int error;
 
@@ -768,7 +824,7 @@ vrelel(vnode_t *vp, int flags, int lktyp
 		mutex_exit(vp->v_interlock);
 		return;
 	}
-	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
+	if (vusecount(vp) <= 0 || vp->v_writecount != 0) {
 		vnpanic(vp, "%s: bad ref count", __func__);
 	}
 
@@ -869,10 +925,10 @@ vrelel(vnode_t *vp, int flags, int lktyp
 		} else {
 			VOP_UNLOCK(vp);
 		}
-		KASSERT(vp->v_usecount > 0);
+		KASSERT(vusecount(vp) > 0);
 	}
 
-	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
+	if ((atomic_dec_uint_nv(&vip->vi_state) & VS_USECOUNT_MASK) != 0) {
 		/* Gained another reference while being reclaimed. */
 		mutex_exit(vp->v_interlock);
 		return;
@@ -930,9 +986,9 @@ void
 vref(vnode_t *vp)
 {
 
-	KASSERT(atomic_load_relaxed(&vp->v_usecount) != 0);
+	KASSERT(vusecount(vp) != 0);
 
-	atomic_inc_uint(&vp->v_usecount);
+	atomic_inc_uint(&VNODE_TO_VIMPL(vp)->vi_state);
 }
 
 /*
@@ -945,7 +1001,7 @@ vholdl(vnode_t *vp)
 
 	KASSERT(mutex_owned(vp->v_interlock));
 
-	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
+	if (vp->v_holdcnt++ == 0 && vusecount(vp) == 0)
 		lru_requeue(vp, lru_which(vp));
 }
 
@@ -976,7 +1032,7 @@ holdrelel(vnode_t *vp)
 	}
 
 	vp->v_holdcnt--;
-	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
+	if (vp->v_holdcnt == 0 && vusecount(vp) == 0)
 		lru_requeue(vp, lru_which(vp));
 }
 
@@ -1004,7 +1060,7 @@ vrecycle(vnode_t *vp)
 
 	/* Make sure we hold the last reference. */
 	VSTATE_WAIT_STABLE(vp);
-	if (vp->v_usecount != 1) {
+	if (vusecount(vp) != 1) {
 		mutex_exit(vp->v_interlock);
 		return false;
 	}
@@ -1036,7 +1092,7 @@ vrecycle(vnode_t *vp)
 		return false;
 	}
 
-	KASSERT(vp->v_usecount == 1);
+	KASSERT(vusecount(vp) == 1);
 	vcache_reclaim(vp);
 	vrelel(vp, 0, LK_NONE);
 
@@ -1085,7 +1141,7 @@ vrevoke(vnode_t *vp)
 	enum vtype type;
 	dev_t dev;
 
-	KASSERT(vp->v_usecount > 0);
+	KASSERT(vusecount(vp) > 0);
 
 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
 
@@ -1094,7 +1150,7 @@ vrevoke(vnode_t *vp)
 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
 		mutex_exit(vp->v_interlock);
 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
-		atomic_inc_uint(&vp->v_usecount);
+		atomic_inc_uint(&VNODE_TO_VIMPL(vp)->vi_state);
 		mutex_exit(vp->v_interlock);
 		vgone(vp);
 	} else {
@@ -1228,11 +1284,11 @@ vcache_alloc(void)
 	cv_init(&vp->v_cv, "vnode");
 	cache_vnode_init(vp);
 
-	vp->v_usecount = 1;
 	vp->v_type = VNON;
 	vp->v_size = vp->v_writesize = VSIZENOTSET;
 
-	vip->vi_state = VS_LOADING;
+	/* Set LOADING state and a usecount of one. */
+	vip->vi_state = VS_LOADING | 1;
 
 	lru_requeue(vp, &lru_list[LRU_FREE]);
 
@@ -1273,7 +1329,7 @@ vcache_free(vnode_impl_t *vip)
 	vp = VIMPL_TO_VNODE(vip);
 
 	KASSERT(mutex_owned(vp->v_interlock));
-	KASSERT(vp->v_usecount == 0);
+	KASSERT(vusecount(vp) == 0);
 	KASSERT(vp->v_holdcnt == 0);
 	KASSERT(vp->v_writecount == 0);
 	lru_requeue(vp, NULL);
@@ -1301,22 +1357,20 @@ vcache_free(vnode_impl_t *vip)
 int
 vcache_tryvget(vnode_t *vp)
 {
-	int error = 0;
-
-	KASSERT(mutex_owned(vp->v_interlock));
-
-	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
-		error = ENOENT;
-	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
-		error = EBUSY;
-	else if (vp->v_usecount == 0)
-		vp->v_usecount = 1;
-	else
-		atomic_inc_uint(&vp->v_usecount);
-
-	mutex_exit(vp->v_interlock);
-
-	return error;
+	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
+	enum vnode_state state;
+	u_int cur, next;
+
+	for (cur = atomic_load_relaxed(&vi->vi_state);; cur = next) {
+		state = cur & VS_STATE_MASK;
+		if (__predict_false(state != VS_LOADED)) {
+			return state == VS_RECLAIMED ? ENOENT : EBUSY;
+		}
+		next = atomic_cas_uint(&vi->vi_state, cur, cur + 1);
+		if (__predict_true(cur == next)) {
+			return 0;
+		}
+	}
 }
 
 /*
@@ -1339,17 +1393,14 @@ vcache_vget(vnode_t *vp)
 
 	/* If this was the last reference to a reclaimed vnode free it now. */
 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
-		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
+		if (vp->v_holdcnt == 0 && vusecount(vp) == 0)
 			vcache_free(VNODE_TO_VIMPL(vp));
 		else
 			mutex_exit(vp->v_interlock);
 		return ENOENT;
 	}
 	VSTATE_ASSERT(vp, VS_LOADED);
-	if (vp->v_usecount == 0)
-		vp->v_usecount = 1;
-	else
-		atomic_inc_uint(&vp->v_usecount);
+	atomic_inc_uint(&VNODE_TO_VIMPL(vp)->vi_state);
 	mutex_exit(vp->v_interlock);
 
 	return 0;
@@ -1391,7 +1442,7 @@ again:
 	 *
 	 * Wait for vnodes changing state from VS_LOADING and retry.
 	 */
-	if (__predict_false(vip->vi_state == VS_LOADING)) {
+	if (__predict_false(vstate_fetch(vip) == VS_LOADING)) {
 		cv_wait(&vcache_cv, &vcache_lock);
 		mutex_exit(&vcache_lock);
 		goto again;
 	}
@@ -1654,9 +1705,9 @@ vcache_reclaim(vnode_t *vp)
 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
 	KASSERT(mutex_owned(vp->v_interlock));
-	KASSERT(vp->v_usecount != 0);
+	KASSERT(vusecount(vp) != 0);
 
-	active = (vp->v_usecount > 1);
+	active = (vusecount(vp) > 1);
 	temp_key_len = vip->vi_key.vk_key_len;
 	/*
 	 * Prevent the vnode from being recycled or brought into use
Index: sys/sys/vnode.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vnode.h,v
retrieving revision 1.294
diff -u -p -r1.294 vnode.h
--- sys/sys/vnode.h	22 Mar 2020 18:32:42 -0000	1.294
+++ sys/sys/vnode.h	4 Apr 2020 21:16:05 -0000
@@ -153,7 +153,6 @@ struct vnode {
 	    __aligned(COHERENCY_UNIT);
 	int		v_iflag;		/* i+u	VI_* flags */
 	int		v_uflag;		/* k	VU_* flags */
-	int		v_usecount;		/* i	reference count */
 	int		v_numoutput;		/* i	# of pending writes */
 	int		v_writecount;		/* i	ref count of writers */
 	int		v_holdcnt;		/* i	page & buffer refs */
@@ -516,6 +515,7 @@ int	vdead_check(struct vnode *, int);
 void	vrevoke(struct vnode *);
 void	vremfree(struct vnode *);
 void	vshareilock(struct vnode *, struct vnode *);
+int	vusecount(struct vnode *);
 int	vcache_get(struct mount *, const void *, size_t, struct vnode **);
 int	vcache_new(struct mount *, struct vnode *, struct vattr *, kauth_cred_t,
 	    void *, struct vnode **);
Index: sys/sys/vnode_impl.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vnode_impl.h,v
retrieving revision 1.23
diff -u -p -r1.23 vnode_impl.h
--- sys/sys/vnode_impl.h	22 Mar 2020 14:38:37 -0000	1.23
+++ sys/sys/vnode_impl.h	4 Apr 2020 21:16:05 -0000
@@ -36,13 +36,32 @@ struct namecache;
 struct nchnode;
 
 enum vnode_state {
-	VS_ACTIVE,	/* Assert only, fs node attached and usecount > 0. */
-	VS_MARKER,	/* Stable, used as marker. Will not change. */
-	VS_LOADING,	/* Intermediate, initialising the fs node. */
-	VS_LOADED,	/* Stable, valid fs node attached. */
-	VS_BLOCKED,	/* Intermediate, active, no new references allowed. */
-	VS_RECLAIMING,	/* Intermediate, detaching the fs node. */
-	VS_RECLAIMED	/* Stable, no fs node attached. */
+	/* Assert only, fs node attached and usecount > 0. */
+	VS_ACTIVE	= 0x00000000,
+
+	/* Stable, used as marker. Will not change. */
+	VS_MARKER	= 0x20000000,
+
+	/* Intermediate, initialising the fs node. */
+	VS_LOADING	= 0x40000000,
+
+	/* Stable, valid fs node attached. */
+	VS_LOADED	= 0x60000000,
+
+	/* Intermediate, active, no new references allowed. */
+	VS_BLOCKED	= 0x80000000,
+
+	/* Intermediate, detaching the fs node. */
+	VS_RECLAIMING	= 0xa0000000,
+
+	/* Stable, no fs node attached. */
+	VS_RECLAIMED	= 0xc0000000,
+
+	/* Mask for state values. */
+	VS_STATE_MASK	= 0xe0000000,
+
+	/* Mask for vnode usecount. */
+	VS_USECOUNT_MASK = 0x1fffffff,
 };
 
 TAILQ_HEAD(vnodelst, vnode_impl);
@@ -59,6 +78,8 @@ struct vcache_key {
  * lock. Field markings and the corresponding locks:
  *
  * -	stable throughout the life of the vnode
+ * a	atomic operations
+ * a,i	some combination of atomic ops + v_interlock
  * c	vcache_lock
 * d	vdrain_lock
 * i	v_interlock
@@ -75,18 +96,12 @@ struct vnode_impl {
 	 * Largely stable data.
 	 */
 	struct vcache_key vi_key;		/* c   vnode cache key */
-
-	/*
-	 * vnode cache, LRU and syncer. This all changes with some
-	 * regularity so keep it together.
-	 */
 	struct vnodelst *vi_lrulisthd;		/* d   current lru list head */
 	TAILQ_ENTRY(vnode_impl) vi_lrulist;	/* d   lru list */
 	int vi_synclist_slot;			/* s   synclist slot index */
 	int vi_lrulisttm;			/* i   time of lru enqueue */
 	TAILQ_ENTRY(vnode_impl) vi_synclist;	/* s   vnodes with dirty bufs */
 	SLIST_ENTRY(vnode_impl) vi_hash;	/* c   vnode cache list */
-	enum vnode_state vi_state;		/* i   current state */
 	TAILQ_ENTRY(vnode_impl) vi_mntvnodes;	/* m   vnodes for mount point */
 
 	/*
@@ -100,6 +115,9 @@ struct vnode_impl {
 	uid_t vi_nc_uid;			/* n,l cached UID or VNOVAL */
 	gid_t vi_nc_gid;			/* n,l cached GID or VNOVAL */
 	uint32_t vi_nc_spare;			/* -   spare (padding) */
+	krwlock_t vi_nc_lock			/* -   lock on node */
+	    __aligned(COHERENCY_UNIT);
+	krwlock_t vi_nc_listlock;		/* -   lock on nn_list */
 
 	/*
 	 * Locks and expensive to access items which can be expected to
@@ -107,9 +125,7 @@ struct vnode_impl {
 	 */
 	krwlock_t vi_lock			/* -   lock for this vnode */
 	    __aligned(COHERENCY_UNIT);
-	krwlock_t vi_nc_lock			/* -   lock on node */
-	    __aligned(COHERENCY_UNIT);
-	krwlock_t vi_nc_listlock;		/* -   lock on nn_list */
+	u_int vi_state;				/* a,i current state */
 };
 
 typedef struct vnode_impl vnode_impl_t;
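
For reference, the sketch below (not part of the patch) shows in plain C how the combined vi_state word behaves: the top three bits hold one of the VS_* states and the low 29 bits hold the usecount, so a vcache_tryvget()-style routine can check the state and take a reference with a single compare-and-swap, without holding v_interlock. The tryvget() helper and the file-scope vi_state variable are illustrative stand-ins, and C11 atomics replace the kernel's atomic_load_relaxed()/atomic_cas_uint().

/*
 * Standalone illustration only -- not part of the patch.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define VS_LOADED		0x60000000u
#define VS_RECLAIMED		0xc0000000u
#define VS_STATE_MASK		0xe0000000u
#define VS_USECOUNT_MASK	0x1fffffffu

/* Stand-in for a struct vnode_impl's vi_state member. */
static _Atomic unsigned int vi_state = VS_LOADED | 1;	/* LOADED, usecount 1 */

/* Rough analogue of the patched vcache_tryvget(): 0 on success. */
static int
tryvget(void)
{
	unsigned int cur = atomic_load_explicit(&vi_state, memory_order_relaxed);

	for (;;) {
		/* New references are only permitted in the LOADED state. */
		if ((cur & VS_STATE_MASK) != VS_LOADED)
			return ((cur & VS_STATE_MASK) == VS_RECLAIMED) ?
			    ENOENT : EBUSY;
		/* Bump the usecount field; the state bits are untouched. */
		if (atomic_compare_exchange_weak_explicit(&vi_state, &cur,
		    cur + 1, memory_order_acquire, memory_order_relaxed))
			return 0;
		/* CAS failed: cur now holds the fresh value, so retry. */
	}
}

int
main(void)
{
	if (tryvget() == 0)
		printf("usecount is now %u\n",
		    atomic_load(&vi_state) & VS_USECOUNT_MASK);

	/* Change the state only, leaving the usecount bits alone. */
	unsigned int cur = atomic_load(&vi_state);
	atomic_store(&vi_state, (cur & VS_USECOUNT_MASK) | VS_RECLAIMED);

	printf("tryvget() after reclaim: %d (ENOENT=%d)\n", tryvget(), ENOENT);
	return 0;
}

Packing both fields into one word is what lets the patched vcache_tryvget() drop its v_interlock requirement: the state check and the reference bump happen in one atomic operation, which is why the callers in vfs_cache.c no longer take the interlock before calling it.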