Index: sys/kern/kern_pmf.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_pmf.c,v
retrieving revision 1.41
diff -u -p -r1.41 kern_pmf.c
--- sys/kern/kern_pmf.c	23 Feb 2020 20:08:35 -0000	1.41
+++ sys/kern/kern_pmf.c	4 Apr 2020 22:15:06 -0000
@@ -317,7 +317,7 @@ pmf_system_suspend(const pmf_qual_t *qua
 	if (doing_shutdown == 0 && panicstr == NULL) {
 		printf("Flushing disk caches: ");
 		do_sys_sync(&lwp0);
-		if (buf_syncwait() != 0)
+		if (vfs_syncwait() != 0)
 			printf("giving up\n");
 		else
 			printf("done\n");
Index: sys/kern/vfs_bio.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_bio.c,v
retrieving revision 1.290
diff -u -p -r1.290 vfs_bio.c
--- sys/kern/vfs_bio.c	14 Mar 2020 18:08:39 -0000	1.290
+++ sys/kern/vfs_bio.c	4 Apr 2020 22:15:06 -0000
@@ -1,7 +1,7 @@
 /*	$NetBSD: vfs_bio.c,v 1.290 2020/03/14 18:08:39 ad Exp $	*/
 
 /*-
- * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -217,6 +217,8 @@ static buf_t *bio_doread(struct vnode *,
 static buf_t *getnewbuf(int, int, int);
 static int buf_lotsfree(void);
 static int buf_canrelease(void);
+static int buf_compare_key(void *, const void *, const void *);
+static int buf_compare_nodes(void *, const void *, const void *);
 static u_long buf_mempoolidx(u_long);
 static u_long buf_roundsize(u_long);
 static void *buf_alloc(size_t);
@@ -247,12 +249,14 @@ biohist_init(void)
 }
 
 /*
- * Definitions for the buffer hash lists.
+ * Definitions for the per-vnode buffer rbtree.
  */
-#define	BUFHASH(dvp, lbn)	\
-	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
-LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
-u_long	bufhash;
+const rb_tree_ops_t buf_rbtree_ops = {
+	.rbto_compare_nodes = buf_compare_nodes,
+	.rbto_compare_key = buf_compare_key,
+	.rbto_node_offset = offsetof(struct buf, b_rb),
+	.rbto_context = NULL
+};
 
 static kcondvar_t needbuffer_cv;
 
@@ -360,7 +364,7 @@ checkfreelist(buf_t *bp, struct bqueue *
 #endif
 
 /*
- * Insq/Remq for the buffer hash lists.
+ * Insq/Remq for the buffer lists.
  * Call with buffer queue locked.
  */
 static void
@@ -487,7 +491,7 @@ buf_memcalc(void)
 }
 
 /*
- * Initialize buffers and hash links for buffers.
+ * Initialize buffers.
  */
 void
 bufinit(void)
@@ -559,14 +563,13 @@ bufinit(void)
 	}
 
 	/*
-	 * Estimate hash table size based on the amount of memory we
+	 * Estimate number of buffers based on the amount of memory we
 	 * intend to use for the buffer cache. The average buffer
 	 * size is dependent on our clients (i.e. filesystems).
 	 *
	 * For now, use an empirical 3K per buffer.
 	 */
 	nbuf = (bufmem_hiwater / 1024) / 3;
-	bufhashtbl = hashinit(nbuf, HASH_LIST, true, &bufhash);
 
 	sysctl_kern_buf_setup();
 	sysctl_vm_buf_setup();
@@ -1196,28 +1199,68 @@ brelse(buf_t *bp, int set)
 
 /*
  * Determine if a block is in the cache.
- * Just look on what would be its hash chain.  If it's there, return
- * a pointer to it, unless it's marked invalid.  If it's marked invalid,
- * we normally don't return the buffer, unless the caller explicitly
- * wants us to.
+ * Just look in the RB tree for the vnode.  If it's there, return
+ * a pointer to it, unless it's marked invalid.
  */
 buf_t *
 incore(struct vnode *vp, daddr_t blkno)
 {
+	rb_node_t *node = vp->v_buftree.rbt_root;
 	buf_t *bp;
 
+	/* XXX This could probably be b_objlock, but for now.. */
 	KASSERT(mutex_owned(&bufcache_lock));
 
-	/* Search hash chain */
-	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
-		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
-		    !ISSET(bp->b_cflags, BC_INVAL)) {
-			KASSERT(bp->b_objlock == vp->v_interlock);
-			return (bp);
+	/*
+	 * Search the RB tree for the key.  This is an inlined lookup
+	 * tailored for exactly what's needed here that is quite a bit
+	 * faster than using rb_tree_find_node().
+	 */
+	for (;;) {
+		if (__predict_false(RB_SENTINEL_P(node)))
+			return NULL;
+		bp = (struct buf *)
+		    ((uintptr_t)node - offsetof(struct buf, b_rb));
+		KASSERT(bp->b_vp == vp);
+		KASSERT(bp->b_objlock == vp->v_interlock);
+		if (bp->b_lblkno == blkno) {
+			/* XXX Because of BC_INVAL in ufs_strategy(). */
+			return ISSET(bp->b_cflags, BC_INVAL) ? NULL : bp;
 		}
+		node = node->rb_nodes[bp->b_lblkno < blkno];
 	}
+}
+
+/*
+ * rbtree: compare two nodes.
+ */
+static int
+buf_compare_nodes(void *context, const void *n1, const void *n2)
+{
+	const struct buf *bp1 = n1;
+	const struct buf *bp2 = n2;
+
+	if (bp1->b_lblkno < bp2->b_lblkno)
+		return -1;
+	if (bp1->b_lblkno > bp2->b_lblkno)
+		return 1;
+	return 0;
+}
 
-	return (NULL);
+/*
+ * rbtree: compare a node and a key.
+ */
+static int
+buf_compare_key(void *context, const void *n, const void *k)
+{
+	const struct buf *bp = n;
+	const daddr_t blkno = *(const daddr_t *)k;
+
+	if (bp->b_lblkno < blkno)
+		return -1;
+	if (bp->b_lblkno > blkno)
+		return 1;
+	return 0;
 }
 
 /*
@@ -1266,7 +1309,6 @@ getblk(struct vnode *vp, daddr_t blkno,
 			goto loop;
 		}
 
-		LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
 		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
 		mutex_enter(vp->v_interlock);
 		bgetvp(vp, bp);
@@ -1284,7 +1326,6 @@ getblk(struct vnode *vp, daddr_t blkno,
 	} else {
 		if (allocbuf(bp, size, preserve)) {
 			mutex_enter(&bufcache_lock);
-			LIST_REMOVE(bp, b_hash);
 			brelsel(bp, BC_INVAL);
 			mutex_exit(&bufcache_lock);
 			SDT_PROBE4(io, kernel, , getblk__done,
@@ -1310,8 +1351,7 @@ geteblk(int size)
 	while ((bp = getnewbuf(0, 0, 0)) == NULL)
 		;
 
-	SET(bp->b_cflags, BC_INVAL);
-	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
+	SET(bp->b_cflags, BC_INVAL | BC_IOBUF);
 	mutex_exit(&bufcache_lock);
 	BIO_SETPRIO(bp, BPRIO_DEFAULT);
 	error = allocbuf(bp, size, 0);
@@ -1518,7 +1558,12 @@ getnewbuf(int slpflag, int slptimeo, int
 
 	KASSERT(transmp == NULL);
 
-	vp = bp->b_vp;
+	/* Disassociate us from our vnode, if we had one... */
+	if ((vp = bp->b_vp) != NULL) {
+		mutex_enter(vp->v_interlock);
+		brelvp(bp);
+		mutex_exit(vp->v_interlock);
+	}
 
 	/* clear out various other fields */
 	bp->b_cflags = BC_BUSY;
@@ -1533,15 +1578,6 @@ getnewbuf(int slpflag, int slptimeo, int
 	bp->b_resid = 0;
 	bp->b_bcount = 0;
 
-	LIST_REMOVE(bp, b_hash);
-
-	/* Disassociate us from our vnode, if we had one... */
-	if (vp != NULL) {
-		mutex_enter(vp->v_interlock);
-		brelvp(bp);
-		mutex_exit(vp->v_interlock);
-	}
-
 	SDT_PROBE1(io, kernel, , getnewbuf__done, bp);
 	return (bp);
 }
@@ -1740,57 +1776,6 @@ biointr(void *cookie)
 	splx(s);
 }
 
-/*
- * Wait for all buffers to complete I/O
- * Return the number of "stuck" buffers.
- */
-int
-buf_syncwait(void)
-{
-	buf_t *bp;
-	int iter, nbusy, nbusy_prev = 0, ihash;
-
-	BIOHIST_FUNC(__func__); BIOHIST_CALLED(biohist);
-
-	for (iter = 0; iter < 20;) {
-		mutex_enter(&bufcache_lock);
-		nbusy = 0;
-		for (ihash = 0; ihash < bufhash+1; ihash++) {
-			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
-				if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY)
-					nbusy += ((bp->b_flags & B_READ) == 0);
-			}
-		}
-		mutex_exit(&bufcache_lock);
-
-		if (nbusy == 0)
-			break;
-		if (nbusy_prev == 0)
-			nbusy_prev = nbusy;
-		printf("%d ", nbusy);
-		kpause("bflush", false, MAX(1, hz / 25 * iter), NULL);
-		if (nbusy >= nbusy_prev) /* we didn't flush anything */
-			iter++;
-		else
-			nbusy_prev = nbusy;
-	}
-
-	if (nbusy) {
-#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
-		printf("giving up\nPrinting vnodes for busy buffers\n");
-		for (ihash = 0; ihash < bufhash+1; ihash++) {
-			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
-				if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY &&
-				    (bp->b_flags & B_READ) == 0)
-					vprint(NULL, bp->b_vp);
-			}
-		}
-#endif
-	}
-
-	return nbusy;
-}
-
 static void
 sysctl_fillbuf(const buf_t *i, struct buf_sysctl *o)
 {
@@ -2055,6 +2040,7 @@ getiobuf(struct vnode *vp, bool waitok)
 	} else {
 		KASSERT(bp->b_objlock == &buffer_lock);
 	}
+	bp->b_cflags |= BC_IOBUF;
 
 	return bp;
 }
@@ -2116,7 +2102,7 @@ nestiobuf_setup(buf_t *mbp, buf_t *bp, i
 	bp->b_vp = vp;
 	bp->b_dev = mbp->b_dev;
 	bp->b_objlock = mbp->b_objlock;
-	bp->b_cflags = BC_BUSY;
+	bp->b_cflags = BC_IOBUF | BC_BUSY;
 	bp->b_flags = B_ASYNC | b_pass;
 	bp->b_iodone = nestiobuf_iodone;
 	bp->b_data = (char *)mbp->b_data + offset;
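
[Editor's note, not part of the patch: with this change incore() no
longer walks a global hash chain; it descends a red-black tree rooted
in the vnode (v_buftree) and keyed on b_lblkno, so a lookup costs
O(log n) in the number of buffers cached for that vnode.  The
standalone sketch below shows the same rbtree(3) wiring -- the two
comparators, the rb_tree_ops_t, and the embedded node -- outside the
kernel.  It should build as a userland program on NetBSD, where
<sys/rbtree.h> is the libc rbtree(3) API; the demo_* names and the
int64_t key type are invented stand-ins for struct buf and daddr_t.]

	#include <sys/rbtree.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_buf {
		int64_t		b_lblkno;	/* key: logical block number */
		rb_node_t	b_rb;		/* embedded tree linkage */
	};

	static int
	demo_compare_nodes(void *ctx, const void *n1, const void *n2)
	{
		const struct demo_buf *b1 = n1;
		const struct demo_buf *b2 = n2;

		if (b1->b_lblkno < b2->b_lblkno)
			return -1;
		if (b1->b_lblkno > b2->b_lblkno)
			return 1;
		return 0;
	}

	static int
	demo_compare_key(void *ctx, const void *n, const void *k)
	{
		const struct demo_buf *b = n;
		const int64_t blkno = *(const int64_t *)k;

		if (b->b_lblkno < blkno)
			return -1;
		if (b->b_lblkno > blkno)
			return 1;
		return 0;
	}

	static const rb_tree_ops_t demo_ops = {
		.rbto_compare_nodes = demo_compare_nodes,
		.rbto_compare_key = demo_compare_key,
		.rbto_node_offset = offsetof(struct demo_buf, b_rb),
		.rbto_context = NULL
	};

	int
	main(void)
	{
		struct demo_buf bufs[4] = {
			{ .b_lblkno = 8 }, { .b_lblkno = 1 },
			{ .b_lblkno = 5 }, { .b_lblkno = 3 },
		};
		struct demo_buf *b;
		int64_t key = 5;
		rb_tree_t tree;
		int i;

		rb_tree_init(&tree, &demo_ops);
		for (i = 0; i < 4; i++)
			rb_tree_insert_node(&tree, &bufs[i]);

		/* The moral equivalent of incore(): lookup by block number. */
		b = rb_tree_find_node(&tree, &key);
		printf("blkno 5 %s\n", b != NULL ? "found" : "missing");
		return 0;
	}

[The patch's inlined loop in incore() does by hand what
rb_tree_find_node() does above, trading the generic comparator call
for a direct b_lblkno comparison on the hot path.]
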
Index: sys/kern/vfs_mount.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_mount.c,v
retrieving revision 1.75
diff -u -p -r1.75 vfs_mount.c
--- sys/kern/vfs_mount.c	23 Feb 2020 22:14:03 -0000	1.75
+++ sys/kern/vfs_mount.c	4 Apr 2020 22:15:06 -0000
@@ -1072,7 +1072,7 @@ vfs_sync_all(struct lwp *l)
 	do_sys_sync(l);
 
 	/* Wait for sync to finish. */
-	if (buf_syncwait() != 0) {
+	if (vfs_syncwait() != 0) {
 #if defined(DDB) && defined(DEBUG_HALT_BUSY)
 		Debugger();
 #endif
Index: sys/kern/vfs_subr.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_subr.c,v
retrieving revision 1.484
diff -u -p -r1.484 vfs_subr.c
--- sys/kern/vfs_subr.c	14 Mar 2020 20:45:23 -0000	1.484
+++ sys/kern/vfs_subr.c	4 Apr 2020 22:15:06 -0000
@@ -109,15 +109,6 @@ const int vttoif_tab[9] = {
 	S_IFSOCK, S_IFIFO, S_IFMT,
 };
 
-/*
- * Insq/Remq for the vnode usage lists.
- */
-#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
-#define	bufremvn(bp) {						\
-	LIST_REMOVE(bp, b_vnbufs);				\
-	(bp)->b_vnbufs.le_next = NOLIST;			\
-}
-
 int doforce = 1;		/* 1 => permit forcible unmounting */
 
 extern struct mount *dead_rootmount;
@@ -141,6 +132,34 @@ vntblinit(void)
 }
 
 /*
+ * Insq/Remq for the vnode usage lists.
+ */
+static inline void
+bufinsvn(struct buf *bp, const bool tree, struct buflists *dp)
+{
+
+	if (tree && !ISSET(bp->b_cflags, BC_IOBUF)) {
+		KASSERT(rb_tree_find_node(&bp->b_vp->v_buftree,
+		    &bp->b_lblkno) == NULL);
+		rb_tree_insert_node(&bp->b_vp->v_buftree, bp);
+	}
+	LIST_INSERT_HEAD(dp, bp, b_vnbufs);
+}
+
+static inline void
+bufremvn(struct buf *bp, const bool tree)
+{
+
+	if (tree && !ISSET(bp->b_cflags, BC_IOBUF)) {
+		KASSERT(rb_tree_find_node(&bp->b_vp->v_buftree,
+		    &bp->b_lblkno) == bp);
+		rb_tree_remove_node(&bp->b_vp->v_buftree, bp);
+	}
+	LIST_REMOVE(bp, b_vnbufs);
+	bp->b_vnbufs.le_next = NOLIST;
+}
+
+/*
  * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying vnode locked, which should prevent new dirty
 * buffers from being queued.
@@ -214,6 +233,8 @@ restart:
 #ifdef DIAGNOSTIC
 	if (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))
 		panic("vinvalbuf: flush failed, vp %p", vp);
+	if (RB_TREE_MIN(&vp->v_buftree) != NULL)
+		panic("vinvalbuf: tree not empty, vp %p", vp);
 #endif
 	mutex_exit(&bufcache_lock);
@@ -396,7 +417,7 @@ bgetvp(struct vnode *vp, struct buf *bp)
 	/*
 	 * Insert onto list for new vnode.
 	 */
-	bufinsvn(bp, &vp->v_cleanblkhd);
+	bufinsvn(bp, true, &vp->v_cleanblkhd);
 	bp->b_objlock = vp->v_interlock;
 }
 
@@ -419,7 +440,7 @@ brelvp(struct buf *bp)
 	 * Delete from old vnode list, if on one.
 	 */
 	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
-		bufremvn(bp);
+		bufremvn(bp, true);
 
 	if ((vp->v_iflag & (VI_ONWORKLST | VI_PAGES)) == VI_ONWORKLST &&
 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
@@ -440,6 +461,7 @@ void
 reassignbuf(struct buf *bp, struct vnode *vp)
 {
 	struct buflists *listheadp;
+	bool tree;
 	int delayx;
 
 	KASSERT(mutex_owned(&bufcache_lock));
@@ -450,8 +472,11 @@ reassignbuf(struct buf *bp, struct vnode
 	/*
 	 * Delete from old vnode list, if on one.
 	 */
-	if (LIST_NEXT(bp, b_vnbufs) != NOLIST)
-		bufremvn(bp);
+	if (LIST_NEXT(bp, b_vnbufs) != NOLIST) {
+		bufremvn(bp, false);
+		tree = false;
+	} else
+		tree = true;
 
 	/*
 	 * If dirty, put on list of dirty buffers;
@@ -485,7 +510,7 @@ reassignbuf(struct buf *bp, struct vnode
 			vn_syncer_add_to_worklist(vp, delayx);
 		}
 	}
-	bufinsvn(bp, listheadp);
+	bufinsvn(bp, tree, listheadp);
 }
 
 /*
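
[Editor's note, not part of the patch: bufinsvn() and bufremvn() now
keep tree membership in lockstep with the buffer's association to the
vnode, while list membership (clean vs. dirty) may change on its own.
That is why reassignbuf() passes tree = false when the buffer was
already on a vnode list: it is merely moving between the clean and
dirty lists and is already in the tree, so reinserting it would trip
the KASSERT in bufinsvn().  The hypothetical sketch below -- built
against NetBSD's rbtree(3), with invented names standing in for
struct buf -- shows the behaviour those KASSERTs guard against:
inserting a second node with an equal key does not create a
duplicate, rb_tree_insert_node() just returns the existing node.]

	#include <sys/rbtree.h>
	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct node {
		int64_t		key;
		rb_node_t	rb;
	};

	static int
	cmp_nodes(void *ctx, const void *a, const void *b)
	{
		const struct node *n1 = a, *n2 = b;

		return (n1->key > n2->key) - (n1->key < n2->key);
	}

	static int
	cmp_key(void *ctx, const void *a, const void *k)
	{
		const struct node *n = a;
		const int64_t key = *(const int64_t *)k;

		return (n->key > key) - (n->key < key);
	}

	static const rb_tree_ops_t ops = {
		.rbto_compare_nodes = cmp_nodes,
		.rbto_compare_key = cmp_key,
		.rbto_node_offset = offsetof(struct node, rb),
		.rbto_context = NULL
	};

	int
	main(void)
	{
		struct node a = { .key = 42 }, b = { .key = 42 };
		rb_tree_t tree;

		rb_tree_init(&tree, &ops);
		assert(rb_tree_insert_node(&tree, &a) == &a);
		/* Duplicate key: the tree keeps the original node. */
		assert(rb_tree_insert_node(&tree, &b) == &a);
		return 0;
	}
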
Index: sys/kern/vfs_syscalls.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_syscalls.c,v
retrieving revision 1.545
diff -u -p -r1.545 vfs_syscalls.c
--- sys/kern/vfs_syscalls.c	4 Apr 2020 20:49:30 -0000	1.545
+++ sys/kern/vfs_syscalls.c	4 Apr 2020 22:15:06 -0000
@@ -672,6 +672,73 @@ do_sys_sync(struct lwp *l)
 #endif /* DEBUG */
 }
 
+static bool
+sync_vnode_filter(void *cookie, vnode_t *vp)
+{
+
+	if (vp->v_numoutput > 0) {
+		++*(int *)cookie;
+	}
+	return false;
+}
+
+int
+vfs_syncwait(void)
+{
+	int nbusy, nbusy_prev, iter;
+	struct vnode_iterator *vniter;
+	mount_iterator_t *mpiter;
+	struct mount *mp;
+
+	for (nbusy_prev = 0, iter = 0; iter < 20;) {
+		nbusy = 0;
+		mountlist_iterator_init(&mpiter);
+		while ((mp = mountlist_iterator_next(mpiter)) != NULL) {
+			vnode_t *vp __diagused;
+			vfs_vnode_iterator_init(mp, &vniter);
+			vp = vfs_vnode_iterator_next(vniter,
+			    sync_vnode_filter, &nbusy);
+			KASSERT(vp == NULL);
+			vfs_vnode_iterator_destroy(vniter);
+		}
+		mountlist_iterator_destroy(mpiter);
+
+		if (nbusy == 0)
+			break;
+		if (nbusy_prev == 0)
+			nbusy_prev = nbusy;
+		printf("%d ", nbusy);
+		kpause("syncwait", false, MAX(1, hz / 25 * iter), NULL);
+		if (nbusy >= nbusy_prev) /* we didn't flush anything */
+			iter++;
+		else
+			nbusy_prev = nbusy;
+	}
+
+	if (nbusy) {
+#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
+		printf("giving up\nPrinting vnodes for busy buffers\n");
+		mountlist_iterator_init(&mpiter);
+		while ((mp = mountlist_iterator_next(mpiter)) != NULL) {
+			vnode_t *vp;
+			vfs_vnode_iterator_init(mp, &vniter);
+			while ((vp = vfs_vnode_iterator_next(vniter,
+			    NULL, NULL)) != NULL) {
+				mutex_enter(vp->v_interlock);
+				if (vp->v_numoutput > 0)
+					vprint(NULL, vp);
+				mutex_exit(vp->v_interlock);
+				vrele(vp);
+			}
+			vfs_vnode_iterator_destroy(vniter);
+		}
+		mountlist_iterator_destroy(mpiter);
+#endif
+	}
+
+	return nbusy;
+}
+
 /* ARGSUSED */
 int
 sys_sync(struct lwp *l, const void *v, register_t *retval)
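
[Editor's note, not part of the patch: vfs_syncwait() keeps
buf_syncwait()'s retry policy but counts busy vnodes (v_numoutput > 0)
via the mount and vnode iterators instead of walking the now-deleted
buffer hash table.  The policy -- poll up to 20 times, charge an
iteration only when no progress was made, and back off with a growing
kpause() delay -- is easier to see out of context.  The sketch below
is a userland model under stated assumptions: count_busy() and
usleep() are hypothetical stand-ins for the iterator walk and
kpause(); the simulated I/O completion is invented for the demo.]

	#include <stdio.h>
	#include <unistd.h>

	static int pending = 5;	/* pretend five vnodes have writes in flight */

	static int
	count_busy(void)
	{
		/* A real implementation walks every vnode on every mount. */
		if (pending > 0)
			pending--;	/* simulate one I/O completing */
		return pending;
	}

	static int
	syncwait(void)
	{
		int nbusy, nbusy_prev = 0, iter;

		for (iter = 0; iter < 20;) {
			nbusy = count_busy();
			if (nbusy == 0)
				break;
			if (nbusy_prev == 0)
				nbusy_prev = nbusy;
			printf("%d ", nbusy);
			usleep(10000 * (iter + 1));	/* stand-in for kpause() */
			if (nbusy >= nbusy_prev)	/* we didn't flush anything */
				iter++;
			else
				nbusy_prev = nbusy;
		}
		return nbusy;
	}

	int
	main(void)
	{
		printf("stuck: %d\n", syncwait());
		return 0;
	}

[Unproductive passes burn one of the 20 tries; productive ones are
free, so a slow but steadily draining queue is never abandoned.]
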
Index: sys/kern/vfs_vnode.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_vnode.c,v
retrieving revision 1.118
diff -u -p -r1.118 vfs_vnode.c
--- sys/kern/vfs_vnode.c	4 Apr 2020 20:54:42 -0000	1.118
+++ sys/kern/vfs_vnode.c	4 Apr 2020 22:15:07 -0000
@@ -1214,6 +1214,7 @@ vcache_hash_lookup(const struct vcache_k
 static vnode_impl_t *
 vcache_alloc(void)
 {
+	extern const rb_tree_ops_t buf_rbtree_ops;
 	vnode_impl_t *vip;
 	vnode_t *vp;
 
@@ -1227,6 +1228,7 @@ vcache_alloc(void)
 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
 	cv_init(&vp->v_cv, "vnode");
 	cache_vnode_init(vp);
+	rb_tree_init(&vp->v_buftree, &buf_rbtree_ops);
 
 	vp->v_usecount = 1;
 	vp->v_type = VNON;
@@ -1278,6 +1280,7 @@ vcache_free(vnode_impl_t *vip)
 	KASSERT(vp->v_writecount == 0);
 	lru_requeue(vp, NULL);
 	mutex_exit(vp->v_interlock);
+	KASSERT(RB_TREE_MIN(&vp->v_buftree) == NULL);
 
 	vfs_insmntque(vp, NULL);
 	if (vp->v_type == VBLK || vp->v_type == VCHR)
Index: sys/sys/buf.h
===================================================================
RCS file: /cvsroot/src/sys/sys/buf.h,v
retrieving revision 1.131
diff -u -p -r1.131 buf.h
--- sys/sys/buf.h	26 Aug 2019 10:24:39 -0000	1.131
+++ sys/sys/buf.h	4 Apr 2020 22:15:09 -0000
@@ -1,7 +1,7 @@
 /*	$NetBSD: buf.h,v 1.131 2019/08/26 10:24:39 msaitoh Exp $	*/
 
 /*-
- * Copyright (c) 1999, 2000, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 1999, 2000, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
@@ -150,8 +150,7 @@ struct buf {
 	kcondvar_t		b_busy;		/* c: threads waiting on buf */
 	u_int			b_refcnt;	/* c: refcount for b_busy */
-	void			*b_unused;	/* : unused */
-	LIST_ENTRY(buf)		b_hash;		/* c: hash chain */
+	rb_node_t		b_rb;		/* c: rb tree node */
 	LIST_ENTRY(buf)		b_vnbufs;	/* c: associated vnode */
 	TAILQ_ENTRY(buf)	b_freelist;	/* c: position if not active */
 	TAILQ_ENTRY(buf)	b_wapbllist;	/* c: transaction buffer list */
@@ -180,6 +179,7 @@ struct buf {
 #define	BC_NOCACHE	0x00008000	/* Do not cache block after use. */
 #define	BC_WANTED	0x00800000	/* Process wants this buffer. */
 #define	BC_VFLUSH	0x04000000	/* Buffer is being synced. */
+#define	BC_IOBUF	0x20000000	/* Do not return via incore(). */
 
 /*
  * These flags are kept in b_oflags (owned by associated object).
@@ -300,7 +300,6 @@ void	minphys(buf_t *);
 void	brelvp(buf_t *);
 void	reassignbuf(buf_t *, struct vnode *);
 void	bgetvp(struct vnode *, buf_t *);
-int	buf_syncwait(void);
 u_long	buf_memcalc(void);
 int	buf_drain(int);
 int	buf_setvalimit(vsize_t);
Index: sys/sys/vfs_syscalls.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vfs_syscalls.h,v
retrieving revision 1.27
diff -u -p -r1.27 vfs_syscalls.h
--- sys/sys/vfs_syscalls.h	23 Feb 2020 22:14:04 -0000	1.27
+++ sys/sys/vfs_syscalls.h	4 Apr 2020 22:15:09 -0000
@@ -80,6 +80,7 @@ int	do_sys_mkdir(const char *, mode_t, e
 int	do_sys_symlink(const char *, const char *, enum uio_seg);
 int	do_sys_quotactl(const char *, const struct quotactl_args *);
 void	do_sys_sync(struct lwp *);
+int	vfs_syncwait(void);
 
 int	chdir_lookup(const char *, int, struct vnode **, struct lwp *);
 void	change_root(struct vnode *);
Index: sys/sys/vnode.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vnode.h,v
retrieving revision 1.294
diff -u -p -r1.294 vnode.h
--- sys/sys/vnode.h	22 Mar 2020 18:32:42 -0000	1.294
+++ sys/sys/vnode.h	4 Apr 2020 22:15:09 -0000
@@ -66,6 +66,7 @@
 #include <sys/rwlock.h>
 #include <sys/mutex.h>
 #include <sys/time.h>
+#include <sys/rbtree.h>
 
 /* XXX: clean up includes later */
 #include <uvm/uvm_param.h>	/* XXX */
@@ -157,6 +158,7 @@ struct vnode {
 	int		v_numoutput;		/* i # of pending writes */
 	int		v_writecount;		/* i ref count of writers */
 	int		v_holdcnt;		/* i page & buffer refs */
+	rb_tree_t	v_buftree;		/* b cached buffers */
 	struct buflists	v_cleanblkhd;		/* i+b clean blocklist head */
 	struct buflists	v_dirtyblkhd;		/* i+b dirty blocklist head */
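
[Editor's note, not part of the patch: the buf.h change swaps b_hash
(plus an unused pointer) for an rb_node_t embedded directly in struct
buf.  Embedding the node is what lets the inlined lookup in incore()
turn an rb_node_t pointer back into its containing buffer with plain
offsetof() arithmetic -- the same offset that rbto_node_offset hands
to rbtree(3).  A minimal, hypothetical demonstration of that
container-of step follows; the types are invented stand-ins, not the
kernel's.]

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	typedef struct { uintptr_t opaque[3]; } node_t;	/* stand-in for rb_node_t */

	struct buf_demo {
		int64_t	b_lblkno;
		node_t	b_rb;
	};

	static struct buf_demo *
	node_to_buf(node_t *node)
	{
		/* Same computation as incore(): subtract the member offset. */
		return (struct buf_demo *)
		    ((uintptr_t)node - offsetof(struct buf_demo, b_rb));
	}

	int
	main(void)
	{
		struct buf_demo b = { .b_lblkno = 7 };

		assert(node_to_buf(&b.b_rb) == &b);
		assert(node_to_buf(&b.b_rb)->b_lblkno == 7);
		return 0;
	}
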
Index: usr.bin/vmstat/vmstat.c
===================================================================
RCS file: /cvsroot/src/usr.bin/vmstat/vmstat.c,v
retrieving revision 1.239
diff -u -p -r1.239 vmstat.c
--- usr.bin/vmstat/vmstat.c	23 Mar 2020 18:44:17 -0000	1.239
+++ usr.bin/vmstat/vmstat.c	4 Apr 2020 22:15:09 -0000
@@ -215,23 +215,19 @@ struct nlist hashnl[] = {
 	{ .n_name = "_ihash" },
 #define	X_IHASHTBL	3
 	{ .n_name = "_ihashtbl" },
-#define	X_BUFHASH	4
-	{ .n_name = "_bufhash" },
-#define	X_BUFHASHTBL	5
-	{ .n_name = "_bufhashtbl" },
-#define	X_UIHASH	6
+#define	X_UIHASH	4
 	{ .n_name = "_uihash" },
-#define	X_UIHASHTBL	7
+#define	X_UIHASHTBL	5
 	{ .n_name = "_uihashtbl" },
-#define	X_IFADDRHASH	8
+#define	X_IFADDRHASH	6
 	{ .n_name = "_in_ifaddrhash" },
-#define	X_IFADDRHASHTBL	9
+#define	X_IFADDRHASHTBL	7
 	{ .n_name = "_in_ifaddrhashtbl" },
-#define	X_VCACHEHASH	10
+#define	X_VCACHEHASH	8
 	{ .n_name = "_vcache_hashmask" },
-#define	X_VCACHETBL	11
+#define	X_VCACHETBL	9
 	{ .n_name = "_vcache_hashtab" },
-#define	X_HASHNL_SIZE	12	/* must be last */
+#define	X_HASHNL_SIZE	10	/* must be last */
 	{ .n_name = NULL },
 };
@@ -1891,10 +1887,6 @@ struct kernel_hash {
 } khashes[] =
 {
 	{
-		"buffer hash",
-		X_BUFHASH, X_BUFHASHTBL,
-		HASH_LIST, offsetof(struct buf, b_hash)
-	}, {
 		"ipv4 address -> interface hash",
 		X_IFADDRHASH, X_IFADDRHASHTBL,
 		HASH_LIST, offsetof(struct in_ifaddr, ia_hash),