Index: uvm/uvm_pager.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pager.h,v
retrieving revision 1.44
diff -u -p -r1.44 uvm_pager.h
--- uvm/uvm_pager.h	13 Jan 2017 04:43:16 -0000	1.44
+++ uvm/uvm_pager.h	11 May 2018 14:53:13 -0000
@@ -156,6 +156,7 @@ struct uvm_pagerops {
 #define PGO_RECLAIM	0x2000	/* object is being reclaimed */
 #define PGO_GLOCKHELD	0x4000	/* genfs_node's lock is already held */
 #define PGO_LAZY	0x8000	/* equivalent of MNT_LAZY / FSYNC_LAZY */
+#define PGO_CACHEONLY	0x10000	/* only return cached pages [get] */
 
 /* page we are not interested in getting */
 #define PGO_DONTCARE	((struct vm_page *) -1L)	/* [get only] */
Index: uvm/uvm_readahead.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_readahead.c,v
retrieving revision 1.9
diff -u -p -r1.9 uvm_readahead.c
--- uvm/uvm_readahead.c	30 Mar 2018 07:22:59 -0000	1.9
+++ uvm/uvm_readahead.c	11 May 2018 14:53:13 -0000
@@ -117,20 +117,47 @@ ra_freectx(struct uvm_ractx *ra)
  * => start i/o for each RA_IOCHUNK sized chunk.
  * => return offset to which we started i/o.
  */
+int ra_smart = 2;
 static off_t
 ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
 {
 	const off_t endoff = off + sz;
+	const int advice = UVM_ADV_RANDOM;	/* avoid recursion */
+	const int gpflags = PGO_NOTIMESTAMP;
+	struct vm_page *pg = NULL;
+	int npages;
+	int error;
 
 	DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
 	    __func__, uobj, off, endoff));
+
+	if (ra_smart == 0)
+		return endoff;
+
+	/*
+	 * Don't issue read-ahead if the last page of the range is
+	 * already cached.  Since the access is sequential, the
+	 * intermediate pages should have similar LRU stats and are
+	 * therefore likely to still be cached too.
+	 */
+	if (ra_smart == 2) {
+		KASSERT(sz > 0);
+		npages = 1;
+		mutex_enter(uobj->vmobjlock);
+		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(endoff - 1), &pg,
+		    &npages, 0, VM_PROT_READ, advice, gpflags | PGO_CACHEONLY);
+		if (error == 0) {
+			DPRINTF(("%s: off=%" PRIu64 ", sz=%zu already cached\n",
+			    __func__, off, sz));
+			return endoff;
+		}
+	}
+
 	off = trunc_page(off);
 	while (off < endoff) {
 		const size_t chunksize = RA_IOCHUNK;
-		int error;
 		size_t donebytes;
-		int npages;
 		int orignpages;
 		size_t bytelen;
@@ -141,13 +168,9 @@ ra_startio(struct uvm_object *uobj, off_
 		npages = orignpages = bytelen >> PAGE_SHIFT;
 		KASSERT(npages != 0);
 
-		/*
-		 * use UVM_ADV_RANDOM to avoid recursion.
-		 */
-
 		mutex_enter(uobj->vmobjlock);
 		error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
-		    &npages, 0, VM_PROT_READ, UVM_ADV_RANDOM, 0);
+		    &npages, 0, VM_PROT_READ, advice, gpflags);
 		DPRINTF(("%s: off=%" PRIu64 ", bytelen=%zu -> %d\n",
 		    __func__, off, bytelen, error));
 		if (error != 0 && error != EBUSY) {
Index: miscfs/genfs/genfs_io.c
===================================================================
RCS file: /cvsroot/src/sys/miscfs/genfs/genfs_io.c,v
retrieving revision 1.71
diff -u -p -r1.71 genfs_io.c
--- miscfs/genfs/genfs_io.c	28 Oct 2017 00:37:13 -0000	1.71
+++ miscfs/genfs/genfs_io.c	11 May 2018 14:53:13 -0000
@@ -209,9 +209,29 @@ startover:
 	}
 
 	/*
-	 * For PGO_LOCKED requests, just return whatever's in memory.
+	 * For PGO_LOCKED or PGO_CACHEONLY requests, just use whatever's in memory.
 	 */
 
+	if (flags & PGO_CACHEONLY) {
+		KASSERT((flags & PGO_LOCKED) == 0 && !glocked);
+
+		/* The caller provides storage for the pages, but we don't return them. */
+		npages = *ap->a_count;
+		int nfound = uvn_findpages(uobj, origoffset, &npages,
+		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
+		KASSERT(npages == *ap->a_count);
+		if (nfound > 0) {
+			genfs_rel_pages(ap->a_m, npages);
+#ifdef DEBUG
+			/* Make sure the caller won't use the pages by mistake. */
+			memset(ap->a_m, 0, npages * sizeof(struct vm_page *));
+#endif
+		}
+		mutex_exit(uobj->vmobjlock);
+		error = (nfound == npages) ? 0 : EBUSY;
+		goto out_err;
+	}
+
 	if (flags & PGO_LOCKED) {
 		int nfound;
 		struct vm_page *pg;
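
For context, the ra_smart == 2 check above amounts to "if the tail page of the
window is already resident, skip the read-ahead".  Below is a minimal
user-space sketch of the same heuristic, separate from the patch itself:
maybe_readahead(), fd, map, off and sz are all hypothetical names, map is
assumed to be an mmap(2) view of the file starting at offset 0, and the code
uses NetBSD's mincore(2) prototype (char *vec; Linux declares the vector as
unsigned char *).  The mincore() residency probe plays the role of the
PGO_CACHEONLY pgo_get call, and posix_fadvise(POSIX_FADV_WILLNEED) stands in
for the read-ahead i/o that ra_startio() starts.

#include <sys/types.h>
#include <sys/mman.h>

#include <fcntl.h>
#include <unistd.h>

/*
 * Sketch only: skip read-ahead when the last page of the window is
 * already resident, assuming a sequential reader's earlier pages have
 * similar LRU state (the same assumption ra_startio() makes above).
 */
static void
maybe_readahead(int fd, char *map, off_t off, size_t sz)
{
	const size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	/* Start of the last page of [off, off + sz); page-aligned since
	 * map comes from mmap(2) and maps the file from offset 0. */
	const size_t tail = ((size_t)off + sz - 1) & ~(pagesz - 1);
	char vec;

	if (mincore(map + tail, 1, &vec) == 0 && (vec & 1) != 0) {
		/* Tail page cached: assume the whole window is too. */
		return;
	}
	/* Not cached (or probe failed): request the read-ahead. */
	(void)posix_fadvise(fd, off, (off_t)sz, POSIX_FADV_WILLNEED);
}

Probing a single page keeps the check O(1) regardless of the window size;
the kernel patch makes the same trade-off, at the cost of occasionally
skipping read-ahead when only the tail page happens to be cached.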