Index: uvm/uvm_bio.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_bio.c,v
retrieving revision 1.93
diff -u -p -r1.93 uvm_bio.c
--- uvm/uvm_bio.c	26 Mar 2018 21:43:30 -0000	1.93
+++ uvm/uvm_bio.c	9 Apr 2018 21:30:14 -0000
@@ -277,14 +277,15 @@ ubc_fault_page(const struct uvm_faultinf
 	    (access_type & VM_PROT_WRITE) == 0 ||
 	    pg->offset < umap->writeoff ||
 	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
+	KASSERT(ufi->orig_map->pmap == pmap_kernel());
 
 	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
 	    (pg->flags & PG_RDONLY) != 0) ||
 	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
 	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
 
-	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
-	    prot & mask, PMAP_CANFAIL | (access_type & mask));
+	uvm_emap_enter(va, &pg, 1, prot & mask);
+	error = 0;
 
 	mutex_enter(&uvm_pageqlock);
 	uvm_pageactivate(pg);
@@ -473,9 +474,9 @@ ubc_find_mapping(struct uvm_object *uobj
  * ubc_alloc: allocate a file mapping window
  */
 
-void *
+static void * __noinline
 ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
-    int flags)
+    int flags, u_int *genp)
 {
 	vaddr_t slot_offset, va;
 	struct ubc_map *umap;
@@ -524,9 +525,13 @@ again:
 			if (umap->flags & UMAP_MAPPING_CACHED) {
 				umap->flags &= ~UMAP_MAPPING_CACHED;
 				mutex_enter(oobj->vmobjlock);
+				uvm_emap_remove(va, ubc_winsize);
+#if 0
+				/* Non-ephemeral mapping, do full removal */
 				pmap_remove(pmap_kernel(), va,
 				    va + ubc_winsize);
 				pmap_update(pmap_kernel());
+#endif
 				mutex_exit(oobj->vmobjlock);
 			}
 			LIST_REMOVE(umap, hash);
@@ -576,20 +581,21 @@ again_faultbusy:
 		mutex_enter(uobj->vmobjlock);
 		if (umap->flags & UMAP_MAPPING_CACHED) {
 			umap->flags &= ~UMAP_MAPPING_CACHED;
+			uvm_emap_remove(va, ubc_winsize);
+#if 0
+			/* Non-ephemeral mapping, do full removal */
 			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
+			pmap_update(pmap_kernel());
+#endif
 		}
 		memset(pgs, 0, sizeof(pgs));
 
 		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
 		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
 		UVMHIST_LOG(ubchist, "faultbusy getpages %jd", error, 0, 0, 0);
-		if (error) {
-			/*
-			 * Flush: the mapping above might have been removed.
-			 */
-			pmap_update(pmap_kernel());
+		if (error)
 			goto out;
-		}
+
 
 		for (i = 0; i < npages; i++) {
 			struct vm_page *pg = pgs[i];
@@ -600,8 +606,7 @@ again_faultbusy:
 					pg = uvm_loanbreak(pg);
 				}
 				if (pg == NULL) {
-					pmap_kremove(va, ubc_winsize);
-					pmap_update(pmap_kernel());
+					/* Nothing mapped yet, just unbusy */
 					uvm_page_unbusy(pgs, npages);
 					mutex_exit(uobj->vmobjlock);
 					uvm_wait("ubc_alloc");
@@ -610,16 +615,16 @@ again_faultbusy:
 				mutex_exit(uobj->vmobjlock);
 				pgs[i] = pg;
 			}
-			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
-			    VM_PAGE_TO_PHYS(pg),
-			    VM_PROT_READ | VM_PROT_WRITE, 0);
 		}
-		pmap_update(pmap_kernel());
+		uvm_emap_enter(va + slot_offset, pgs, npages,
+		    VM_PROT_READ | VM_PROT_WRITE);
 		umap->flags |= UMAP_PAGES_LOCKED;
 	} else {
 		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
 	}
 
+	*genp = uvm_emap_produce();
+
 out:
 	return (void *)(va + slot_offset);
 }
@@ -628,7 +633,7 @@ out:
  * ubc_release: free a file mapping window.
  */
 
-void
+static void __noinline
 ubc_release(void *va, int flags)
 {
 	struct ubc_map *umap;
@@ -671,8 +676,7 @@ ubc_release(void *va, int flags)
 			uvm_pageactivate(pgs[i]);
 		}
 		mutex_exit(&uvm_pageqlock);
-		pmap_kremove(umapva, ubc_winsize);
-		pmap_update(pmap_kernel());
+		uvm_emap_remove(umapva, ubc_winsize);
 		uvm_page_unbusy(pgs, npages);
 		mutex_exit(uobj->vmobjlock);
 		unmapped = true;
@@ -691,11 +695,16 @@ ubc_release(void *va, int flags)
 			 * This is typically used to avoid leaving
 			 * incompatible cache aliases around indefinitely.
 			 */
-			mutex_enter(uobj->vmobjlock);
-			pmap_remove(pmap_kernel(), umapva,
+			if (!unmapped) {
+				mutex_enter(uobj->vmobjlock);
+				uvm_emap_remove(umapva, ubc_winsize);
+#if 0
+				pmap_remove(pmap_kernel(), umapva,
 			    umapva + ubc_winsize);
-			pmap_update(pmap_kernel());
-			mutex_exit(uobj->vmobjlock);
+				pmap_update(pmap_kernel());
+#endif
+				mutex_exit(uobj->vmobjlock);
+			}
 
 			umap->flags &= ~UMAP_MAPPING_CACHED;
 			LIST_REMOVE(umap, hash);
@@ -727,6 +736,7 @@ ubc_uiomove(struct uvm_object *uobj, str
 	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
 	voff_t off;
 	int error;
+	u_int gen;
 
 	KASSERT(todo <= uio->uio_resid);
 	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
@@ -738,8 +748,9 @@ ubc_uiomove(struct uvm_object *uobj, str
 		vsize_t bytelen = todo;
 		void *win;
 
-		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
+		win = ubc_alloc(uobj, off, &bytelen, advice, flags, &gen);
 		if (error == 0) {
+			uvm_emap_consume(gen);
 			error = uiomove(win, bytelen, uio);
 		}
 		if (error != 0 && overwrite) {
@@ -770,6 +781,7 @@ void
 ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
 {
 	void *win;
+	u_int gen;
 	/*
 	 * XXXUBC invent kzero() and use it
 	 */
@@ -778,7 +790,9 @@ ubc_zerorange(struct uvm_object *uobj, o
 	while (len) {
 		vsize_t bytelen = len;
 
-		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
+		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE,
+		    &gen);
+		uvm_emap_consume(gen);
 		memset(win, 0, bytelen);
 		ubc_release(win, flags);
 
@@ -795,7 +809,6 @@ void
 ubc_purge(struct uvm_object *uobj)
 {
 	struct ubc_map *umap;
-	vaddr_t va;
 
 	KASSERT(uobj->uo_npages == 0);
 
@@ -809,10 +822,12 @@ ubc_purge(struct uvm_object *uobj)
 	mutex_enter(ubc_object.uobj.vmobjlock);
 	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
 		KASSERT(umap->refcount == 0);
-		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
+#if 0
+		for (vaddr_t va = 0; va < ubc_winsize; va += PAGE_SIZE) {
 			KASSERT(!pmap_extract(pmap_kernel(),
 			    va + UBC_UMAP_ADDR(umap), NULL));
 		}
+#endif
 		LIST_REMOVE(umap, list);
 		LIST_REMOVE(umap, hash);
 		umap->flags &= ~UMAP_MAPPING_CACHED;
Index: uvm/uvm_emap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_emap.c,v
retrieving revision 1.12
diff -u -p -r1.12 uvm_emap.c
--- uvm/uvm_emap.c	2 Apr 2018 18:25:41 -0000	1.12
+++ uvm/uvm_emap.c	9 Apr 2018 21:30:14 -0000
@@ -50,7 +50,7 @@
  *
  * Map pages at the address:
  *
- *	uvm_emap_enter(va, pgs, npages);
+ *	uvm_emap_enter(va, pgs, npages, VM_PROT_READ);
 *	gen = uvm_emap_produce();
  *
  * Read pages via the mapping:
@@ -185,14 +185,14 @@
  * uvm_emap_enter: enter a new mapping, without TLB flush.
  */
 void
-uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages)
+uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages, vm_prot_t prot)
 {
 	paddr_t pa;
 	u_int n;
 
 	for (n = 0; n < npages; n++, va += PAGE_SIZE) {
 		pa = VM_PAGE_TO_PHYS(pgs[n]);
-		pmap_emap_enter(va, pa, VM_PROT_READ);
+		pmap_emap_enter(va, pa, prot);
 	}
 }
 
@@ -387,14 +387,14 @@ uvm_emap_update(u_int gen)
  */
 
 void
-uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages)
+uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages, vm_prot_t prot)
 {
 	paddr_t pa;
 	u_int n;
 
 	for (n = 0; n < npages; n++, va += PAGE_SIZE) {
 		pa = VM_PAGE_TO_PHYS(pgs[n]);
-		pmap_kenter_pa(va, pa, VM_PROT_READ, 0);
+		pmap_kenter_pa(va, pa, prot, 0);
 	}
 	pmap_update(pmap_kernel());
 }
Index: uvm/uvm_extern.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.208
diff -u -p -r1.208 uvm_extern.h
--- uvm/uvm_extern.h	15 Dec 2017 16:03:29 -0000	1.208
+++ uvm/uvm_extern.h	9 Apr 2018 21:30:14 -0000
@@ -613,9 +613,6 @@ void			uao_reference(struct uvm_object *
 /* uvm_bio.c */
 void			ubc_init(void);
 void			ubchist_init(void);
-void *			ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int,
-			    int);
-void			ubc_release(void *, int);
 int			ubc_uiomove(struct uvm_object *, struct uio *, vsize_t,
 			    int, int);
 void			ubc_zerorange(struct uvm_object *, off_t, size_t, int);
@@ -635,7 +632,8 @@ void			uvm_emap_update(u_int);
 
 vaddr_t			uvm_emap_alloc(vsize_t, bool);
 void			uvm_emap_free(vaddr_t, size_t);
-void			uvm_emap_enter(vaddr_t, struct vm_page **, u_int);
+void			uvm_emap_enter(vaddr_t, struct vm_page **, u_int,
+			    vm_prot_t);
 void			uvm_emap_remove(vaddr_t, vsize_t);
 
 #ifdef __HAVE_PMAP_EMAP
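
For reference, the protocol this patch moves UBC onto is the producer/consumer
scheme documented in the header comment of uvm_emap.c: the producer enters the
ephemeral mapping and publishes a generation number, and each consumer
synchronises against that generation before touching the window, then removes
the mapping when done.  The sketch below is illustrative only; the helper name
emap_window_read() and the memcpy() consumer are assumptions, not part of the
patch, and only the uvm_emap_* calls shown in the diff are real API.

/*
 * Minimal sketch of the ephemeral-mapping protocol used above:
 * uvm_emap_enter()/uvm_emap_produce() on the producer side,
 * uvm_emap_consume() before any access, uvm_emap_remove() on teardown.
 * Hypothetical helper, kernel context assumed.
 */
static void
emap_window_read(vaddr_t va, struct vm_page **pgs, u_int npages,
    void *dst, size_t len)
{
	u_int gen;

	/* Producer: enter the mapping; no TLB flush is done here. */
	uvm_emap_enter(va, pgs, npages, VM_PROT_READ);
	gen = uvm_emap_produce();

	/* Consumer: catch up with the generation before any access. */
	uvm_emap_consume(gen);
	memcpy(dst, (const void *)va, len);

	/* Tear down the ephemeral mapping. */
	uvm_emap_remove(va, (vsize_t)npages << PAGE_SHIFT);
}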