From cc07bce236934283926a18f9ba19e47e4d29869a Mon Sep 17 00:00:00 2001
From: Taylor R Campbell <riastradh@NetBSD.org>
Date: Sun, 14 Jan 2024 22:35:09 +0000
Subject: [PATCH] i915: Ensure every get_pages method fills the sg pages.

This is needed by i915 gem fault, which maps user virtual addresses
to those pages, and by i915 gem object destruction, which does
pmap_page_protect on the pages to remove any of those user virtual
mappings.

Note: This requires i915_gem_phys.c to use pmap_kenter_pa to preserve
the _kernel's_ mapping of the pages after pmap_page_protect, but
bus_dmamem_map currently uses pmap_enter(pmap_kernel(), ...) instead,
which creates a mapping that is removed by pmap_page_protect.  So we
use a variant of bus_dmamem_map that uses pmap_kenter_pa instead --
perhaps bus_dmamem_map should do this itself, but this change is less
risky to pull up than a change to bus_dmamem_map itself.

While here, make iteration over sg_pgs a little more robust, and
reduce the diff from upstream where convenient.

PR kern/57833

XXX pullup-10
---
 .../drm2/dist/drm/i915/gem/i915_gem_mman.c    |  2 +-
 .../drm2/dist/drm/i915/gem/i915_gem_pages.c   | 11 ++-
 .../drm2/dist/drm/i915/gem/i915_gem_phys.c    | 89 ++++++++++++++++++-
 .../drm2/dist/drm/i915/gem/i915_gem_region.c  | 23 +++--
 .../drm2/dist/drm/i915/gem/i915_gem_stolen.c  | 44 +++++----
 5 files changed, 144 insertions(+), 25 deletions(-)

diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
index 8e592f008ef7..d0265738e2c8 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	if (!i915_gem_object_has_pages(obj))
 		return;
 
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
index 58075877254a..32c188352dbb 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
@@ -42,6 +42,15 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 
-#ifndef __NetBSD__
+#ifdef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+#else
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif
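Note on the assertion above: it pins down the invariant the rest of the
patch enforces -- in NetBSD's linux/scatterlist shim, an sg list is a
flat array of page pointers (sg_pgs) of length sg_npgs, one per page of
the object.  The two consumers named in the commit message depend on
every entry being filled.  A minimal sketch of that dependence, not
part of the patch (revoke_user_mappings is a hypothetical name; the
fields follow the i915_gem_mman.c hunk above):

	/*
	 * Sketch: revoke all user virtual mappings of an object's
	 * pages, as i915 gem object destruction does.  Requires every
	 * sg_pgs[i] to have been filled in by the get_pages method;
	 * an unfilled entry would be a NULL dereference here.
	 */
	static void
	revoke_user_mappings(struct drm_i915_gem_object *obj)
	{
		struct scatterlist *sgl = obj->mm.pages->sgl;
		size_t i;

		for (i = 0; i < sgl->sg_npgs; i++) {
			struct page *page = sgl->sg_pgs[i];
			struct vm_page *vm_page = &page->p_vmp;

			pmap_page_protect(vm_page, VM_PROT_NONE);
		}
	}
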
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
index 3f68276eea5f..a5aec14d48a4 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
@@ -24,6 +24,84 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh
 #include "i915_gem_region.h"
 #include "i915_scatterlist.h"
 
+#ifdef __NetBSD__
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <x86/pmap_private.h>	/* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t dmat, const bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}
+
+#endif
+
 #include <linux/nbsd-namespace.h>
 
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -65,7 +143,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +161,12 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +234,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +308,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
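Why bus_dmamem_kmap exists, in miniature: pmap_enter(pmap_kernel(),
...) creates a managed mapping, tracked on the page's pv list, so a
later pmap_page_protect(pg, VM_PROT_NONE) tears it down along with the
user mappings; pmap_kenter_pa creates an unmanaged mapping that
pmap_page_protect never sees.  A sketch of the difference, not part of
the patch (hypothetical function and arguments; VA allocation and
error handling omitted):

	/*
	 * Sketch: lifetime of a managed vs. an unmanaged kernel
	 * mapping of the same physical page pg at address pa.
	 */
	static void
	example_mapping_lifetime(vaddr_t va1, vaddr_t va2, paddr_t pa,
	    struct vm_page *pg)
	{
		/* Managed: linked into pg's pv list. */
		(void)pmap_enter(pmap_kernel(), va1, pa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		pmap_update(pmap_kernel());

		/* Unmanaged: invisible to pmap_page_protect. */
		pmap_kenter_pa(va2, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pmap_update(pmap_kernel());

		/* Revoke all managed mappings of pg, as gem teardown does. */
		pmap_page_protect(pg, VM_PROT_NONE);
		pmap_update(pmap_kernel());

		/* va1 no longer maps pa; va2 still does. */
	}
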
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
index 1a928988bd5f..85a1de8cb7ea 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,15 +62,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -85,7 +87,15 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -106,8 +116,9 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -145,16 +156,18 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 
 #ifdef __NetBSD__
 err:
 	if (loaded)
-		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+		bus_dmamap_unload(dmat, sg->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);
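The region conversion above and the stolen conversion below share one
shape: describe the object's backing store as an array of
bus_dma_segment_t, let sg_alloc_table_from_bus_dmamem fill sg_pgs from
those segments, then create and load the DMA map, unwinding each stage
on failure.  Condensed into a hypothetical helper, not part of the
patch (the real hunks interleave this with buddy-block and
stolen-range bookkeeping):

	static int
	fill_sg_from_segs(struct sg_table *st, bus_dma_tag_t dmat,
	    bus_dma_segment_t *segs, int nsegs, bus_size_t size)
	{
		int ret;

		/* Fill st->sgl->sg_pgs[] from the physical segments. */
		ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
		    GFP_KERNEL);
		if (ret)
			return ret;

		/* XXX errno NetBSD->Linux */
		ret = -bus_dmamap_create(dmat, size, nsegs, size, 0,
		    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
		if (ret) {
			st->sgl->sg_dmamap = NULL;
			goto err_sg;
		}
		st->sgl->sg_dmat = dmat;

		/* XXX errno NetBSD->Linux */
		ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap,
		    segs, nsegs, size, BUS_DMA_WAITOK);
		if (ret)
			goto err_dmamap;

		return 0;

	err_dmamap:
		bus_dmamap_destroy(dmat, st->sgl->sg_dmamap);
		st->sgl->sg_dmamap = NULL;
	err_sg:
		sg_free_table(st);
		return ret;
	}
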
diff --git a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
index b53d543fa894..1635e57befd7 100644
--- a/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
+++ b/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,19 +542,30 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
-	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
+	    BUS_DMA_WAITOK, &sg->sg_dmamap);
 	if (ret) {
 		DRM_ERROR("failed to create DMA map for stolen object: %d\n",
 		    ret);
-		st->sgl->sg_dmamap = NULL;
+		sg->sg_dmamap = NULL;
 		goto out;
 	}
-	st->sgl->sg_dmat = dmat;
+	sg->sg_dmat = dmat;
 
-	/* XXX errno NetBSD->Liux */
-	ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, seg, nseg, size,
 	    BUS_DMA_WAITOK);
 	if (ret) {
 		DRM_ERROR("failed to load DMA map for stolen object: %d\n",
@@ -569,14 +574,23 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
-			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+			bus_dmamap_unload(dmat, sg->sg_dmamap);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;
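One more detail worth noting in the stolen error path above: the out:
label now frees the seg array on success and failure alike, and the
unwind releases only the stages that actually completed, in reverse
order -- unload the map if it was loaded, destroy it if it was
created, free the sg table only if it was allocated.  The same
discipline, sketched as a hypothetical helper, not part of the patch:

	/*
	 * Sketch: unwind only the completed stages.  'loaded' mirrors
	 * the flag used above; sg is NULL if the sg table was never
	 * allocated, and sg->sg_dmamap is NULL if creation failed.
	 */
	static void
	unwind_stolen(bus_dma_tag_t dmat, struct sg_table *st,
	    struct scatterlist *sg, bool loaded)
	{
		if (loaded)
			bus_dmamap_unload(dmat, sg->sg_dmamap);
		if (sg && sg->sg_dmamap)
			bus_dmamap_destroy(dmat, sg->sg_dmamap);
		if (sg)
			sg_free_table(st);
		kfree(st);
	}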