reorganise most of the NetBSD portion of nvkm_mem_dtor(). when nvkm_mem_new_host() is called via the in-kernel ioctl method, we copy the supplied dmamap, use its dm_nsegs value for allocation of "mem->dma", and assume it remains valid until we're done. when this path is taken "mem->mem" remains NULL so all the code in nvkm_mem_dtor() is ignored, and the "mem->dma" is leaked. this is one leak seen in PR#56826. as "dmamap->dm_nsegs" can become invalid before the dtor call, store the value in "mem->nseg" for use in the dtor, and convert the dtor to free "mem->dma" if "mem->dma" is set. additionally, "mem->pages" should end up being the same value as "nseg" here, KASSERT() this. while here properly mark NetBSD specific code in nvkm_mem_new_host(). additionally, destroy the dmamap created in the non-ioctl path of nvkm_mem_new_host(). this is another leak seen in PR#56826. with both of these fixes my "kmem-04096" pool does not grow rapidly while using "mpv -vo gpu". in fact, once i loaded the relevant file into memory, this pool remains stable after at least one minute of video playback. 
Index: external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_mem.c =================================================================== RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_mem.c,v retrieving revision 1.7 diff -p -u -r1.7 nouveau_nvkm_subdev_mmu_mem.c --- external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_mem.c 19 Dec 2021 11:06:44 -0000 1.7 +++ external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/nouveau_nvkm_subdev_mmu_mem.c 31 May 2022 19:49:35 -0000 @@ -97,16 +97,21 @@ static void * nvkm_mem_dtor(struct nvkm_memory *memory) { struct nvkm_mem *mem = nvkm_mem(memory); - if (mem->mem) { #ifdef __NetBSD__ + if (mem->dma) { + kmem_free(mem->dma, mem->nseg * sizeof(mem->dma[0])); + } + if (mem->mem) { struct nvkm_device *device = mem->mmu->subdev.device; bus_dma_tag_t dmat = device->func->dma_tag(device); - kmem_free(mem->dma, - mem->dmamap->dm_nsegs * sizeof(mem->dma[0])); + bus_dmamap_unload(dmat, mem->dmamap); bus_dmamem_free(dmat, mem->mem, mem->nseg); + bus_dmamap_destroy(dmat, mem->dmamap); kmem_free(mem->mem, mem->pages * sizeof(mem->mem[0])); + } #else + if (mem->mem) { while (mem->pages--) { dma_unmap_page(mem->mmu->subdev.device->dev, mem->dma[mem->pages], PAGE_SIZE, @@ -115,8 +120,8 @@ nvkm_mem_dtor(struct nvkm_memory *memory } kvfree(mem->dma); kvfree(mem->mem); -#endif } +#endif return mem; } @@ -218,7 +223,11 @@ nvkm_mem_new_host(struct nvkm_mmu *mmu, if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { if (args->v0.dma) { nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory); +#ifndef __NetBSD__ + mem->dma = args->v0.dma; +#else mem->dmamap = args->v0.dma; + mem->nseg = mem->dmamap->dm_nsegs; mem->dma = kmem_zalloc(mem->dmamap->dm_nsegs * sizeof(mem->dma[0]), KM_SLEEP); for (unsigned i = 0; i < mem->dmamap->dm_nsegs; i++) { @@ -226,6 +235,7 @@ nvkm_mem_new_host(struct nvkm_mmu *mmu, PAGE_SIZE); mem->dma[i] = 
mem->dmamap->dm_segs[i].ds_addr; } +#endif } else { #ifdef __NetBSD__ return -ENODEV; @@ -238,6 +248,9 @@ nvkm_mem_new_host(struct nvkm_mmu *mmu, if (!IS_ALIGNED(size, PAGE_SIZE)) return -EINVAL; mem->pages = size >> PAGE_SHIFT; +#ifdef __NetBSD__ + KASSERT(mem->pages == mem->nseg); +#endif return 0; } else if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) { @@ -283,6 +296,7 @@ fail2: __unused mem->dma[i] = mem->dmamap->dm_segs[i].ds_addr; } mem->pages = size; + KASSERT(mem->pages == mem->nseg); #else if (!(mem->mem = kvmalloc_array(size, sizeof(*mem->mem), GFP_KERNEL))) return -ENOMEM;