Index: i915_gem.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c,v
retrieving revision 1.21
diff -p -u -r1.21 i915_gem.c
--- i915_gem.c	26 Feb 2015 19:43:43 -0000	1.21
+++ i915_gem.c	26 Feb 2015 20:12:13 -0000
@@ -578,6 +578,8 @@ int i915_gem_obj_prepare_shmem_read(stru
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
 			return ret;
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 	}
 
 	ret = i915_gem_object_get_pages(obj);
@@ -917,6 +919,8 @@ i915_gem_gtt_pwrite_fast(struct drm_devi
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin;
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
@@ -1062,6 +1066,8 @@ i915_gem_shmem_pwrite(struct drm_device
 		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 	}
 	/* Same trick applies to invalidate partially written cachelines read
 	 * before writing. */
@@ -1603,6 +1609,8 @@ static int
 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
 				     struct intel_ring_buffer *ring)
 {
+	WARN_ON(!obj->active); /* XXX BUG */
+	WARN_ON(!(obj->base.write_domain & I915_GEM_GPU_DOMAINS)); /* XXX BUG */
 	i915_gem_retire_requests_ring(ring);
 
 	/* Manually manage the write flush as we may have not yet
@@ -1615,6 +1623,9 @@ i915_gem_object_wait_rendering__tail(str
 	obj->last_write_seqno = 0;
 	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	return 0;
 }
 
@@ -1638,7 +1649,13 @@ i915_gem_object_wait_rendering(struct dr
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	ret = i915_gem_object_wait_rendering__tail(obj, ring);
+	if (ret)
+		return ret;
+
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+	return 0;
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1678,7 +1695,13 @@ i915_gem_object_wait_rendering__nonblock
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	ret = i915_gem_object_wait_rendering__tail(obj, ring);
+	if (ret)
+		return ret;
+
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+	return 0;
 }
 
 /**
@@ -1730,8 +1753,15 @@ i915_gem_set_domain_ioctl(struct drm_dev
 	if (ret)
 		goto unref;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+		if (ret == 0) {
+			WARN_ON(obj->active); /* XXX BUG */
+			WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+		}
 
 		/* Silently promote "you're not bound, there was nothing to do"
 		 * to success, since the client was just asking us to
@@ -1741,6 +1771,10 @@ i915_gem_set_domain_ioctl(struct drm_dev
 			ret = 0;
 	} else {
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+		if (ret)
+			goto unref;
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 	}
 
 unref:
@@ -1888,6 +1922,9 @@ i915_gem_fault(struct uvm_faultinfo *ufi
 	if (ret)
 		goto unlock;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	if ((obj->cache_level != I915_CACHE_NONE) && !HAS_LLC(dev)) {
 		ret = -EINVAL;
 		goto unlock;
@@ -1900,6 +1937,8 @@ i915_gem_fault(struct uvm_faultinfo *ufi
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
 	if (ret)
 		goto unpin;
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	ret = i915_gem_object_get_fence(obj);
 	if (ret)
@@ -2059,6 +2098,9 @@ int i915_gem_fault(struct vm_area_struct
 	if (ret)
 		goto unlock;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
 		ret = -EINVAL;
@@ -2073,6 +2115,8 @@ int i915_gem_fault(struct vm_area_struct
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
 	if (ret)
 		goto unpin;
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	ret = i915_gem_object_get_fence(obj);
 	if (ret)
@@ -2422,9 +2466,13 @@ i915_gem_object_put_pages_gtt(struct drm
 	if (ret) {
 		WARN_ON(ret != -EIO);
 		i915_gem_clflush_object(obj, true);
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 		obj->base.read_domains = obj->base.write_domain =
 		    I915_GEM_DOMAIN_CPU;
 	}
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
@@ -2453,8 +2501,12 @@ i915_gem_object_put_pages_gtt(struct drm
 		 */
 		WARN_ON(ret != -EIO);
 		i915_gem_clflush_object(obj, true);
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
@@ -2501,6 +2553,9 @@ i915_gem_object_put_pages(struct drm_i91
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -2520,6 +2575,8 @@ __i915_gem_shrink(struct drm_i915_privat
 				 global_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_put_pages(obj) == 0) {
+			WARN_ON(obj->active); /* XXX BUG */
+			WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 			count += obj->base.size >> PAGE_SHIFT;
 			if (count >= target)
 				return count;
@@ -2569,8 +2626,11 @@ __i915_gem_shrink(struct drm_i915_privat
 			if (i915_vma_unbind(vma))
 				break;
 
-		if (i915_gem_object_put_pages(obj) == 0)
+		if (i915_gem_object_put_pages(obj) == 0) {
+			WARN_ON(obj->active); /* XXX BUG */
+			WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 			count += obj->base.size >> PAGE_SHIFT;
+		}
 
 		drm_gem_object_unreference(&obj->base);
 	}
@@ -2595,8 +2655,11 @@ i915_gem_shrink_all(struct drm_i915_priv
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
 				 global_list) {
-		if (i915_gem_object_put_pages(obj) == 0)
+		if (i915_gem_object_put_pages(obj) == 0) {
+			WARN_ON(obj->active); /* XXX BUG */
+			WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 			freed += obj->base.size >> PAGE_SHIFT;
+		}
 	}
 	return freed;
 }
@@ -3489,8 +3552,14 @@ i915_gem_object_sync(struct drm_i915_gem
 	if (from == NULL || to == from)
 		return 0;
 
-	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj, false);
+	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) {
+		ret = i915_gem_object_wait_rendering(obj, false);
+		if (ret)
+			return ret;
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+		return 0;
+	}
 
 	idx = intel_ring_sync_index(from, to);
 
@@ -4252,6 +4321,8 @@ i915_gem_object_set_to_gtt_domain(struct
 	if (ret)
 		return ret;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 	i915_gem_object_flush_cpu_write_domain(obj, false);
 
 	/* Serialise direct access to this object with the barriers for
@@ -4318,8 +4389,13 @@ int i915_gem_object_set_cache_level(stru
 		ret = i915_gem_object_finish_gpu(obj);
 		if (ret)
 			return ret;
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 		i915_gem_object_finish_gtt(obj);
+		WARN_ON(obj->active); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+		WARN_ON(obj->base.write_domain & I915_GEM_DOMAIN_GTT); /* XXX BUG */
 
 		/* Before SandyBridge, you could not use tiling or fence
 		 * registers with snooped memory, so relinquish any fences
@@ -4513,6 +4589,9 @@ i915_gem_object_pin_to_display_plane(str
 
 	i915_gem_object_flush_cpu_write_domain(obj, true);
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
 
@@ -4552,6 +4631,9 @@ i915_gem_object_finish_gpu(struct drm_i9
 	if (ret)
 		return ret;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	/* Ensure that we invalidate the GPU's caches and TLBs. */
 	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	return 0;
@@ -4576,6 +4658,9 @@ i915_gem_object_set_to_cpu_domain(struct
 	if (ret)
 		return ret;
 
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -5055,6 +5140,8 @@ void i915_gem_free_object(struct drm_gem
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 	i915_gem_object_free_mmap_offset(obj);
 	i915_gem_object_release_stolen(obj);
 
Index: i915_gem_execbuffer.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c,v
retrieving revision 1.4
diff -p -u -r1.4 i915_gem_execbuffer.c
--- i915_gem_execbuffer.c	16 Jul 2014 20:56:24 -0000	1.4
+++ i915_gem_execbuffer.c	26 Feb 2015 20:12:13 -0000
@@ -292,6 +292,8 @@ relocate_entry_cpu(struct drm_i915_gem_o
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
@@ -327,6 +329,8 @@ relocate_entry_gtt(struct drm_i915_gem_o
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		return ret;
+	WARN_ON(obj->active); /* XXX BUG */
+	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
@@ -1003,6 +1007,8 @@ i915_gem_execbuffer_move_to_active(struc
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
+		WARN_ON(obj->active); /* not active yet */ /* XXX BUG */
+		WARN_ON(obj->base.pending_write_domain & ~I915_GEM_GPU_DOMAINS); /* XXX BUG */
 		obj->base.write_domain = obj->base.pending_write_domain;
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
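
A side note, not part of the diff above: the same two assertions are pasted verbatim at roughly thirty call sites, which makes the patch noisy and easy to get out of sync while iterating. If any of these checks survive debugging, they could be folded into one helper. A minimal sketch, assuming it sits near the top of i915_gem.c with the usual i915_drv.h definitions in scope; the name i915_gem_object_assert_quiescent is made up here and does not exist in the tree:

static inline void
i915_gem_object_assert_quiescent(struct drm_i915_gem_object *obj)
{
	/* The object should be off the active list... */
	WARN_ON(obj->active); /* XXX BUG */
	/* ...and should hold no pending GPU write domain. */
	WARN_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); /* XXX BUG */
}

Each WARN_ON pair above would then collapse to a single i915_gem_object_assert_quiescent(obj) call, and the inverted pair at the top of i915_gem_object_wait_rendering__tail (which asserts the object IS still active on entry) would stay spelled out, since it checks the opposite condition.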