Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 290
1 file changed, 91 insertions, 199 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 97b47c5..1ca53ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,7 +32,6 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/dma_remapping.h>
 
 struct change_domains {
 	uint32_t invalidate_domains;
@@ -203,9 +202,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	cd->invalidate_domains |= invalidate_domains;
 	cd->flush_domains |= flush_domains;
 	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
+		cd->flush_rings |= obj->ring->id;
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
+		cd->flush_rings |= ring->id;
 }
 
 struct eb_objects {
@@ -287,14 +286,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	 * exec_object list, so it should have a GTT space bound by now.
 	 */
 	if (unlikely(target_offset == 0)) {
-		DRM_DEBUG("No GTT space found for object %d\n",
+		DRM_ERROR("No GTT space found for object %d\n",
 			  reloc->target_handle);
 		return ret;
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-		DRM_DEBUG("reloc with multiple write domains: "
+		DRM_ERROR("reloc with multiple write domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
@@ -303,9 +302,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely((reloc->write_domain | reloc->read_domains)
-		     & ~I915_GEM_GPU_DOMAINS)) {
-		DRM_DEBUG("reloc with read/write non-GPU domains: "
+	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+		DRM_ERROR("reloc with read/write CPU domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
@@ -316,7 +314,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	}
 	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
 		     reloc->write_domain != target_obj->pending_write_domain)) {
-		DRM_DEBUG("Write domain conflict: "
+		DRM_ERROR("Write domain conflict: "
 			  "obj %p target %d offset %d "
 			  "new %08x old %08x\n",
 			  obj, reloc->target_handle,
@@ -337,7 +335,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset > obj->base.size - 4)) {
-		DRM_DEBUG("Relocation beyond object bounds: "
+		DRM_ERROR("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset,
@@ -345,7 +343,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return ret;
 	}
 	if (unlikely(reloc->offset & 3)) {
-		DRM_DEBUG("Relocation not 4-byte aligned: "
+		DRM_ERROR("Relocation not 4-byte aligned: "
 			  "obj %p target %d offset %d.\n",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset);
@@ -462,60 +460,11 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
-
-static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
-{
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence, need_mappable;
-	int ret;
-
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable =
-		entry->relocation_count ? true : need_fence;
-
-	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
-	if (ret)
-		return ret;
-
-	if (has_fenced_gpu_access) {
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			if (obj->tiling_mode) {
-				ret = i915_gem_object_get_fence(obj, ring);
-				if (ret)
-					goto err_unpin;
-
-				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-				i915_gem_object_pin_fence(obj);
-			} else {
-				ret = i915_gem_object_put_fence(obj);
-				if (ret)
-					goto err_unpin;
-			}
-		}
-		obj->pending_fenced_gpu_access = need_fence;
-	}
-
-	entry->offset = obj->gtt_offset;
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	int ret, retry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -568,7 +517,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		list_for_each_entry(obj, objects, exec_list) {
 			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 			bool need_fence, need_mappable;
-
 			if (!obj->gtt_space)
 				continue;
 
@@ -583,55 +531,58 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
 			if (ret)
 				goto err;
+
+			entry++;
 		}
 
 		/* Bind fresh objects */
 		list_for_each_entry(obj, objects, exec_list) {
-			if (obj->gtt_space)
-				continue;
-
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
-		}
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence;
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
 
-			if (!obj->gtt_space)
-				continue;
+			if (!obj->gtt_space) {
+				bool need_mappable =
+					entry->relocation_count ? true : need_fence;
 
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
+				if (ret)
+					break;
 			}
 
-			i915_gem_object_unpin(obj);
+			if (has_fenced_gpu_access) {
+				if (need_fence) {
+					ret = i915_gem_object_get_fence(obj, ring);
+					if (ret)
+						break;
+				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+					   obj->tiling_mode == I915_TILING_NONE) {
+					/* XXX pipelined! */
+					ret = i915_gem_object_put_fence(obj);
+					if (ret)
+						break;
+				}
+				obj->pending_fenced_gpu_access = need_fence;
+			}
 
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
+			entry->offset = obj->gtt_offset;
+		}
 
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			if (obj->gtt_space)
+				i915_gem_object_unpin(obj);
 		}
 
 		if (ret != -ENOSPC || retry > 1)
@@ -648,19 +599,16 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	} while (1);
 
 err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
+	obj = list_entry(obj->exec_list.prev,
+			 struct drm_i915_gem_object,
+			 exec_list);
+	while (objects != &obj->exec_list) {
+		if (obj->gtt_space)
+			i915_gem_object_unpin(obj);
 
-		i915_gem_object_unpin(obj);
+		obj = list_entry(obj->exec_list.prev,
+				 struct drm_i915_gem_object,
+				 exec_list);
 	}
 
 	return ret;
@@ -754,7 +702,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 							exec[i].handle));
 		if (&obj->base == NULL) {
-			DRM_DEBUG("Invalid object handle %d at index %d\n",
+			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
 			goto err;
@@ -819,22 +767,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	return 0;
 }
 
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen < 6)
-		return 0;
-
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
-
-	/* Disable semaphores on SNB */
-	if (INTEL_INFO(dev)->gen == 6)
-		return 0;
-
-	return 1;
-}
-
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *to)
@@ -847,7 +779,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 		return 0;
 
 	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (!intel_enable_semaphores(obj->base.dev))
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
 		return i915_gem_object_wait_rendering(obj);
 
 	idx = intel_ring_sync_index(from, to);
@@ -873,8 +805,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	}
 
 	from->sync_seqno[idx] = seqno;
-
-	return to->sync_to(to, from, seqno - 1);
+	return intel_ring_sync(to, from, seqno - 1);
 }
 
 static int
@@ -1048,31 +979,6 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 }
 
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, i;
-
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
-		return 0;
-
-	ret = intel_ring_begin(ring, 4 * 3);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
-	}
-
-	intel_ring_advance(ring);
-
-	return 0;
-}
-
-static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
@@ -1086,11 +992,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 exec_start, exec_len;
 	u32 seqno;
-	u32 mask;
 	int ret, mode, i;
 
 	if (!i915_gem_check_execbuffer(args)) {
-		DRM_DEBUG("execbuf with invalid offset/length\n");
+		DRM_ERROR("execbuf with invalid offset/length\n");
 		return -EINVAL;
 	}
 
@@ -1105,26 +1010,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		break;
 	case I915_EXEC_BSD:
 		if (!HAS_BSD(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BSD)\n");
+			DRM_ERROR("execbuf with invalid ring (BSD)\n");
 			return -EINVAL;
 		}
 		ring = &dev_priv->ring[VCS];
 		break;
 	case I915_EXEC_BLT:
 		if (!HAS_BLT(dev)) {
-			DRM_DEBUG("execbuf with invalid ring (BLT)\n");
+			DRM_ERROR("execbuf with invalid ring (BLT)\n");
 			return -EINVAL;
 		}
 		ring = &dev_priv->ring[BCS];
 		break;
 	default:
-		DRM_DEBUG("execbuf with unknown ring: %d\n",
+		DRM_ERROR("execbuf with unknown ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;
 	}
 
 	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	mask = I915_EXEC_CONSTANTS_MASK;
 	switch (mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1138,24 +1042,33 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
 				return -EINVAL;
 
-			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
-				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+			ret = intel_ring_begin(ring, 4);
+			if (ret)
+				return ret;
+
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit(ring, INSTPM);
+			intel_ring_emit(ring,
+					I915_EXEC_CONSTANTS_MASK << 16 | mode);
+			intel_ring_advance(ring);
+
+			dev_priv->relative_constants_mode = mode;
 		}
 		break;
 	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
 		return -EINVAL;
 	}
 
 	if (args->buffer_count < 1) {
-		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
 	if (args->num_cliprects != 0) {
 		if (ring != &dev_priv->ring[RCS]) {
-			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+			DRM_ERROR("clip rectangles are only valid with the render ring\n");
 			return -EINVAL;
 		}
 
@@ -1205,7 +1118,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 							exec[i].handle));
 		if (&obj->base == NULL) {
-			DRM_DEBUG("Invalid object handle %d at index %d\n",
+			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			/* prevent error path from reading uninitialized data */
 			ret = -ENOENT;
@@ -1213,7 +1126,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 
 		if (!list_empty(&obj->exec_list)) {
-			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
 			goto err;
@@ -1251,7 +1164,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
 	if (batch_obj->base.pending_write_domain) {
-		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
 		goto err;
 	}
@@ -1268,7 +1181,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 * so every billion or so execbuffers, we need to stall
 		 * the GPU in order to reset the counters.
 		 */
-		ret = i915_gpu_idle(dev, true);
+		ret = i915_gpu_idle(dev);
 		if (ret)
 			goto err;
 
@@ -1276,27 +1189,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		}
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
-	    mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			goto err;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, mask << 16 | mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = mode;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
-		if (ret)
-			goto err;
-	}
-
 	trace_i915_gem_ring_dispatch(ring, seqno);
 
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
@@ -1356,7 +1248,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, i;
 
 	if (args->buffer_count < 1) {
-		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
@@ -1364,7 +1256,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
 	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
 	if (exec_list == NULL || exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
 		drm_free_large(exec_list);
 		drm_free_large(exec2_list);
@@ -1375,7 +1267,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			     (uintptr_t) args->buffers_ptr,
 			     sizeof(*exec_list) * args->buffer_count);
 	if (ret != 0) {
-		DRM_DEBUG("copy %d exec entries failed %d\n",
+		DRM_ERROR("copy %d exec entries failed %d\n",
 			  args->buffer_count, ret);
 		drm_free_large(exec_list);
 		drm_free_large(exec2_list);
@@ -1416,7 +1308,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 				   sizeof(*exec_list) * args->buffer_count);
 		if (ret) {
 			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
+			DRM_ERROR("failed to copy %d exec entries "
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
 		}
@@ -1437,7 +1329,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 
 	if (args->buffer_count < 1 ||
 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
-		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
@@ -1447,7 +1339,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
 					   args->buffer_count);
 	if (exec2_list == NULL) {
-		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
 		return -ENOMEM;
 	}
@@ -1456,7 +1348,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 			     (uintptr_t) args->buffers_ptr,
 			     sizeof(*exec2_list) * args->buffer_count);
 	if (ret != 0) {
-		DRM_DEBUG("copy %d exec entries failed %d\n",
+		DRM_ERROR("copy %d exec entries failed %d\n",
 			  args->buffer_count, ret);
 		drm_free_large(exec2_list);
 		return -EFAULT;
@@ -1471,7 +1363,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 				   sizeof(*exec2_list) * args->buffer_count);
 		if (ret) {
 			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
+			DRM_ERROR("failed to copy %d exec entries "
 				  "back to user (%d)\n",
 				  args->buffer_count, ret);
 		}