author     Chris Wilson <chris@chris-wilson.co.uk>   2011-01-12 23:49:13 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>   2011-01-13 16:06:05 +0000
commit     dd6864a4edb9b2d0055a7f30e17cbc521098b1be
tree       5c3b41913378fe69c506f54f9ac3ee45a1809f6c
parent     c37d9a5de94a6fe60a756af350cd21aa9bbbc8a1
drm/i915/execbuffer: Reorder relocations to match new object order
On the fault path, commit 6fe4f140 introduced a regression whereby it changed the sequence of the objects but continued to use the original ordering of the relocation entries. The result was that incorrect GTT offsets were fed into the execbuffer, causing lots of misrendering and potential hangs.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
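For context, the following is a minimal standalone sketch (not kernel code; the struct name, the sample counts, and the reordering are hypothetical) of the bookkeeping the patch introduces: record where each exec entry's relocations start, so the correct slice of the relocation array can be looked up by index even after the object list has been reordered, instead of relying on a running counter that assumes the original order.

/*
 * Standalone illustration of the reloc_offset[] idea; names are
 * hypothetical and only the indexing scheme mirrors the patch.
 */
#include <stdio.h>
#include <stdlib.h>

struct exec_entry {                     /* stand-in for the real exec object */
	int relocation_count;
};

int main(void)
{
	struct exec_entry exec[] = { {2}, {4}, {1} };
	int count = sizeof(exec) / sizeof(exec[0]);
	int *reloc_offset = malloc(count * sizeof(*reloc_offset));
	int total = 0;

	if (reloc_offset == NULL)
		return 1;

	/* Pass 1: record the start offset of each entry's relocations,
	 * in the original exec[] order. */
	for (int i = 0; i < count; i++) {
		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	/* Pass 2: the objects may now be visited in a different order.
	 * The broken code advanced a running 'total' and assumed the
	 * original order; the fix derives the index from the entry itself
	 * (obj->exec_entry - exec in the kernel) and looks the offset up. */
	int visit_order[] = { 2, 0, 1 };        /* hypothetical reordering */
	for (int i = 0; i < count; i++) {
		int idx = visit_order[i];
		printf("entry %d: relocations [%d, %d)\n",
		       idx, reloc_offset[idx],
		       reloc_offset[idx] + exec[idx].relocation_count);
	}

	free(reloc_offset);
	return 0;
}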
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e698343..6b34e98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -636,6 +636,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
+	int *reloc_offset;
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
@@ -653,8 +654,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	for (i = 0; i < count; i++)
 		total += exec[i].relocation_count;
 
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
@@ -672,6 +676,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 			goto err;
 		}
 
+		reloc_offset[i] = total;
 		total += exec[i].relocation_count;
 	}
 
@@ -705,17 +710,14 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	total = 0;
 	list_for_each_entry(obj, objects, exec_list) {
+		int offset = obj->exec_entry - exec;
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + total);
+							       reloc + reloc_offset[offset]);
 		if (ret)
 			goto err;
-
-		total += exec->relocation_count;
-		exec++;
 	}
 
 	/* Leave the user relocations as are, this is the painfully slow path,
@@ -726,6 +728,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 err:
 	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
 	return ret;
 }
 