author    mcampbellsmith <mcampbellsmith@gmail.com>    2013-11-20 22:34:44 +1100
committer sbrissen <sbrissen@hotmail.com>              2013-12-10 14:54:28 -0500
commit    74bcc029c6ab942f252477a74102877f7d093388 (patch)
tree      044bd97b98693c073841663c62f57e1520509109 /drivers/base
parent    2a6649bf6aa50c44a05fc02e1efb8b788c58e82b (diff)
mali: bulk import of r3p2-01rel3 drivers from i9300-update12
Courtesy of a similar commit from OMNI ROM. Requires updated mali blobs.

Change-Id: I9ee55b653b57b7c390f8e0e8cd4fc068f1c751c3
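
Beyond the bulk import, this revision of sync.c reworks the async-wait interface
visible in the diff below: sync_fence_wait_async() now takes a caller-owned
struct sync_fence_waiter instead of a callback plus opaque callback_data, the
callback receives the waiter itself, and a new sync_fence_cancel_async() can
remove a pending waiter. Callers are expected to embed the waiter in their own
structure and recover it with container_of(). A minimal caller sketch under
those assumptions (struct my_job and my_fence_callback are hypothetical, and
sync_fence_waiter_init() is assumed to come from the matching sync.h update):

#include <linux/kernel.h>
#include <linux/sync.h>

struct my_job {
	struct sync_fence *fence;
	struct sync_fence_waiter waiter;
	/* ... driver-specific state ... */
};

/* Runs once the fence signals; no callback_data is passed any more. */
static void my_fence_callback(struct sync_fence *fence,
			      struct sync_fence_waiter *waiter)
{
	struct my_job *job = container_of(waiter, struct my_job, waiter);

	/* ... kick off the job now that its dependency has signaled ... */
}

static int my_job_queue(struct my_job *job)
{
	int err;

	sync_fence_waiter_init(&job->waiter, my_fence_callback);

	err = sync_fence_wait_async(job->fence, &job->waiter);
	if (err < 0)		/* fence is already in an error state */
		return err;
	if (err > 0)		/* fence already signaled; run the job inline */
		my_fence_callback(job->fence, &job->waiter);
	return 0;		/* err == 0: waiter queued, callback will fire */
}

If the job is torn down before the fence signals, sync_fence_cancel_async(job->fence,
&job->waiter) removes the pending waiter, returning -ENOENT if the callback already ran.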
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/sync.c | 321
1 file changed, 263 insertions(+), 58 deletions(-)
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
index d6913f8..06a5eac 100644
--- a/drivers/base/sync.c
+++ b/drivers/base/sync.c
@@ -15,6 +15,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -27,8 +28,13 @@
#include <linux/anon_inodes.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sync.h>
+
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
@@ -49,6 +55,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
if (obj == NULL)
return NULL;
+ kref_init(&obj->kref);
obj->ops = ops;
strlcpy(obj->name, name, sizeof(obj->name));
@@ -64,9 +71,12 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
return obj;
}
+EXPORT_SYMBOL(sync_timeline_create);
-static void sync_timeline_free(struct sync_timeline *obj)
+static void sync_timeline_free(struct kref *kref)
{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
unsigned long flags;
if (obj->ops->release_obj)
@@ -81,19 +91,17 @@ static void sync_timeline_free(struct sync_timeline *obj)
void sync_timeline_destroy(struct sync_timeline *obj)
{
- unsigned long flags;
- bool needs_freeing;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
obj->destroyed = true;
- needs_freeing = list_empty(&obj->child_list_head);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- if (needs_freeing)
- sync_timeline_free(obj);
- else
+ /*
+ * If this is not the last reference, signal any children
+ * that their parent is going away.
+ */
+
+ if (!kref_put(&obj->kref, sync_timeline_free))
sync_timeline_signal(obj);
}
+EXPORT_SYMBOL(sync_timeline_destroy);
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
@@ -110,7 +118,6 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
{
struct sync_timeline *obj = pt->parent;
unsigned long flags;
- bool needs_freeing;
spin_lock_irqsave(&obj->active_list_lock, flags);
if (!list_empty(&pt->active_list))
@@ -118,12 +125,10 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
spin_unlock_irqrestore(&obj->active_list_lock, flags);
spin_lock_irqsave(&obj->child_list_lock, flags);
- list_del(&pt->child_list);
- needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
+ if (!list_empty(&pt->child_list)) {
+ list_del_init(&pt->child_list);
+ }
spin_unlock_irqrestore(&obj->child_list_lock, flags);
-
- if (needs_freeing)
- sync_timeline_free(obj);
}
void sync_timeline_signal(struct sync_timeline *obj)
@@ -132,26 +137,33 @@ void sync_timeline_signal(struct sync_timeline *obj)
LIST_HEAD(signaled_pts);
struct list_head *pos, *n;
+ trace_sync_timeline(obj);
+
spin_lock_irqsave(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &obj->active_list_head) {
struct sync_pt *pt =
container_of(pos, struct sync_pt, active_list);
- if (_sync_pt_has_signaled(pt))
- list_move(pos, &signaled_pts);
+ if (_sync_pt_has_signaled(pt)) {
+ list_del_init(pos);
+ list_add(&pt->signaled_list, &signaled_pts);
+ kref_get(&pt->fence->kref);
+ }
}
spin_unlock_irqrestore(&obj->active_list_lock, flags);
list_for_each_safe(pos, n, &signaled_pts) {
struct sync_pt *pt =
- container_of(pos, struct sync_pt, active_list);
+ container_of(pos, struct sync_pt, signaled_list);
list_del_init(pos);
sync_fence_signal_pt(pt);
+ kref_put(&pt->fence->kref, sync_fence_free);
}
}
+EXPORT_SYMBOL(sync_timeline_signal);
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
@@ -165,10 +177,12 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
return NULL;
INIT_LIST_HEAD(&pt->active_list);
+ kref_get(&parent->kref);
sync_timeline_add_pt(parent, pt);
return pt;
}
+EXPORT_SYMBOL(sync_pt_create);
void sync_pt_free(struct sync_pt *pt)
{
@@ -177,8 +191,11 @@ void sync_pt_free(struct sync_pt *pt)
sync_timeline_remove_pt(pt);
+ kref_put(&pt->parent->kref, sync_timeline_free);
+
kfree(pt);
}
+EXPORT_SYMBOL(sync_pt_free);
/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
@@ -247,6 +264,7 @@ static struct sync_fence *sync_fence_alloc(const char *name)
if (fence->file == NULL)
goto err;
+ kref_init(&fence->kref);
strlcpy(fence->name, name, sizeof(fence->name));
INIT_LIST_HEAD(&fence->pt_list_head);
@@ -282,29 +300,67 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
list_add(&pt->pt_list, &fence->pt_list_head);
sync_pt_activate(pt);
+ /*
+ * signal the fence in case pt was activated before
+ * sync_pt_activate(pt) was called
+ */
+ sync_fence_signal_pt(pt);
+
return fence;
}
+EXPORT_SYMBOL(sync_fence_create);
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
- struct list_head *pos;
+ struct list_head *pos, *test_pos;
list_for_each(pos, &src->pt_list_head) {
struct sync_pt *orig_pt =
container_of(pos, struct sync_pt, pt_list);
- struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+ struct sync_pt *new_pt;
+
+ /* Skip already signaled points */
+ if (1 == orig_pt->status)
+ continue;
+
+ list_for_each(test_pos, &src->pt_list_head) {
+ struct sync_pt *test_pt =
+ container_of(pos, struct sync_pt, pt_list);
+ if (orig_pt->parent == test_pt->parent) {
+ int diff;
+ diff = orig_pt->parent->ops->compare(orig_pt,
+ test_pt);
+ if (diff == -1) {
+ /* Skip orig_pt; another point will
+ * signal after it */
+ continue;
+ }
+ break;
+ }
+ }
+
+ new_pt = sync_pt_dup(orig_pt);
if (new_pt == NULL)
return -ENOMEM;
new_pt->fence = dst;
list_add(&new_pt->pt_list, &dst->pt_list_head);
- sync_pt_activate(new_pt);
}
return 0;
}
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &fence->pt_list_head) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+ sync_timeline_remove_pt(pt);
+ }
+}
+
static void sync_fence_free_pts(struct sync_fence *fence)
{
struct list_head *pos, *n;
@@ -331,16 +387,19 @@ err:
fput(file);
return NULL;
}
+EXPORT_SYMBOL(sync_fence_fdget);
void sync_fence_put(struct sync_fence *fence)
{
fput(fence->file);
}
+EXPORT_SYMBOL(sync_fence_put);
void sync_fence_install(struct sync_fence *fence, int fd)
{
fd_install(fd, fence->file);
}
+EXPORT_SYMBOL(sync_fence_install);
static int sync_fence_get_status(struct sync_fence *fence)
{
@@ -349,7 +408,13 @@ static int sync_fence_get_status(struct sync_fence *fence)
list_for_each(pos, &fence->pt_list_head) {
struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
- int pt_status = pt->status;
+ int pt_status;
+
+ if (pt == NULL) {
+ printk(KERN_ERR"[sync driver] sync_fence_get_status : sync_pt is NULL\n");
+ break;
+ }
+ pt_status = pt->status;
if (pt_status < 0) {
status = pt_status;
@@ -365,6 +430,7 @@ static int sync_fence_get_status(struct sync_fence *fence)
struct sync_fence *sync_fence_merge(const char *name,
struct sync_fence *a, struct sync_fence *b)
{
+ struct list_head *pos;
struct sync_fence *fence;
int err;
@@ -380,7 +446,29 @@ struct sync_fence *sync_fence_merge(const char *name,
if (err < 0)
goto err;
- fence->status = sync_fence_get_status(fence);
+ /* Make sure there is at least one point in the fence */
+ if (list_empty(&fence->pt_list_head)) {
+ struct sync_pt *orig_pt = list_first_entry(&a->pt_list_head,
+ struct sync_pt, pt_list);
+ struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+
+ new_pt->fence = fence;
+ list_add(&new_pt->pt_list, &fence->pt_list_head);
+ }
+
+ list_for_each(pos, &fence->pt_list_head) {
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, pt_list);
+ sync_pt_activate(pt);
+ }
+
+ /*
+ * signal the fence in case one of it's pts were activated before
+ * they were activated
+ */
+ sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
+ struct sync_pt,
+ pt_list));
return fence;
err:
@@ -388,6 +476,7 @@ err:
kfree(fence);
return NULL;
}
+EXPORT_SYMBOL(sync_fence_merge);
static void sync_fence_signal_pt(struct sync_pt *pt)
{
@@ -421,33 +510,22 @@ static void sync_fence_signal_pt(struct sync_pt *pt)
container_of(pos, struct sync_fence_waiter,
waiter_list);
- waiter->callback(fence, waiter->callback_data);
list_del(pos);
- kfree(waiter);
+ waiter->callback(fence, waiter);
}
wake_up(&fence->wq);
}
}
int sync_fence_wait_async(struct sync_fence *fence,
- void (*callback)(struct sync_fence *, void *data),
- void *callback_data)
+ struct sync_fence_waiter *waiter)
{
- struct sync_fence_waiter *waiter;
unsigned long flags;
int err = 0;
- waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
- if (waiter == NULL)
- return -ENOMEM;
-
- waiter->callback = callback;
- waiter->callback_data = callback_data;
-
spin_lock_irqsave(&fence->waiter_list_lock, flags);
if (fence->status) {
- kfree(waiter);
err = fence->status;
goto out;
}
@@ -458,44 +536,120 @@ out:
return err;
}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ struct list_head *pos;
+ struct list_head *n;
+ unsigned long flags;
+ int ret = -ENOENT;
+
+ spin_lock_irqsave(&fence->waiter_list_lock, flags);
+ /*
+ * Make sure waiter is still in waiter_list because it is possible for
+ * the waiter to be removed from the list while the callback is still
+ * pending.
+ */
+ list_for_each_safe(pos, n, &fence->waiter_list_head) {
+ struct sync_fence_waiter *list_waiter =
+ container_of(pos, struct sync_fence_waiter,
+ waiter_list);
+ if (list_waiter == waiter) {
+ list_del(pos);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+static bool sync_fence_check(struct sync_fence *fence)
+{
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+ return fence->status != 0;
+}
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
- int err;
+ int err = 0;
+ struct sync_pt *pt;
- if (timeout) {
+ trace_sync_wait(fence, 1);
+ list_for_each_entry(pt, &fence->pt_list_head, pt_list)
+ trace_sync_pt(pt);
+
+ if (timeout > 0) {
timeout = msecs_to_jiffies(timeout);
err = wait_event_interruptible_timeout(fence->wq,
- fence->status != 0,
+ sync_fence_check(fence),
timeout);
- } else {
- err = wait_event_interruptible(fence->wq, fence->status != 0);
+ } else if (timeout < 0) {
+ err = wait_event_interruptible(fence->wq,
+ sync_fence_check(fence));
}
+ trace_sync_wait(fence, 0);
if (err < 0)
return err;
- if (fence->status < 0)
+ if (fence->status < 0) {
+ pr_info("fence error %d on [%p]\n", fence->status, fence);
+ sync_dump();
return fence->status;
+ }
- if (fence->status == 0)
+ if (fence->status == 0) {
+ if (timeout > 0) {
+ pr_info("fence timeout on [%p] after %dms\n", fence,
+ jiffies_to_msecs(timeout));
+ sync_dump();
+ }
return -ETIME;
+ }
return 0;
}
+EXPORT_SYMBOL(sync_fence_wait);
+
+static void sync_fence_free(struct kref *kref)
+{
+ struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+ sync_fence_free_pts(fence);
+
+ kfree(fence);
+}
static int sync_fence_release(struct inode *inode, struct file *file)
{
struct sync_fence *fence = file->private_data;
unsigned long flags;
- sync_fence_free_pts(fence);
-
+ /*
+ * We need to remove all ways to access this fence before droping
+ * our ref.
+ *
+ * start with its membership in the global fence list
+ */
spin_lock_irqsave(&sync_fence_list_lock, flags);
list_del(&fence->sync_fence_list);
spin_unlock_irqrestore(&sync_fence_list_lock, flags);
- kfree(fence);
+ /*
+ * remove its pts from their parents so that sync_timeline_signal()
+ * can't reference the fence.
+ */
+ sync_fence_detach_pts(fence);
+
+ kref_put(&fence->kref, sync_fence_free);
return 0;
}
@@ -506,6 +660,12 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
poll_wait(file, &fence->wq, wait);
+ /*
+ * Make sure that reads to fence->status are ordered with the
+ * wait queue event triggering
+ */
+ smp_rmb();
+
if (fence->status == 1)
return POLLIN;
else if (fence->status < 0)
@@ -516,7 +676,7 @@ static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
- __u32 value;
+ __s32 value;
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
@@ -531,8 +691,13 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
struct sync_fence *fence2, *fence3;
struct sync_merge_data data;
- if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
- return -EFAULT;
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
fence2 = sync_fence_fdget(data.fd2);
if (fence2 == NULL) {
@@ -568,7 +733,7 @@ err_put_fd:
return err;
}
-int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
struct sync_pt_info *info = data;
int ret;
@@ -596,7 +761,6 @@ int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
return info->len;
}
-
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
unsigned long arg)
{
@@ -690,7 +854,17 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
}
- if (pt->parent->ops->print_pt) {
+ if (pt->parent->ops->timeline_value_str &&
+ pt->parent->ops->pt_value_str) {
+ char value[64];
+ pt->parent->ops->pt_value_str(pt, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ if (fence) {
+ pt->parent->ops->timeline_value_str(pt->parent, value,
+ sizeof(value));
+ seq_printf(s, " / %s", value);
+ }
+ } else if (pt->parent->ops->print_pt) {
seq_printf(s, ": ");
pt->parent->ops->print_pt(s, pt);
}
@@ -705,7 +879,11 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
- if (obj->ops->print_obj) {
+ if (obj->ops->timeline_value_str) {
+ char value[64];
+ obj->ops->timeline_value_str(obj, value, sizeof(value));
+ seq_printf(s, ": %s", value);
+ } else if (obj->ops->print_obj) {
seq_printf(s, ": ");
obj->ops->print_obj(s, obj);
}
@@ -726,7 +904,8 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
struct list_head *pos;
unsigned long flags;
- seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
+ seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+ sync_status_str(fence->status));
list_for_each(pos, &fence->pt_list_head) {
struct sync_pt *pt =
@@ -740,8 +919,7 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
container_of(pos, struct sync_fence_waiter,
waiter_list);
- seq_printf(s, "waiter %pF %p\n", waiter->callback,
- waiter->callback_data);
+ seq_printf(s, "waiter %pF\n", waiter->callback);
}
spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
@@ -795,7 +973,34 @@ static __init int sync_debugfs_init(void)
debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
return 0;
}
-
late_initcall(sync_debugfs_init);
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+ struct seq_file s = {
+ .buf = sync_dump_buf,
+ .size = sizeof(sync_dump_buf) - 1,
+ };
+ int i;
+
+ sync_debugfs_show(&s, NULL);
+
+ for (i = 0; i < s.count; i += DUMP_CHUNK) {
+ if ((s.count - i) > DUMP_CHUNK) {
+ char c = s.buf[i + DUMP_CHUNK];
+ s.buf[i + DUMP_CHUNK] = 0;
+ pr_cont("%s", s.buf + i);
+ s.buf[i + DUMP_CHUNK] = c;
+ } else {
+ s.buf[s.count] = 0;
+ pr_cont("%s", s.buf + i);
+ }
+ }
+}
+#else
+static void sync_dump(void)
+{
+}
#endif
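
The debugfs hunks near the end of the diff replace the free-form print_pt/print_obj
hooks with string-producing timeline_value_str and pt_value_str callbacks. A minimal
sketch of a timeline driver wiring these up, assuming the op signatures used above
(struct my_timeline, struct my_pt and their value fields are illustrative only):

#include <linux/kernel.h>
#include <linux/sync.h>

struct my_timeline {
	struct sync_timeline obj;
	u32 value;			/* last signaled value */
};

struct my_pt {
	struct sync_pt pt;
	u32 value;			/* value this point waits for */
};

static void my_timeline_value_str(struct sync_timeline *timeline,
				  char *str, int size)
{
	struct my_timeline *tl =
		container_of(timeline, struct my_timeline, obj);

	snprintf(str, size, "%u", tl->value);
}

static void my_pt_value_str(struct sync_pt *pt, char *str, int size)
{
	struct my_pt *mpt = container_of(pt, struct my_pt, pt);

	snprintf(str, size, "%u", mpt->value);
}

static struct sync_timeline_ops my_timeline_ops = {
	.driver_name		= "my_timeline",
	/* .dup, .has_signaled, .compare, .free_pt, etc. omitted */
	.timeline_value_str	= my_timeline_value_str,
	.pt_value_str		= my_pt_value_str,
};

With both callbacks present, sync_print_pt() prints the point value followed by the
timeline value, which is also what sync_dump() emits to the kernel log when a wait
hits an error or times out.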