path: root/drivers/staging/iio/ring_sw.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 12:49:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-23 12:49:28 -0700
commit     1f3a8e093f470ef193b0ca6011d90180331c8b53 (patch)
tree       66b7a58decabdc7f76ffb102899881c258c1df59 /drivers/staging/iio/ring_sw.c
parent     c44dead70a841d90ddc01968012f323c33217c9e (diff)
parent     1a4b6f66285785ddccef049e6b45be4e7c7a2189 (diff)
Merge branch 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6: (970 commits)
  staging: usbip: replace usbip_u{dbg,err,info} and printk with dev_ and pr_
  staging:iio: Trivial kconfig reorganization and uniformity improvements.
  staging:iio:documenation partial update.
  staging:iio: use pollfunc allocation helpers in remaining drivers.
  staging:iio:max1363 misc cleanups and use of for_each_bit_set to simplify event code spitting out.
  staging:iio: implement an iio_info structure to take some of the constant elements out of iio_dev.
  staging:iio:meter:ade7758: Use private data space from iio_allocate_device
  staging:iio:accel:lis3l02dq make write_reg_8 take value not a pointer to value.
  staging:iio: ring core cleanups + check if read_last available in lis3l02dq
  staging:iio:core cleanup: squash tiny wrappers and use dev_set_name to handle creation of event interface name.
  staging:iio: poll func allocation clean up.
  staging:iio:ad7780 trivial unused header cleanup.
  staging:iio:adc: AD7780: Use private data space from iio_allocate_device + trivial fixes
  staging:iio:adc:AD7780: Convert to new channel registration method
  staging:iio:adc: AD7606: Drop dev_data in favour of iio_priv()
  staging:iio:adc: AD7606: Consitently use indio_dev
  staging:iio: Rip out helper for software rings.
  staging:iio:adc:AD7298: Use private data space from iio_allocate_device
  staging:iio: rationalization of different buffer implementation hooks.
  staging:iio:imu:adis16400 avoid allocating rx, tx, and state separately from iio_dev.
  ...

Fix up trivial conflicts in
 - drivers/staging/intel_sst/intelmid.c: patches applied in both branches
 - drivers/staging/rt2860/common/cmm_data_{pci,usb}.c: removed vs spelling
 - drivers/staging/usbip/vhci_sysfs.c: trivial header file inclusion
Diffstat (limited to 'drivers/staging/iio/ring_sw.c')
-rw-r--r--  drivers/staging/iio/ring_sw.c  205
1 file changed, 82 insertions(+), 123 deletions(-)
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index b71ce39..feb84e2 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -12,10 +12,41 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <linux/poll.h>
#include "ring_sw.h"
#include "trigger.h"
+/**
+ * struct iio_sw_ring_buffer - software ring buffer
+ * @buf: generic ring buffer elements
+ * @data: the ring buffer memory
+ * @read_p: read pointer (oldest available)
+ * @write_p: write pointer
+ * @last_written_p: read pointer (newest available)
+ * @half_p: half buffer length behind write_p (event generation)
+ * @use_count: reference count to prevent resizing when in use
+ * @update_needed: flag to indicate a change in size has been requested
+ * @use_lock: lock to prevent change in size when in use
+ *
+ * Note that the first element of all ring buffers must be a
+ * struct iio_ring_buffer.
+**/
+struct iio_sw_ring_buffer {
+ struct iio_ring_buffer buf;
+ unsigned char *data;
+ unsigned char *read_p;
+ unsigned char *write_p;
+ unsigned char *last_written_p;
+ /* used to act as a point at which to signal an event */
+ unsigned char *half_p;
+ int use_count;
+ int update_needed;
+ spinlock_t use_lock;
+};
+
+#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
+
static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
int bytes_per_datum, int length)
{
@@ -40,23 +71,21 @@ static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
kfree(ring->data);
}
-void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
+static void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
spin_lock(&ring->use_lock);
ring->use_count++;
spin_unlock(&ring->use_lock);
}
-EXPORT_SYMBOL(iio_mark_sw_rb_in_use);
-void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
+static void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
spin_lock(&ring->use_lock);
ring->use_count--;
spin_unlock(&ring->use_lock);
}
-EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
/* Ring buffer related functionality */
@@ -68,7 +97,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
unsigned char *data, s64 timestamp)
{
int ret = 0;
- int code;
unsigned char *temp_ptr, *change_test_ptr;
/* initial store */
@@ -123,14 +151,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
*/
if (change_test_ptr == ring->read_p)
ring->read_p = temp_ptr;
-
- spin_lock(&ring->buf.shared_ev_pointer.lock);
-
- ret = iio_push_or_escallate_ring_event(&ring->buf,
- IIO_EVENT_CODE_RING_100_FULL, timestamp);
- spin_unlock(&ring->buf.shared_ev_pointer.lock);
- if (ret)
- goto error_ret;
}
/* investigate if our event barrier has been passed */
/* There are definite 'issues' with this and chances of
@@ -140,41 +160,35 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
if (ring->half_p == ring->data + ring->buf.length*ring->buf.bytes_per_datum)
ring->half_p = ring->data;
if (ring->half_p == ring->read_p) {
- spin_lock(&ring->buf.shared_ev_pointer.lock);
- code = IIO_EVENT_CODE_RING_50_FULL;
- ret = __iio_push_event(&ring->buf.ev_int,
- code,
- timestamp,
- &ring->buf.shared_ev_pointer);
- spin_unlock(&ring->buf.shared_ev_pointer.lock);
+ ring->buf.stufftoread = true;
+ wake_up_interruptible(&ring->buf.pollq);
}
-error_ret:
return ret;
}
-int iio_rip_sw_rb(struct iio_ring_buffer *r,
- size_t count, char __user *buf, int *dead_offset)
+static int iio_read_first_n_sw_rb(struct iio_ring_buffer *r,
+ size_t n, char __user *buf)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
u8 *data;
- int ret, max_copied;
- int bytes_to_rip;
+ int ret, max_copied, bytes_to_rip, dead_offset;
/* A userspace program has probably made an error if it tries to
* read something that is not a whole number of bpds.
* Return an error.
*/
- if (count % ring->buf.bytes_per_datum) {
+ if (n % ring->buf.bytes_per_datum) {
ret = -EINVAL;
printk(KERN_INFO "Ring buffer read request not whole number of"
"samples: Request bytes %zd, Current bytes per datum %d\n",
- count, ring->buf.bytes_per_datum);
+ n, ring->buf.bytes_per_datum);
goto error_ret;
}
/* Limit size to whole of ring buffer */
- bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length), count);
+ bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
+ n);
data = kmalloc(bytes_to_rip, GFP_KERNEL);
if (data == NULL) {
@@ -240,9 +254,9 @@ int iio_rip_sw_rb(struct iio_ring_buffer *r,
current_read_p = ring->read_p;
if (initial_read_p <= current_read_p)
- *dead_offset = current_read_p - initial_read_p;
+ dead_offset = current_read_p - initial_read_p;
else
- *dead_offset = ring->buf.length*ring->buf.bytes_per_datum
+ dead_offset = ring->buf.length*ring->buf.bytes_per_datum
- (initial_read_p - current_read_p);
/* possible issue if the initial write has been lapped or indeed
@@ -250,7 +264,7 @@ int iio_rip_sw_rb(struct iio_ring_buffer *r,
/* No valid data read.
* In this case the read pointer is already correct having been
* pushed further than we would look. */
- if (max_copied - *dead_offset < 0) {
+ if (max_copied - dead_offset < 0) {
ret = 0;
goto error_free_data_cpy;
}
@@ -266,26 +280,30 @@ int iio_rip_sw_rb(struct iio_ring_buffer *r,
while (ring->read_p != end_read_p)
ring->read_p = end_read_p;
- ret = max_copied - *dead_offset;
+ ret = max_copied - dead_offset;
- if (copy_to_user(buf, data + *dead_offset, ret)) {
+ if (copy_to_user(buf, data + dead_offset, ret)) {
ret = -EFAULT;
goto error_free_data_cpy;
}
+
+ if (bytes_to_rip >= ring->buf.length*ring->buf.bytes_per_datum/2)
+ ring->buf.stufftoread = 0;
+
error_free_data_cpy:
kfree(data);
error_ret:
return ret;
}
-EXPORT_SYMBOL(iio_rip_sw_rb);
-int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
+static int iio_store_to_sw_rb(struct iio_ring_buffer *r,
+ u8 *data,
+ s64 timestamp)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
return iio_store_to_sw_ring(ring, data, timestamp);
}
-EXPORT_SYMBOL(iio_store_to_sw_rb);
static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
unsigned char *data)
@@ -309,18 +327,18 @@ again:
return 0;
}
-int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
+static int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
unsigned char *data)
{
return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
-EXPORT_SYMBOL(iio_read_last_from_sw_rb);
-int iio_request_update_sw_rb(struct iio_ring_buffer *r)
+static int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
int ret = 0;
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+ r->stufftoread = false;
spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
@@ -335,54 +353,49 @@ error_ret:
spin_unlock(&ring->use_lock);
return ret;
}
-EXPORT_SYMBOL(iio_request_update_sw_rb);
-int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
+static int iio_get_bytes_per_datum_sw_rb(struct iio_ring_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
return ring->buf.bytes_per_datum;
}
-EXPORT_SYMBOL(iio_get_bytes_per_datum_sw_rb);
-int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
+static int iio_set_bytes_per_datum_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
- if (r->access.mark_param_change)
- r->access.mark_param_change(r);
+ if (r->access->mark_param_change)
+ r->access->mark_param_change(r);
}
return 0;
}
-EXPORT_SYMBOL(iio_set_bytes_per_datum_sw_rb);
-int iio_get_length_sw_rb(struct iio_ring_buffer *r)
+static int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
return r->length;
}
-EXPORT_SYMBOL(iio_get_length_sw_rb);
-int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
+static int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access.mark_param_change)
- r->access.mark_param_change(r);
+ if (r->access->mark_param_change)
+ r->access->mark_param_change(r);
}
return 0;
}
-EXPORT_SYMBOL(iio_set_length_sw_rb);
-int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
+static int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
ring->update_needed = true;
return 0;
}
-EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);
static void iio_sw_rb_release(struct device *dev)
{
struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
+ iio_ring_access_release(&r->dev);
kfree(iio_to_sw_ring(r));
}
@@ -420,13 +433,12 @@ struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
ring = kzalloc(sizeof *ring, GFP_KERNEL);
if (!ring)
return NULL;
+ ring->update_needed = true;
buf = &ring->buf;
iio_ring_buffer_init(buf, indio_dev);
__iio_init_sw_ring_buffer(ring);
buf->dev.type = &iio_sw_ring_type;
- device_initialize(&buf->dev);
buf->dev.parent = &indio_dev->dev;
- buf->dev.bus = &iio_bus_type;
dev_set_drvdata(&buf->dev, (void *)buf);
return buf;
@@ -440,73 +452,20 @@ void iio_sw_rb_free(struct iio_ring_buffer *r)
}
EXPORT_SYMBOL(iio_sw_rb_free);
-int iio_sw_ring_preenable(struct iio_dev *indio_dev)
-{
- struct iio_ring_buffer *ring = indio_dev->ring;
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(ring->scan_count || ring->scan_timestamp))
- return -EINVAL;
- if (ring->scan_timestamp)
- if (ring->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((ring->scan_count * ring->bpe)
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = ring->scan_count * ring->bpe;
- ring->access.set_bytes_per_datum(ring, size);
-
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_ring_preenable);
-
-void iio_sw_trigger_bh_to_ring(struct work_struct *work_s)
-{
- struct iio_sw_ring_helper_state *st
- = container_of(work_s, struct iio_sw_ring_helper_state,
- work_trigger_to_ring);
- struct iio_ring_buffer *ring = st->indio_dev->ring;
- int len = 0;
- size_t datasize = ring->access.get_bytes_per_datum(ring);
- char *data = kmalloc(datasize, GFP_KERNEL);
-
- if (data == NULL) {
- dev_err(st->indio_dev->dev.parent,
- "memory alloc failed in ring bh");
- return;
- }
-
- if (ring->scan_count)
- len = st->get_ring_element(st, data);
-
- /* Guaranteed to be aligned with 8 byte boundary */
- if (ring->scan_timestamp)
- *(s64 *)(((phys_addr_t)data + len
- + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
- = st->last_timestamp;
- ring->access.store_to(ring,
- (u8 *)data,
- st->last_timestamp);
-
- iio_trigger_notify_done(st->indio_dev->trig);
- kfree(data);
-
- return;
-}
-EXPORT_SYMBOL(iio_sw_trigger_bh_to_ring);
-
-void iio_sw_poll_func_th(struct iio_dev *indio_dev, s64 time)
-{ struct iio_sw_ring_helper_state *h
- = iio_dev_get_devdata(indio_dev);
- h->last_timestamp = time;
- schedule_work(&h->work_trigger_to_ring);
-}
-EXPORT_SYMBOL(iio_sw_poll_func_th);
+const struct iio_ring_access_funcs ring_sw_access_funcs = {
+ .mark_in_use = &iio_mark_sw_rb_in_use,
+ .unmark_in_use = &iio_unmark_sw_rb_in_use,
+ .store_to = &iio_store_to_sw_rb,
+ .read_last = &iio_read_last_from_sw_rb,
+ .read_first_n = &iio_read_first_n_sw_rb,
+ .mark_param_change = &iio_mark_update_needed_sw_rb,
+ .request_update = &iio_request_update_sw_rb,
+ .get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
+ .set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
+ .get_length = &iio_get_length_sw_rb,
+ .set_length = &iio_set_length_sw_rb,
+};
+EXPORT_SYMBOL(ring_sw_access_funcs);
MODULE_DESCRIPTION("Industrialio I/O software ring buffer");
MODULE_LICENSE("GPL");
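
For context only: with the former per-operation exports removed by this merge, a driver in this staging tree selects the software ring implementation through the exported ring_sw_access_funcs table and invokes the operations via indio_dev->ring->access. Below is a minimal sketch of that pattern, assuming the staging-tree headers "iio.h" and "ring_sw.h" and the field/function names visible in the diff above; the example_* function names are placeholders, not real kernel symbols.

/*
 * Minimal sketch, not part of this commit: wiring up the software
 * ring buffer via the ring_sw_access_funcs table.
 */
#include <linux/kernel.h>
#include "iio.h"
#include "ring_sw.h"

static int example_configure_ring(struct iio_dev *indio_dev)
{
	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->ring)
		return -ENOMEM;

	/* Select the software ring implementation via its access table. */
	indio_dev->ring->access = &ring_sw_access_funcs;

	return 0;
}

/* A trigger/poll handler pushes samples through the same table. */
static int example_push_sample(struct iio_dev *indio_dev, u8 *data, s64 timestamp)
{
	struct iio_ring_buffer *ring = indio_dev->ring;

	return ring->access->store_to(ring, data, timestamp);
}

static void example_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_sw_rb_free(indio_dev->ring);
}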