author    Linus Torvalds <torvalds@g5.osdl.org>  2005-09-07 17:31:27 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-09-07 17:31:27 -0700
commit    0481990b758628e12f4b0a9e15094e70cefc7cd1 (patch)
tree      67a4b4b7acc6a688b87ef2a2d3ec0e296e6e480c /fs
parent    db400b3c4ee89d384d9163836a55577abdae772d (diff)
parent    17fa53da1239b8712c5cebbd72a74c713b6c2db9 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c  227
1 file changed, 182 insertions(+), 45 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index bf3ec9d..a7d4fd3a 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 256
@@ -555,22 +556,34 @@ out_bmd:
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
- unsigned long uaddr, unsigned int len,
- int write_to_vm)
+static struct bio *__bio_map_user_iov(request_queue_t *q,
+ struct block_device *bdev,
+ struct sg_iovec *iov, int iov_count,
+ int write_to_vm)
{
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int ret, offset, i;
+ int i, j;
+ int nr_pages = 0;
struct page **pages;
struct bio *bio;
+ int cur_page = 0;
+ int ret, offset;
- /*
- * transfer and buffer must be aligned to at least hardsector
- * size for now, in the future we can relax this restriction
- */
- if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+
+ nr_pages += end - start;
+ /*
+ * transfer and buffer must be aligned to at least hardsector
+ * size for now, in the future we can relax this restriction
+ */
+ if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!nr_pages)
return ERR_PTR(-EINVAL);
bio = bio_alloc(GFP_KERNEL, nr_pages);
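
An aside on the per-segment page count computed in the hunk above: it rounds
the buffer's end address up and its start address down to page boundaries and
takes the difference in page numbers. A minimal standalone sketch of that
arithmetic, assuming 4 KiB pages (the PAGE_SHIFT definition and the example
address/length are illustrative, not from the patch):

	#include <stdio.h>

	#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		/* hypothetical buffer starting 16 bytes before a page boundary */
		unsigned long uaddr = 0x1ff0, len = 0x20;

		/* same arithmetic as __bio_map_user_iov: round the end up,
		 * the start down, difference is the number of pages touched */
		unsigned long end   = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		printf("%lu pages\n", end - start);  /* prints "2 pages" */
		return 0;
	}

A 32-byte buffer straddling one page boundary correctly counts as two pages,
which is why nr_pages is accumulated per iovec segment rather than derived
from the total length.
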
@@ -582,42 +595,54 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
if (!pages)
goto out;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr, nr_pages,
- write_to_vm, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret < nr_pages)
- goto out;
-
- bio->bi_bdev = bdev;
-
- offset = uaddr & ~PAGE_MASK;
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
-
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
+ memset(pages, 0, nr_pages * sizeof(struct page *));
+
+ for (i = 0; i < iov_count; i++) {
+ unsigned long uaddr = (unsigned long)iov[i].iov_base;
+ unsigned long len = iov[i].iov_len;
+ unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = uaddr >> PAGE_SHIFT;
+ const int local_nr_pages = end - start;
+ const int page_limit = cur_page + local_nr_pages;
+
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, uaddr,
+ local_nr_pages,
+ write_to_vm, 0, &pages[cur_page], NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (ret < local_nr_pages)
+ goto out_unmap;
+
+
+ offset = uaddr & ~PAGE_MASK;
+ for (j = cur_page; j < page_limit; j++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ /*
+ * sorry...
+ */
+ if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+ break;
+
+ len -= bytes;
+ offset = 0;
+ }
+ cur_page = j;
/*
- * sorry...
+ * release the pages we didn't map into the bio, if any
*/
- if (__bio_add_page(q, bio, pages[i], bytes, offset) < bytes)
- break;
-
- len -= bytes;
- offset = 0;
+ while (j < page_limit)
+ page_cache_release(pages[j++]);
}
- /*
- * release the pages we didn't map into the bio, if any
- */
- while (i < nr_pages)
- page_cache_release(pages[i++]);
-
kfree(pages);
/*
@@ -626,9 +651,17 @@ static struct bio *__bio_map_user(request_queue_t *q, struct block_device *bdev,
if (!write_to_vm)
bio->bi_rw |= (1 << BIO_RW);
+ bio->bi_bdev = bdev;
bio->bi_flags |= (1 << BIO_USER_MAPPED);
return bio;
-out:
+
+ out_unmap:
+ for (i = 0; i < nr_pages; i++) {
+ if(!pages[i])
+ break;
+ page_cache_release(pages[i]);
+ }
+ out:
kfree(pages);
bio_put(bio);
return ERR_PTR(ret);
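
Worth noting: the memset() of the pages array before any pinning is what makes
the out_unmap path above safe. get_user_pages() may pin fewer pages than
requested, and because every unused slot is NULL, the error path can stop at
the first NULL entry. The idiom, restated as a standalone sketch with the same
names:

	/* release-on-error: the array was zeroed up front, so the first
	 * NULL slot marks exactly where pinning stopped */
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
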
@@ -648,9 +681,33 @@ out:
struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
{
+ struct sg_iovec iov;
+
+ iov.iov_base = (__user void *)uaddr;
+ iov.iov_len = len;
+
+ return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm);
+}
+
+/**
+ * bio_map_user_iov - map user sg_iovec table into bio
+ * @q: the request_queue_t for the bio
+ * @bdev: destination block device
+ * @iov: the iovec.
+ * @iov_count: number of elements in the iovec
+ * @write_to_vm: bool indicating writing to pages or not
+ *
+ * Map the user space address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+ struct sg_iovec *iov, int iov_count,
+ int write_to_vm)
+{
struct bio *bio;
+ int len = 0, i;
- bio = __bio_map_user(q, bdev, uaddr, len, write_to_vm);
+ bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
if (IS_ERR(bio))
return bio;
@@ -663,6 +720,9 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
*/
bio_get(bio);
+ for (i = 0; i < iov_count; i++)
+ len += iov[i].iov_len;
+
if (bio->bi_size == len)
return bio;
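
For context, a hypothetical caller of the new multi-segment interface might
look like the fragment below; q, bdev, the two buffers, and the submission and
completion handling are all assumed, not part of the patch:

	/* map two user buffers into a single bio for a read (device -> user) */
	struct sg_iovec iov[2];
	struct bio *bio;

	iov[0].iov_base = buf0;  iov[0].iov_len = len0;
	iov[1].iov_base = buf1;  iov[1].iov_len = len1;

	bio = bio_map_user_iov(q, bdev, iov, 2, 1);  /* write_to_vm = 1 */
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... submit the bio and wait for completion ... */

	bio_unmap_user(bio);  /* drops the page references taken above */
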
@@ -707,6 +767,82 @@ void bio_unmap_user(struct bio *bio)
bio_put(bio);
}
+static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
+{
+ if (bio->bi_size)
+ return 1;
+
+ bio_put(bio);
+ return 0;
+}
+
+
+static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+ unsigned int len, unsigned int gfp_mask)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_alloc(gfp_mask, nr_pages);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (__bio_add_page(q, bio, virt_to_page(data), bytes,
+ offset) < bytes)
+ break;
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ bio->bi_end_io = bio_map_kern_endio;
+ return bio;
+}
+
+/**
+ * bio_map_kern - map kernel address into bio
+ * @q: the request_queue_t for the bio
+ * @data: pointer to buffer to map
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+ unsigned int gfp_mask)
+{
+ struct bio *bio;
+
+ bio = __bio_map_kern(q, data, len, gfp_mask);
+ if (IS_ERR(bio))
+ return bio;
+
+ if (bio->bi_size == len)
+ return bio;
+
+ /*
+ * Don't support partial mappings.
+ */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+}
+
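
Again for context, a sketch of a hypothetical bio_map_kern() caller; buf,
buf_len, and q are assumed. Since the helper uses virt_to_page(), the buffer
must be linearly mapped kernel memory (kmalloc-style, not vmalloc):

	/* map a kmalloc'ed buffer for block I/O */
	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* partial mappings are rejected, so on success bi_size == buf_len;
	 * bio_map_kern_endio puts the bio when the I/O completes */
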
/*
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
@@ -1095,6 +1231,7 @@ EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
+EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_split_pool);