author		Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>	2015-10-23 03:29:33 +0200
committer	Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>	2015-10-23 03:29:33 +0200
commit		15dfd0df63ce6847081d09b2bbd567cc0cc4eae1 (patch)
tree		3b73f24fcef970bfcace3cbb297cfa57f3994682 /fs/btrfs
parent		328aa7a45af61bc0060c80847daa67fef7b9c0d0 (diff)
parent		0149138c4142da287d23f9d5c6038f7fb5e30ac2 (diff)
initial merge with 3.2.72
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/backref.c	776
-rw-r--r--	fs/btrfs/backref.h	62
-rw-r--r--	fs/btrfs/reada.c	951
3 files changed, 1789 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
new file mode 100644
index 0000000..22c64ff
--- /dev/null
+++ b/fs/btrfs/backref.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "backref.h"
+
+struct __data_ref {
+ struct list_head list;
+ u64 inum;
+ u64 root;
+ u64 extent_data_item_offset;
+};
+
+struct __shared_ref {
+ struct list_head list;
+ u64 disk_byte;
+};
+
+static int __inode_info(u64 inum, u64 ioff, u8 key_type,
+ struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_key *found_key)
+{
+ int ret;
+ struct btrfs_key key;
+ struct extent_buffer *eb;
+
+ key.type = key_type;
+ key.objectid = inum;
+ key.offset = ioff;
+
+ ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ eb = path->nodes[0];
+ if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
+ ret = btrfs_next_leaf(fs_root, path);
+ if (ret)
+ return ret;
+ eb = path->nodes[0];
+ }
+
+ btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
+ if (found_key->type != key.type || found_key->objectid != key.objectid)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * this makes the path point to (inum INODE_ITEM ioff)
+ */
+int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+ struct btrfs_path *path)
+{
+ struct btrfs_key key;
+ return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
+ &key);
+}
+
+static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+ struct btrfs_path *path,
+ struct btrfs_key *found_key)
+{
+ return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
+ found_key);
+}
+
+/*
+ * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
+ * of the path are separated by '/' and the path is guaranteed to be
+ * 0-terminated. the path is only given within the current file system.
+ * Therefore, it never starts with a '/'. the caller is responsible for
+ * providing "size" bytes in "dest". the dest buffer will be filled backwards.
+ * finally, the start point of the resulting string is returned. this pointer
+ * is within dest, normally.
+ * in case the path buffer would overflow, the pointer is decremented further
+ * as if output was written to the buffer, though no more output is actually
+ * generated. that way, the caller can determine how much space would be
+ * required for the path to fit into the buffer. in that case, the returned
+ * value will be smaller than dest. callers must check this!
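+ *
+ * a minimal usage sketch (illustrative only; the buffer size is an arbitrary
+ * assumption):
+ *
+ *	char buf[4096];
+ *	char *name;
+ *
+ *	name = iref_to_path(fs_root, path, iref, eb, parent, buf, sizeof(buf));
+ *	if (IS_ERR(name))
+ *		return PTR_ERR(name);
+ *	if (name < buf)
+ *		return -ENAMETOOLONG;
+ *	printk(KERN_DEBUG "full path: %s\n", name);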
+ */
+static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
+{
+ u32 len;
+ int slot;
+ u64 next_inum;
+ int ret;
+ s64 bytes_left = size - 1;
+ struct extent_buffer *eb = eb_in;
+ struct btrfs_key found_key;
+
+ if (bytes_left >= 0)
+ dest[bytes_left] = '\0';
+
+ while (1) {
+ len = btrfs_inode_ref_name_len(eb, iref);
+ bytes_left -= len;
+ if (bytes_left >= 0)
+ read_extent_buffer(eb, dest + bytes_left,
+ (unsigned long)(iref + 1), len);
+ if (eb != eb_in)
+ free_extent_buffer(eb);
+ ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+ if (ret)
+ break;
+ next_inum = found_key.offset;
+
+ /* regular exit ahead */
+ if (parent == next_inum)
+ break;
+
+ slot = path->slots[0];
+ eb = path->nodes[0];
+ /* make sure we can use eb after releasing the path */
+ if (eb != eb_in)
+ atomic_inc(&eb->refs);
+ btrfs_release_path(path);
+
+ iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+ parent = next_inum;
+ --bytes_left;
+ if (bytes_left >= 0)
+ dest[bytes_left] = '/';
+ }
+
+ btrfs_release_path(path);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dest + bytes_left;
+}
+
+/*
+ * this makes the path point to (logical EXTENT_ITEM *)
+ * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
+ * tree blocks and <0 on error.
+ */
+int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+ struct btrfs_path *path, struct btrfs_key *found_key)
+{
+ int ret;
+ u64 flags;
+ u32 item_size;
+ struct extent_buffer *eb;
+ struct btrfs_extent_item *ei;
+ struct btrfs_key key;
+
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.objectid = logical;
+ key.offset = (u64)-1;
+
+ ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = btrfs_previous_item(fs_info->extent_root, path,
+ 0, BTRFS_EXTENT_ITEM_KEY);
+ if (ret < 0)
+ return ret;
+
+ btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
+ if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
+ found_key->objectid > logical ||
+ found_key->objectid + found_key->offset <= logical)
+ return -ENOENT;
+
+ eb = path->nodes[0];
+ item_size = btrfs_item_size_nr(eb, path->slots[0]);
+ BUG_ON(item_size < sizeof(*ei));
+
+ ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+ flags = btrfs_extent_flags(eb, ei);
+
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ return BTRFS_EXTENT_FLAG_TREE_BLOCK;
+ if (flags & BTRFS_EXTENT_FLAG_DATA)
+ return BTRFS_EXTENT_FLAG_DATA;
+
+ return -EIO;
+}
+
+/*
+ * helper function to iterate extent inline refs. ptr must point to a 0 value
+ * for the first call and may be modified. it is used to track state.
+ * if more refs exist, 0 is returned and the next call to
+ * __get_extent_inline_ref must pass the modified ptr parameter to get the
+ * next ref. after the last ref was processed, 1 is returned.
+ * returns <0 on error
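+ *
+ * a typical consumer loop, as a sketch (process_ref() is a placeholder for
+ * whatever the caller does with each ref):
+ *
+ *	unsigned long ptr = 0;
+ *	struct btrfs_extent_inline_ref *eiref;
+ *	int type;
+ *	int last;
+ *
+ *	do {
+ *		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
+ *					       &eiref, &type);
+ *		if (last < 0)
+ *			return last;
+ *		process_ref(eiref, type);
+ *	} while (!last);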
+ */
+static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+ struct btrfs_extent_item *ei, u32 item_size,
+ struct btrfs_extent_inline_ref **out_eiref,
+ int *out_type)
+{
+ unsigned long end;
+ u64 flags;
+ struct btrfs_tree_block_info *info;
+
+ if (!*ptr) {
+ /* first call */
+ flags = btrfs_extent_flags(eb, ei);
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *out_eiref =
+ (struct btrfs_extent_inline_ref *)(info + 1);
+ } else {
+ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+ *ptr = (unsigned long)*out_eiref;
+ if ((void *)*ptr >= (void *)ei + item_size)
+ return -ENOENT;
+ }
+
+ end = (unsigned long)ei + item_size;
+ *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
+ *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+
+ *ptr += btrfs_extent_inline_ref_size(*out_type);
+ WARN_ON(*ptr > end);
+ if (*ptr == end)
+ return 1; /* last */
+
+ return 0;
+}
+
+/*
+ * reads the tree block backref for an extent. tree level and root are returned
+ * through out_level and out_root. ptr must point to a 0 value for the first
+ * call and may be modified (see __get_extent_inline_ref comment).
+ * returns 0 if data was provided, 1 if there was no more data to provide or
+ * <0 on error.
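+ *
+ * a caller would typically loop until 1 is returned, as sketched below
+ * (handle_backref() is a placeholder for the caller's own handling):
+ *
+ *	unsigned long ptr = 0;
+ *	u64 root;
+ *	u8 level;
+ *	int ret;
+ *
+ *	while (!(ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+ *					       &root, &level)))
+ *		handle_backref(root, level);
+ *	if (ret < 0)
+ *		return ret;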
+ */
+int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+ struct btrfs_extent_item *ei, u32 item_size,
+ u64 *out_root, u8 *out_level)
+{
+ int ret;
+ int type;
+ struct btrfs_tree_block_info *info;
+ struct btrfs_extent_inline_ref *eiref;
+
+ if (*ptr == (unsigned long)-1)
+ return 1;
+
+ while (1) {
+ ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+ &eiref, &type);
+ if (ret < 0)
+ return ret;
+
+ if (type == BTRFS_TREE_BLOCK_REF_KEY ||
+ type == BTRFS_SHARED_BLOCK_REF_KEY)
+ break;
+
+ if (ret == 1)
+ return 1;
+ }
+
+ /* we can treat both ref types equally here */
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
+ *out_level = btrfs_tree_block_level(eb, info);
+
+ if (ret == 1)
+ *ptr = (unsigned long)-1;
+
+ return 0;
+}
+
+static int __data_list_add(struct list_head *head, u64 inum,
+ u64 extent_data_item_offset, u64 root)
+{
+ struct __data_ref *ref;
+
+ ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->inum = inum;
+ ref->extent_data_item_offset = extent_data_item_offset;
+ ref->root = root;
+ list_add_tail(&ref->list, head);
+
+ return 0;
+}
+
+static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
+ struct btrfs_extent_data_ref *dref)
+{
+ return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
+ btrfs_extent_data_ref_offset(eb, dref),
+ btrfs_extent_data_ref_root(eb, dref));
+}
+
+static int __shared_list_add(struct list_head *head, u64 disk_byte)
+{
+ struct __shared_ref *ref;
+
+ ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->disk_byte = disk_byte;
+ list_add_tail(&ref->list, head);
+
+ return 0;
+}
+
+static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 inum,
+ u64 extent_data_item_offset,
+ u64 extent_offset,
+ struct btrfs_path *path,
+ struct list_head *data_refs,
+ iterate_extent_inodes_t *iterate,
+ void *ctx)
+{
+ u64 ref_root;
+ u32 item_size;
+ struct btrfs_key key;
+ struct extent_buffer *eb;
+ struct btrfs_extent_item *ei;
+ struct btrfs_extent_inline_ref *eiref;
+ struct __data_ref *ref;
+ int ret;
+ int type;
+ int last;
+ unsigned long ptr = 0;
+
+ WARN_ON(!list_empty(data_refs));
+ ret = extent_from_logical(fs_info, logical, path, &key);
+ if (ret & BTRFS_EXTENT_FLAG_DATA)
+ ret = -EIO;
+ if (ret < 0)
+ goto out;
+
+ eb = path->nodes[0];
+ ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+ item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+ ret = 0;
+ ref_root = 0;
+ /*
+ * as done in iterate_extent_inodes, we first build a list of refs to
+ * iterate, then free the path and then iterate them to avoid deadlocks.
+ */
+ do {
+ last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
+ &eiref, &type);
+ if (last < 0) {
+ ret = last;
+ goto out;
+ }
+ if (type == BTRFS_TREE_BLOCK_REF_KEY ||
+ type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
+ ret = __data_list_add(data_refs, inum,
+ extent_data_item_offset,
+ ref_root);
+ }
+ } while (!ret && !last);
+
+ btrfs_release_path(path);
+
+ if (ref_root == 0) {
+ printk(KERN_ERR "btrfs: failed to find tree block ref "
+ "for shared data backref %llu\n", logical);
+ WARN_ON(1);
+ ret = -EIO;
+ }
+
+out:
+ while (!list_empty(data_refs)) {
+ ref = list_first_entry(data_refs, struct __data_ref, list);
+ list_del(&ref->list);
+ if (!ret)
+ ret = iterate(ref->inum, extent_offset +
+ ref->extent_data_item_offset,
+ ref->root, ctx);
+ kfree(ref);
+ }
+
+ return ret;
+}
+
+static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 orig_extent_item_objectid,
+ u64 extent_offset, struct btrfs_path *path,
+ struct list_head *data_refs,
+ iterate_extent_inodes_t *iterate,
+ void *ctx)
+{
+ u64 disk_byte;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *eb;
+ int slot;
+ int nritems;
+ int ret;
+ int found = 0;
+
+ eb = read_tree_block(fs_info->tree_root, logical,
+ fs_info->tree_root->leafsize, 0);
+ if (!eb)
+ return -EIO;
+
+ /*
+ * from the shared data ref, we only have the leaf but we need
+ * the key. thus, we must look into all items and check whether we
+ * find one (or more) with a reference to our extent item.
+ */
+ nritems = btrfs_header_nritems(eb);
+ for (slot = 0; slot < nritems; ++slot) {
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+ fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ if (!fi) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (disk_byte != orig_extent_item_objectid) {
+ if (found)
+ break;
+ else
+ continue;
+ }
+ ++found;
+ ret = __iter_shared_inline_ref_inodes(fs_info, logical,
+ key.objectid,
+ key.offset,
+ extent_offset, path,
+ data_refs,
+ iterate, ctx);
+ if (ret)
+ break;
+ }
+
+ if (!found) {
+ printk(KERN_ERR "btrfs: failed to follow shared data backref "
+ "to parent %llu\n", logical);
+ WARN_ON(1);
+ ret = -EIO;
+ }
+
+ free_extent_buffer(eb);
+ return ret;
+}
+
+/*
+ * calls iterate() for every inode that references the extent identified by
+ * the given parameters. will use the path given as a parameter and return it
+ * released.
+ * when the iterator function returns a non-zero value, iteration stops.
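+ *
+ * a minimal iterator matching the iterate_extent_inodes_t signature might
+ * look like this (sketch for illustration):
+ *
+ *	static int dump_backref(u64 inum, u64 offset, u64 root, void *ctx)
+ *	{
+ *		printk(KERN_DEBUG "inum %llu offset %llu root %llu\n",
+ *		       (unsigned long long)inum, (unsigned long long)offset,
+ *		       (unsigned long long)root);
+ *		return 0;
+ *	}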
+ */
+int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ u64 extent_item_objectid,
+ u64 extent_offset,
+ iterate_extent_inodes_t *iterate, void *ctx)
+{
+ unsigned long ptr = 0;
+ int last;
+ int ret;
+ int type;
+ u64 logical;
+ u32 item_size;
+ struct btrfs_extent_inline_ref *eiref;
+ struct btrfs_extent_data_ref *dref;
+ struct extent_buffer *eb;
+ struct btrfs_extent_item *ei;
+ struct btrfs_key key;
+ struct list_head data_refs = LIST_HEAD_INIT(data_refs);
+ struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
+ struct __data_ref *ref_d;
+ struct __shared_ref *ref_s;
+
+ eb = path->nodes[0];
+ ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+ item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+ /* first we iterate the inline refs, ... */
+ do {
+ last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
+ &eiref, &type);
+ if (last == -ENOENT) {
+ ret = 0;
+ break;
+ }
+ if (last < 0) {
+ ret = last;
+ break;
+ }
+
+ if (type == BTRFS_EXTENT_DATA_REF_KEY) {
+ dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
+ ret = __data_list_add_eb(&data_refs, eb, dref);
+ } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
+ logical = btrfs_extent_inline_ref_offset(eb, eiref);
+ ret = __shared_list_add(&shared_refs, logical);
+ }
+ } while (!ret && !last);
+
+ /* ... then we proceed to in-tree references and ... */
+ while (!ret) {
+ ++path->slots[0];
+ if (path->slots[0] >= btrfs_header_nritems(eb)) {
+ ret = btrfs_next_leaf(fs_info->extent_root, path);
+ if (ret) {
+ if (ret == 1)
+ ret = 0; /* we're done */
+ break;
+ }
+ eb = path->nodes[0];
+ }
+ btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+ if (key.objectid != extent_item_objectid)
+ break;
+ if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
+ dref = btrfs_item_ptr(eb, path->slots[0],
+ struct btrfs_extent_data_ref);
+ ret = __data_list_add_eb(&data_refs, eb, dref);
+ } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
+ ret = __shared_list_add(&shared_refs, key.offset);
+ }
+ }
+
+ btrfs_release_path(path);
+
+ /*
+ * ... only at the very end we can process the refs we found. this is
+ * because the iterator function we call is allowed to make tree lookups
+ * and we have to avoid deadlocks. additionally, we need more tree
+ * lookups ourselves for shared data refs.
+ */
+ while (!list_empty(&data_refs)) {
+ ref_d = list_first_entry(&data_refs, struct __data_ref, list);
+ list_del(&ref_d->list);
+ if (!ret)
+ ret = iterate(ref_d->inum, extent_offset +
+ ref_d->extent_data_item_offset,
+ ref_d->root, ctx);
+ kfree(ref_d);
+ }
+
+ while (!list_empty(&shared_refs)) {
+ ref_s = list_first_entry(&shared_refs, struct __shared_ref,
+ list);
+ list_del(&ref_s->list);
+ if (!ret)
+ ret = __iter_shared_inline_ref(fs_info,
+ ref_s->disk_byte,
+ extent_item_objectid,
+ extent_offset, path,
+ &data_refs,
+ iterate, ctx);
+ kfree(ref_s);
+ }
+
+ return ret;
+}
+
+int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ iterate_extent_inodes_t *iterate, void *ctx)
+{
+ int ret;
+ u64 offset;
+ struct btrfs_key found_key;
+
+ ret = extent_from_logical(fs_info, logical, path,
+ &found_key);
+ if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ ret = -EINVAL;
+ if (ret < 0)
+ return ret;
+
+ offset = logical - found_key.objectid;
+ ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
+ offset, iterate, ctx);
+
+ return ret;
+}
+
+static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
+ struct btrfs_path *path,
+ iterate_irefs_t *iterate, void *ctx)
+{
+ int ret;
+ int slot;
+ u32 cur;
+ u32 len;
+ u32 name_len;
+ u64 parent = 0;
+ int found = 0;
+ struct extent_buffer *eb;
+ struct btrfs_item *item;
+ struct btrfs_inode_ref *iref;
+ struct btrfs_key found_key;
+
+ while (1) {
+ ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
+ &found_key);
+ if (ret < 0)
+ break;
+ if (ret) {
+ ret = found ? 0 : -ENOENT;
+ break;
+ }
+ ++found;
+
+ parent = found_key.offset;
+ slot = path->slots[0];
+ eb = path->nodes[0];
+ /* make sure we can use eb after releasing the path */
+ atomic_inc(&eb->refs);
+ btrfs_release_path(path);
+
+ item = btrfs_item_nr(eb, slot);
+ iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+
+ for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ /* path must be released before calling iterate()! */
+ ret = iterate(parent, iref, eb, ctx);
+ if (ret)
+ break;
+ len = sizeof(*iref) + name_len;
+ iref = (struct btrfs_inode_ref *)((char *)iref + len);
+ }
+ free_extent_buffer(eb);
+ if (ret)
+ break;
+ }
+
+ btrfs_release_path(path);
+
+ return ret;
+}
+
+/*
+ * returns 0 if the path could be dumped (possibly truncated)
+ * returns <0 in case of an error
+ */
+static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb, void *ctx)
+{
+ struct inode_fs_paths *ipath = ctx;
+ char *fspath;
+ char *fspath_min;
+ int i = ipath->fspath->elem_cnt;
+ const int s_ptr = sizeof(char *);
+ u32 bytes_left;
+
+ bytes_left = ipath->fspath->bytes_left > s_ptr ?
+ ipath->fspath->bytes_left - s_ptr : 0;
+
+ fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
+ fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
+ inum, fspath_min, bytes_left);
+ if (IS_ERR(fspath))
+ return PTR_ERR(fspath);
+
+ if (fspath > fspath_min) {
+ ipath->fspath->val[i] = (u64)(unsigned long)fspath;
+ ++ipath->fspath->elem_cnt;
+ ipath->fspath->bytes_left = fspath - fspath_min;
+ } else {
+ ++ipath->fspath->elem_missed;
+ ipath->fspath->bytes_missing += fspath_min - fspath;
+ ipath->fspath->bytes_left = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * this dumps all file system paths to the inode into the ipath struct, provided
+ * it has been created large enough. each path is zero-terminated and accessed
+ * from ipath->fspath->val[i].
+ * when it returns, there are ipath->fspath->elem_cnt number of paths available
+ * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
+ * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
+ * have been needed to return all paths.
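+ *
+ * a sketch of how a caller might consume the result (assuming the ipath was
+ * set up with init_ipath() beforehand):
+ *
+ *	ret = paths_from_inode(inum, ipath);
+ *	if (ret < 0)
+ *		return ret;
+ *	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
+ *		printk(KERN_DEBUG "path %d: %s\n", i,
+ *		       (char *)(unsigned long)ipath->fspath->val[i]);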
+ */
+int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
+{
+ return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
+ inode_to_path, ipath);
+}
+
+/*
+ * allocates space to return multiple file system paths for an inode.
+ * total_bytes to allocate are passed, note that space usable for actual path
+ * information will be total_bytes - sizeof(struct btrfs_data_container).
+ * the returned pointer must be freed with kfree() in the end.
+ */
+struct btrfs_data_container *init_data_container(u32 total_bytes)
+{
+ struct btrfs_data_container *data;
+ size_t alloc_bytes;
+
+ alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
+ data = kmalloc(alloc_bytes, GFP_NOFS);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (total_bytes >= sizeof(*data)) {
+ data->bytes_left = total_bytes - sizeof(*data);
+ data->bytes_missing = 0;
+ } else {
+ data->bytes_missing = sizeof(*data) - total_bytes;
+ data->bytes_left = 0;
+ }
+
+ data->elem_cnt = 0;
+ data->elem_missed = 0;
+
+ return data;
+}
+
+/*
+ * allocates space to return multiple file system paths for an inode.
+ * total_bytes to allocate are passed, note that space usable for actual path
+ * information will be total_bytes - sizeof(struct inode_fs_paths).
+ * the returned pointer must be freed with free_ipath() in the end.
+ */
+struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
+ struct btrfs_path *path)
+{
+ struct inode_fs_paths *ifp;
+ struct btrfs_data_container *fspath;
+
+ fspath = init_data_container(total_bytes);
+ if (IS_ERR(fspath))
+ return (void *)fspath;
+
+ ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+ if (!ifp) {
+ kfree(fspath);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ifp->btrfs_path = path;
+ ifp->fspath = fspath;
+ ifp->fs_root = fs_root;
+
+ return ifp;
+}
+
+void free_ipath(struct inode_fs_paths *ipath)
+{
+ kfree(ipath->fspath);
+ kfree(ipath);
+}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
new file mode 100644
index 0000000..9261883
--- /dev/null
+++ b/fs/btrfs/backref.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __BTRFS_BACKREF__
+#define __BTRFS_BACKREF__
+
+#include "ioctl.h"
+
+struct inode_fs_paths {
+ struct btrfs_path *btrfs_path;
+ struct btrfs_root *fs_root;
+ struct btrfs_data_container *fspath;
+};
+
+typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
+ void *ctx);
+typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb, void *ctx);
+
+int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+ struct btrfs_path *path);
+
+int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+ struct btrfs_path *path, struct btrfs_key *found_key);
+
+int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+ struct btrfs_extent_item *ei, u32 item_size,
+ u64 *out_root, u8 *out_level);
+
+int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ u64 extent_item_objectid,
+ u64 extent_offset,
+ iterate_extent_inodes_t *iterate, void *ctx);
+
+int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path,
+ iterate_extent_inodes_t *iterate, void *ctx);
+
+int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
+
+struct btrfs_data_container *init_data_container(u32 total_bytes);
+struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
+ struct btrfs_path *path);
+void free_ipath(struct inode_fs_paths *ipath);
+
+#endif
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
new file mode 100644
index 0000000..2373b39
--- /dev/null
+++ b/fs/btrfs/reada.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#undef DEBUG
+
+/*
+ * This is the implementation for the generic read ahead framework.
+ *
+ * To trigger a readahead, btrfs_reada_add must be called. It will start
+ * a read ahead for the given range [start, end) on tree root. The returned
+ * handle can either be used to wait on the readahead to finish
+ * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
+ *
+ * The read ahead works as follows:
+ * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
+ * reada_start_machine will then search for extents to prefetch and trigger
+ * some reads. When a read finishes for a node, all contained node/leaf
+ * pointers that lie in the given range will also be enqueued. The reads will
+ * be triggered in sequential order, thus giving a big win over a naive
+ * enumeration. It will also make use of multi-device layouts. Each disk
+ * will have its own read pointer and all disks will be utilized in parallel.
+ * Also, no two disks will read both sides of a mirror simultaneously, as this
+ * would waste seeking capacity. Instead both disks will read different parts
+ * of the filesystem.
+ * Any number of readaheads can be started in parallel. The read order will be
+ * determined globally, i.e. 2 parallel readaheads will normally finish faster
+ * than the 2 started one after another.
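+ *
+ * A typical use, as a sketch (the key range and root are up to the caller):
+ *
+ *	struct reada_control *rc;
+ *
+ *	rc = btrfs_reada_add(root, &key_start, &key_end);
+ *	if (IS_ERR(rc))
+ *		return PTR_ERR(rc);
+ *	if (synchronous)
+ *		btrfs_reada_wait(rc);
+ *	else
+ *		btrfs_reada_detach(rc);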
+ */
+
+#define MAX_MIRRORS 2
+#define MAX_IN_FLIGHT 6
+
+struct reada_extctl {
+ struct list_head list;
+ struct reada_control *rc;
+ u64 generation;
+};
+
+struct reada_extent {
+ u64 logical;
+ struct btrfs_key top;
+ u32 blocksize;
+ int err;
+ struct list_head extctl;
+ struct kref refcnt;
+ spinlock_t lock;
+ struct reada_zone *zones[MAX_MIRRORS];
+ int nzones;
+ struct btrfs_device *scheduled_for;
+};
+
+struct reada_zone {
+ u64 start;
+ u64 end;
+ u64 elems;
+ struct list_head list;
+ spinlock_t lock;
+ int locked;
+ struct btrfs_device *device;
+ struct btrfs_device *devs[MAX_MIRRORS]; /* full list, incl self */
+ int ndevs;
+ struct kref refcnt;
+};
+
+struct reada_machine_work {
+ struct btrfs_work work;
+ struct btrfs_fs_info *fs_info;
+};
+
+static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
+static void reada_control_release(struct kref *kref);
+static void reada_zone_release(struct kref *kref);
+static void reada_start_machine(struct btrfs_fs_info *fs_info);
+static void __reada_start_machine(struct btrfs_fs_info *fs_info);
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+ struct btrfs_key *top, int level, u64 generation);
+
+/* recurses */
+/* in case of err, eb might be NULL */
+static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+ u64 start, int err)
+{
+ int level = 0;
+ int nritems;
+ int i;
+ u64 bytenr;
+ u64 generation;
+ struct reada_extent *re;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct list_head list;
+ unsigned long index = start >> PAGE_CACHE_SHIFT;
+ struct btrfs_device *for_dev;
+
+ if (eb)
+ level = btrfs_header_level(eb);
+
+ /* find extent */
+ spin_lock(&fs_info->reada_lock);
+ re = radix_tree_lookup(&fs_info->reada_tree, index);
+ if (re)
+ kref_get(&re->refcnt);
+ spin_unlock(&fs_info->reada_lock);
+
+ if (!re)
+ return -1;
+
+ spin_lock(&re->lock);
+ /*
+ * just take the full list from the extent. afterwards we
+ * don't need the lock anymore
+ */
+ list_replace_init(&re->extctl, &list);
+ for_dev = re->scheduled_for;
+ re->scheduled_for = NULL;
+ spin_unlock(&re->lock);
+
+ if (err == 0) {
+ nritems = level ? btrfs_header_nritems(eb) : 0;
+ generation = btrfs_header_generation(eb);
+ /*
+ * FIXME: currently we just set nritems to 0 if this is a leaf,
+ * effectively ignoring the content. In a next step we could
+ * trigger more readahead depending on the content, e.g.
+ * fetch the checksums for the extents in the leaf.
+ */
+ } else {
+ /*
+ * this is the error case, the extent buffer has not been
+ * read correctly. We won't access anything from it and
+ * just cleanup our data structures. Effectively this will
+ * cut the branch below this node from read ahead.
+ */
+ nritems = 0;
+ generation = 0;
+ }
+
+ for (i = 0; i < nritems; i++) {
+ struct reada_extctl *rec;
+ u64 n_gen;
+ struct btrfs_key key;
+ struct btrfs_key next_key;
+
+ btrfs_node_key_to_cpu(eb, &key, i);
+ if (i + 1 < nritems)
+ btrfs_node_key_to_cpu(eb, &next_key, i + 1);
+ else
+ next_key = re->top;
+ bytenr = btrfs_node_blockptr(eb, i);
+ n_gen = btrfs_node_ptr_generation(eb, i);
+
+ list_for_each_entry(rec, &list, list) {
+ struct reada_control *rc = rec->rc;
+
+ /*
+ * if the generation doesn't match, just ignore this
+ * extctl. This will probably cut off a branch from
+ * prefetch. Alternatively one could start a new (sub-)
+ * prefetch for this branch, starting again from root.
+ * FIXME: move the generation check out of this loop
+ */
+#ifdef DEBUG
+ if (rec->generation != generation) {
+ printk(KERN_DEBUG "generation mismatch for "
+ "(%llu,%d,%llu) %llu != %llu\n",
+ key.objectid, key.type, key.offset,
+ rec->generation, generation);
+ }
+#endif
+ if (rec->generation == generation &&
+ btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
+ btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
+ reada_add_block(rc, bytenr, &next_key,
+ level - 1, n_gen);
+ }
+ }
+ /*
+ * free extctl records
+ */
+ while (!list_empty(&list)) {
+ struct reada_control *rc;
+ struct reada_extctl *rec;
+
+ rec = list_first_entry(&list, struct reada_extctl, list);
+ list_del(&rec->list);
+ rc = rec->rc;
+ kfree(rec);
+
+ kref_get(&rc->refcnt);
+ if (atomic_dec_and_test(&rc->elems)) {
+ kref_put(&rc->refcnt, reada_control_release);
+ wake_up(&rc->wait);
+ }
+ kref_put(&rc->refcnt, reada_control_release);
+
+ reada_extent_put(fs_info, re); /* one ref for each entry */
+ }
+ reada_extent_put(fs_info, re); /* our ref */
+ if (for_dev)
+ atomic_dec(&for_dev->reada_in_flight);
+
+ return 0;
+}
+
+/*
+ * start is passed separately in case eb is NULL, which may be the case with
+ * failed I/O
+ */
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+ u64 start, int err)
+{
+ int ret;
+
+ ret = __readahead_hook(root, eb, start, err);
+
+ reada_start_machine(root->fs_info);
+
+ return ret;
+}
+
+static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *dev, u64 logical,
+ struct btrfs_bio *bbio)
+{
+ int ret;
+ int looped = 0;
+ struct reada_zone *zone;
+ struct btrfs_block_group_cache *cache = NULL;
+ u64 start;
+ u64 end;
+ int i;
+
+again:
+ zone = NULL;
+ spin_lock(&fs_info->reada_lock);
+ ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
+ logical >> PAGE_CACHE_SHIFT, 1);
+ if (ret == 1)
+ kref_get(&zone->refcnt);
+ spin_unlock(&fs_info->reada_lock);
+
+ if (ret == 1) {
+ if (logical >= zone->start && logical < zone->end)
+ return zone;
+ spin_lock(&fs_info->reada_lock);
+ kref_put(&zone->refcnt, reada_zone_release);
+ spin_unlock(&fs_info->reada_lock);
+ }
+
+ if (looped)
+ return NULL;
+
+ cache = btrfs_lookup_block_group(fs_info, logical);
+ if (!cache)
+ return NULL;
+
+ start = cache->key.objectid;
+ end = start + cache->key.offset - 1;
+ btrfs_put_block_group(cache);
+
+ zone = kzalloc(sizeof(*zone), GFP_NOFS);
+ if (!zone)
+ return NULL;
+
+ zone->start = start;
+ zone->end = end;
+ INIT_LIST_HEAD(&zone->list);
+ spin_lock_init(&zone->lock);
+ zone->locked = 0;
+ kref_init(&zone->refcnt);
+ zone->elems = 0;
+ zone->device = dev; /* our device always sits at index 0 */
+ for (i = 0; i < bbio->num_stripes; ++i) {
+ /* bounds have already been checked */
+ zone->devs[i] = bbio->stripes[i].dev;
+ }
+ zone->ndevs = bbio->num_stripes;
+
+ spin_lock(&fs_info->reada_lock);
+ ret = radix_tree_insert(&dev->reada_zones,
+ (unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+ zone);
+ spin_unlock(&fs_info->reada_lock);
+
+ if (ret) {
+ kfree(zone);
+ looped = 1;
+ goto again;
+ }
+
+ return zone;
+}
+
+static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+ u64 logical,
+ struct btrfs_key *top, int level)
+{
+ int ret;
+ int looped = 0;
+ struct reada_extent *re = NULL;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct btrfs_bio *bbio = NULL;
+ struct btrfs_device *dev;
+ u32 blocksize;
+ u64 length;
+ int nzones = 0;
+ int i;
+ unsigned long index = logical >> PAGE_CACHE_SHIFT;
+
+again:
+ spin_lock(&fs_info->reada_lock);
+ re = radix_tree_lookup(&fs_info->reada_tree, index);
+ if (re)
+ kref_get(&re->refcnt);
+ spin_unlock(&fs_info->reada_lock);
+
+ if (re || looped)
+ return re;
+
+ re = kzalloc(sizeof(*re), GFP_NOFS);
+ if (!re)
+ return NULL;
+
+ blocksize = btrfs_level_size(root, level);
+ re->logical = logical;
+ re->blocksize = blocksize;
+ re->top = *top;
+ INIT_LIST_HEAD(&re->extctl);
+ spin_lock_init(&re->lock);
+ kref_init(&re->refcnt);
+
+ /*
+ * map block
+ */
+ length = blocksize;
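+ /* a REQ_WRITE mapping returns all mirror stripes of the block in bbio */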
+ ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
+ if (ret || !bbio || length < blocksize)
+ goto error;
+
+ if (bbio->num_stripes > MAX_MIRRORS) {
+ printk(KERN_ERR "btrfs readahead: more than %d copies not "
+ "supported", MAX_MIRRORS);
+ goto error;
+ }
+
+ for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
+ struct reada_zone *zone;
+
+ dev = bbio->stripes[nzones].dev;
+ zone = reada_find_zone(fs_info, dev, logical, bbio);
+ if (!zone)
+ break;
+
+ re->zones[nzones] = zone;
+ spin_lock(&zone->lock);
+ if (!zone->elems)
+ kref_get(&zone->refcnt);
+ ++zone->elems;
+ spin_unlock(&zone->lock);
+ spin_lock(&fs_info->reada_lock);
+ kref_put(&zone->refcnt, reada_zone_release);
+ spin_unlock(&fs_info->reada_lock);
+ }
+ re->nzones = nzones;
+ if (nzones == 0) {
+ /* not a single zone found, error out */
+ goto error;
+ }
+
+ /* insert extent in reada_tree + all per-device trees, all or nothing */
+ spin_lock(&fs_info->reada_lock);
+ ret = radix_tree_insert(&fs_info->reada_tree, index, re);
+ if (ret) {
+ spin_unlock(&fs_info->reada_lock);
+ if (ret != -ENOMEM) {
+ /* someone inserted the extent in the meantime */
+ looped = 1;
+ }
+ goto error;
+ }
+ for (i = 0; i < nzones; ++i) {
+ dev = bbio->stripes[i].dev;
+ ret = radix_tree_insert(&dev->reada_extents, index, re);
+ if (ret) {
+ while (--i >= 0) {
+ dev = bbio->stripes[i].dev;
+ BUG_ON(dev == NULL);
+ radix_tree_delete(&dev->reada_extents, index);
+ }
+ BUG_ON(fs_info == NULL);
+ radix_tree_delete(&fs_info->reada_tree, index);
+ spin_unlock(&fs_info->reada_lock);
+ goto error;
+ }
+ }
+ spin_unlock(&fs_info->reada_lock);
+
+ kfree(bbio);
+ return re;
+
+error:
+ while (nzones) {
+ struct reada_zone *zone;
+
+ --nzones;
+ zone = re->zones[nzones];
+ kref_get(&zone->refcnt);
+ spin_lock(&zone->lock);
+ --zone->elems;
+ if (zone->elems == 0) {
+ /*
+ * no fs_info->reada_lock needed, as this can't be
+ * the last ref
+ */
+ kref_put(&zone->refcnt, reada_zone_release);
+ }
+ spin_unlock(&zone->lock);
+
+ spin_lock(&fs_info->reada_lock);
+ kref_put(&zone->refcnt, reada_zone_release);
+ spin_unlock(&fs_info->reada_lock);
+ }
+ kfree(bbio);
+ kfree(re);
+ if (looped)
+ goto again;
+ return NULL;
+}
+
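+/*
+ * intentionally empty release function: reada_extent_put does the teardown
+ * itself and uses kref_put only to test whether the refcount dropped to zero
+ */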
+static void reada_kref_dummy(struct kref *kr)
+{
+}
+
+static void reada_extent_put(struct btrfs_fs_info *fs_info,
+ struct reada_extent *re)
+{
+ int i;
+ unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+
+ spin_lock(&fs_info->reada_lock);
+ if (!kref_put(&re->refcnt, reada_kref_dummy)) {
+ spin_unlock(&fs_info->reada_lock);
+ return;
+ }
+
+ radix_tree_delete(&fs_info->reada_tree, index);
+ for (i = 0; i < re->nzones; ++i) {
+ struct reada_zone *zone = re->zones[i];
+
+ radix_tree_delete(&zone->device->reada_extents, index);
+ }
+
+ spin_unlock(&fs_info->reada_lock);
+
+ for (i = 0; i < re->nzones; ++i) {
+ struct reada_zone *zone = re->zones[i];
+
+ kref_get(&zone->refcnt);
+ spin_lock(&zone->lock);
+ --zone->elems;
+ if (zone->elems == 0) {
+ /* no fs_info->reada_lock needed, as this can't be
+ * the last ref */
+ kref_put(&zone->refcnt, reada_zone_release);
+ }
+ spin_unlock(&zone->lock);
+
+ spin_lock(&fs_info->reada_lock);
+ kref_put(&zone->refcnt, reada_zone_release);
+ spin_unlock(&fs_info->reada_lock);
+ }
+ if (re->scheduled_for)
+ atomic_dec(&re->scheduled_for->reada_in_flight);
+
+ kfree(re);
+}
+
+static void reada_zone_release(struct kref *kref)
+{
+ struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+
+ radix_tree_delete(&zone->device->reada_zones,
+ zone->end >> PAGE_CACHE_SHIFT);
+
+ kfree(zone);
+}
+
+static void reada_control_release(struct kref *kref)
+{
+ struct reada_control *rc = container_of(kref, struct reada_control,
+ refcnt);
+
+ kfree(rc);
+}
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+ struct btrfs_key *top, int level, u64 generation)
+{
+ struct btrfs_root *root = rc->root;
+ struct reada_extent *re;
+ struct reada_extctl *rec;
+
+ re = reada_find_extent(root, logical, top, level); /* takes one ref */
+ if (!re)
+ return -1;
+
+ rec = kzalloc(sizeof(*rec), GFP_NOFS);
+ if (!rec) {
+ reada_extent_put(root->fs_info, re);
+ return -1;
+ }
+
+ rec->rc = rc;
+ rec->generation = generation;
+ atomic_inc(&rc->elems);
+
+ spin_lock(&re->lock);
+ list_add_tail(&rec->list, &re->extctl);
+ spin_unlock(&re->lock);
+
+ /* leave the ref on the extent */
+
+ return 0;
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
+{
+ int i;
+ unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+
+ for (i = 0; i < zone->ndevs; ++i) {
+ struct reada_zone *peer;
+ peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
+ if (peer && peer->device != zone->device)
+ peer->locked = lock;
+ }
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static int reada_pick_zone(struct btrfs_device *dev)
+{
+ struct reada_zone *top_zone = NULL;
+ struct reada_zone *top_locked_zone = NULL;
+ u64 top_elems = 0;
+ u64 top_locked_elems = 0;
+ unsigned long index = 0;
+ int ret;
+
+ if (dev->reada_curr_zone) {
+ reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
+ kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
+ dev->reada_curr_zone = NULL;
+ }
+ /* pick the zone with the most elements */
+ while (1) {
+ struct reada_zone *zone;
+
+ ret = radix_tree_gang_lookup(&dev->reada_zones,
+ (void **)&zone, index, 1);
+ if (ret == 0)
+ break;
+ index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+ if (zone->locked) {
+ if (zone->elems > top_locked_elems) {
+ top_locked_elems = zone->elems;
+ top_locked_zone = zone;
+ }
+ } else {
+ if (zone->elems > top_elems) {
+ top_elems = zone->elems;
+ top_zone = zone;
+ }
+ }
+ }
+ if (top_zone)
+ dev->reada_curr_zone = top_zone;
+ else if (top_locked_zone)
+ dev->reada_curr_zone = top_locked_zone;
+ else
+ return 0;
+
+ dev->reada_next = dev->reada_curr_zone->start;
+ kref_get(&dev->reada_curr_zone->refcnt);
+ reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
+
+ return 1;
+}
+
+static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *dev)
+{
+ struct reada_extent *re = NULL;
+ int mirror_num = 0;
+ struct extent_buffer *eb = NULL;
+ u64 logical;
+ u32 blocksize;
+ int ret;
+ int i;
+ int need_kick = 0;
+
+ spin_lock(&fs_info->reada_lock);
+ if (dev->reada_curr_zone == NULL) {
+ ret = reada_pick_zone(dev);
+ if (!ret) {
+ spin_unlock(&fs_info->reada_lock);
+ return 0;
+ }
+ }
+ /*
+ * FIXME currently we issue the reads one extent at a time. If we have
+ * a contiguous block of extents, we could also coalesce them or use
+ * plugging to speed things up
+ */
+ ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+ dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+ if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
+ ret = reada_pick_zone(dev);
+ if (!ret) {
+ spin_unlock(&fs_info->reada_lock);
+ return 0;
+ }
+ re = NULL;
+ ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+ dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+ }
+ if (ret == 0) {
+ spin_unlock(&fs_info->reada_lock);
+ return 0;
+ }
+ dev->reada_next = re->logical + re->blocksize;
+ kref_get(&re->refcnt);
+
+ spin_unlock(&fs_info->reada_lock);
+
+ /*
+ * find mirror num
+ */
+ for (i = 0; i < re->nzones; ++i) {
+ if (re->zones[i]->device == dev) {
+ mirror_num = i + 1;
+ break;
+ }
+ }
+ logical = re->logical;
+ blocksize = re->blocksize;
+
+ spin_lock(&re->lock);
+ if (re->scheduled_for == NULL) {
+ re->scheduled_for = dev;
+ need_kick = 1;
+ }
+ spin_unlock(&re->lock);
+
+ reada_extent_put(fs_info, re);
+
+ if (!need_kick)
+ return 0;
+
+ atomic_inc(&dev->reada_in_flight);
+ ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
+ mirror_num, &eb);
+ if (ret)
+ __readahead_hook(fs_info->extent_root, NULL, logical, ret);
+ else if (eb)
+ __readahead_hook(fs_info->extent_root, eb, eb->start, ret);
+
+ if (eb)
+ free_extent_buffer(eb);
+
+ return 1;
+}
+
+static void reada_start_machine_worker(struct btrfs_work *work)
+{
+ struct reada_machine_work *rmw;
+ struct btrfs_fs_info *fs_info;
+
+ rmw = container_of(work, struct reada_machine_work, work);
+ fs_info = rmw->fs_info;
+
+ kfree(rmw);
+
+ __reada_start_machine(fs_info);
+}
+
+static void __reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_device *device;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ u64 enqueued;
+ u64 total = 0;
+ int i;
+
+ do {
+ enqueued = 0;
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (atomic_read(&device->reada_in_flight) <
+ MAX_IN_FLIGHT)
+ enqueued += reada_start_machine_dev(fs_info,
+ device);
+ }
+ total += enqueued;
+ } while (enqueued && total < 10000);
+
+ if (enqueued == 0)
+ return;
+
+ /*
+ * If everything is already in the cache, this is effectively single
+ * threaded. To a) not hold the caller for too long and b) to utilize
+ * more cores, we broke the loop above after 10000 enqueued reads and now
+ * enqueue to workers to finish it. This will distribute the load to
+ * the cores.
+ */
+ for (i = 0; i < 2; ++i)
+ reada_start_machine(fs_info);
+}
+
+static void reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+ struct reada_machine_work *rmw;
+
+ rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+ if (!rmw) {
+ /* FIXME we cannot handle this properly right now */
+ BUG();
+ }
+ rmw->work.func = reada_start_machine_worker;
+ rmw->fs_info = fs_info;
+
+ btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
+}
+
+#ifdef DEBUG
+static void dump_devs(struct btrfs_fs_info *fs_info, int all)
+{
+ struct btrfs_device *device;
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ unsigned long index;
+ int ret;
+ int i;
+ int j;
+ int cnt;
+
+ spin_lock(&fs_info->reada_lock);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
+ atomic_read(&device->reada_in_flight));
+ index = 0;
+ while (1) {
+ struct reada_zone *zone;
+ ret = radix_tree_gang_lookup(&device->reada_zones,
+ (void **)&zone, index, 1);
+ if (ret == 0)
+ break;
+ printk(KERN_DEBUG " zone %llu-%llu elems %llu locked "
+ "%d devs", zone->start, zone->end, zone->elems,
+ zone->locked);
+ for (j = 0; j < zone->ndevs; ++j) {
+ printk(KERN_CONT " %lld",
+ zone->devs[j]->devid);
+ }
+ if (device->reada_curr_zone == zone)
+ printk(KERN_CONT " curr off %llu",
+ device->reada_next - zone->start);
+ printk(KERN_CONT "\n");
+ index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+ }
+ cnt = 0;
+ index = 0;
+ while (all) {
+ struct reada_extent *re = NULL;
+
+ ret = radix_tree_gang_lookup(&device->reada_extents,
+ (void **)&re, index, 1);
+ if (ret == 0)
+ break;
+ printk(KERN_DEBUG
+ " re: logical %llu size %u empty %d for %lld",
+ re->logical, re->blocksize,
+ list_empty(&re->extctl), re->scheduled_for ?
+ re->scheduled_for->devid : -1);
+
+ for (i = 0; i < re->nzones; ++i) {
+ printk(KERN_CONT " zone %llu-%llu devs",
+ re->zones[i]->start,
+ re->zones[i]->end);
+ for (j = 0; j < re->zones[i]->ndevs; ++j) {
+ printk(KERN_CONT " %lld",
+ re->zones[i]->devs[j]->devid);
+ }
+ }
+ printk(KERN_CONT "\n");
+ index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ if (++cnt > 15)
+ break;
+ }
+ }
+
+ index = 0;
+ cnt = 0;
+ while (all) {
+ struct reada_extent *re = NULL;
+
+ ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
+ index, 1);
+ if (ret == 0)
+ break;
+ if (!re->scheduled_for) {
+ index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ continue;
+ }
+ printk(KERN_DEBUG
+ "re: logical %llu size %u list empty %d for %lld",
+ re->logical, re->blocksize, list_empty(&re->extctl),
+ re->scheduled_for ? re->scheduled_for->devid : -1);
+ for (i = 0; i < re->nzones; ++i) {
+ printk(KERN_CONT " zone %llu-%llu devs",
+ re->zones[i]->start,
+ re->zones[i]->end);
+ for (j = 0; j < re->zones[i]->ndevs; ++j) {
+ printk(KERN_CONT " %lld",
+ re->zones[i]->devs[j]->devid);
+ }
+ }
+ printk(KERN_CONT "\n");
+ index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+ }
+ spin_unlock(&fs_info->reada_lock);
+}
+#endif
+
+/*
+ * interface
+ */
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+ struct btrfs_key *key_start, struct btrfs_key *key_end)
+{
+ struct reada_control *rc;
+ u64 start;
+ u64 generation;
+ int level;
+ struct extent_buffer *node;
+ static struct btrfs_key max_key = {
+ .objectid = (u64)-1,
+ .type = (u8)-1,
+ .offset = (u64)-1
+ };
+
+ rc = kzalloc(sizeof(*rc), GFP_NOFS);
+ if (!rc)
+ return ERR_PTR(-ENOMEM);
+
+ rc->root = root;
+ rc->key_start = *key_start;
+ rc->key_end = *key_end;
+ atomic_set(&rc->elems, 0);
+ init_waitqueue_head(&rc->wait);
+ kref_init(&rc->refcnt);
+ kref_get(&rc->refcnt); /* one ref for having elements */
+
+ node = btrfs_root_node(root);
+ start = node->start;
+ level = btrfs_header_level(node);
+ generation = btrfs_header_generation(node);
+ free_extent_buffer(node);
+
+ reada_add_block(rc, start, &max_key, level, generation);
+
+ reada_start_machine(root->fs_info);
+
+ return rc;
+}
+
+#ifdef DEBUG
+int btrfs_reada_wait(void *handle)
+{
+ struct reada_control *rc = handle;
+
+ while (atomic_read(&rc->elems)) {
+ wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
+ 5 * HZ);
+ dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+ }
+
+ dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+
+ kref_put(&rc->refcnt, reada_control_release);
+
+ return 0;
+}
+#else
+int btrfs_reada_wait(void *handle)
+{
+ struct reada_control *rc = handle;
+
+ while (atomic_read(&rc->elems)) {
+ wait_event(rc->wait, atomic_read(&rc->elems) == 0);
+ }
+
+ kref_put(&rc->refcnt, reada_control_release);
+
+ return 0;
+}
+#endif
+
+void btrfs_reada_detach(void *handle)
+{
+ struct reada_control *rc = handle;
+
+ kref_put(&rc->refcnt, reada_control_release);
+}