Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	1996
1 files changed, 1164 insertions, 832 deletions
diff --git a/mm/shmem.c b/mm/shmem.c
index 83efac6..a858b67 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -6,8 +6,7 @@
* 2000-2001 Christoph Rohland
* 2000-2001 SAP AG
* 2002 Red Hat Inc.
- * Copyright (C) 2002-2011 Hugh Dickins.
- * Copyright (C) 2011 Google Inc.
+ * Copyright (C) 2002-2005 Hugh Dickins.
* Copyright (C) 2002-2005 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs
*
@@ -28,7 +27,8 @@
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/percpu_counter.h>
#include <linux/swap.h>
static struct vfsmount *shm_mnt;
@@ -51,9 +51,6 @@ static struct vfsmount *shm_mnt;
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
-#include <linux/pagevec.h>
-#include <linux/percpu_counter.h>
-#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
@@ -65,27 +62,42 @@ static struct vfsmount *shm_mnt;
#include <linux/magic.h>
#include <asm/uaccess.h>
+#include <asm/div64.h>
#include <asm/pgtable.h>
+/*
+ * The maximum size of a shmem/tmpfs file is limited by the maximum size of
+ * its triple-indirect swap vector - see illustration at shmem_swp_entry().
+ *
+ * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
+ * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
+ * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
+ * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
+ *
+ * We use / and * instead of shifts in the definitions below, so that the swap
+ * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
+ */
+#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
+#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
+
+#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
+#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
+
+#define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
+#define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
+
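
As a rough sanity check on those limits (a sketch only, assuming a 4kB PAGE_CACHE_SIZE, a 4-byte unsigned long as on a 32-bit kernel, and SHMEM_NR_DIRECT == 16; none of these values are fixed by this hunk):

	/*
	 * ENTRIES_PER_PAGE     = 4096 / 4                   = 1024
	 * ENTRIES_PER_PAGEPAGE = 1024 * 1024                = 1048576
	 * SHMSWP_MAX_INDEX     = 16 + (1048576/2) * (1024+1) = 537395216 pages
	 * SHMSWP_MAX_BYTES     = 537395216 << 12             ~ 2.0 TiB
	 *
	 * On a 64-bit kernel sizeof(unsigned long) doubles to 8, so
	 * ENTRIES_PER_PAGE halves to 512 and the same arithmetic yields
	 * roughly one eighth of that, as the comment above says.
	 */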
#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
-/* Pretend that each entry is of this size in directory's i_size */
-#define BOGO_DIRENT_SIZE 20
+/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
+#define SHMEM_PAGEIN VM_READ
+#define SHMEM_TRUNCATE VM_WRITE
-/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
-#define SHORT_SYMLINK_LEN 128
+/* Definition to limit shmem_truncate's steps between cond_rescheds */
+#define LATENCY_LIMIT 64
-/*
- * vmtruncate_range() communicates with shmem_fault via
- * inode->i_private (with i_mutex making sure that it has only one user at
- * a time): we would prefer not to enlarge the shmem inode just for that.
- */
-struct shmem_falloc {
- wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
- pgoff_t start; /* start of range currently being fallocated */
- pgoff_t next; /* the next page offset to be fallocated */
-};
+/* Pretend that each entry is of this size in directory's i_size */
+#define BOGO_DIRENT_SIZE 20
struct shmem_xattr {
struct list_head list; /* anchored by shmem_inode_info->xattr_list */
@@ -94,7 +106,7 @@ struct shmem_xattr {
char value[0];
};
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
@@ -114,14 +126,57 @@ static unsigned long shmem_default_max_inodes(void)
}
#endif
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
+static int shmem_getpage(struct inode *inode, unsigned long idx,
+ struct page **pagep, enum sgp_type sgp, int *type);
+
+static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
+{
+ /*
+ * The above definition of ENTRIES_PER_PAGE, and the use of
+ * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
+ * might be reconsidered if it ever diverges from PAGE_SIZE.
+ *
+ * Mobility flags are masked out as swap vectors cannot move
+ */
+ return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
+ PAGE_CACHE_SHIFT-PAGE_SHIFT);
+}
-static inline int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp, int *fault_type)
+static inline void shmem_dir_free(struct page *page)
{
- return shmem_getpage_gfp(inode, index, pagep, sgp,
- mapping_gfp_mask(inode->i_mapping), fault_type);
+ __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+}
+
+static struct page **shmem_dir_map(struct page *page)
+{
+ return (struct page **)kmap_atomic(page, KM_USER0);
+}
+
+static inline void shmem_dir_unmap(struct page **dir)
+{
+ kunmap_atomic(dir, KM_USER0);
+}
+
+static swp_entry_t *shmem_swp_map(struct page *page)
+{
+ return (swp_entry_t *)kmap_atomic(page, KM_USER1);
+}
+
+static inline void shmem_swp_balance_unmap(void)
+{
+ /*
+ * When passing a pointer to an i_direct entry, to code which
+ * also handles indirect entries and so will shmem_swp_unmap,
+ * we must arrange for the preempt count to remain in balance.
+ * What kmap_atomic of a lowmem page does depends on config
+ * and architecture, so pretend to kmap_atomic some lowmem page.
+ */
+ (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
+}
+
+static inline void shmem_swp_unmap(swp_entry_t *entry)
+{
+ kunmap_atomic(entry, KM_USER1);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
@@ -181,6 +236,17 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
+static void shmem_free_blocks(struct inode *inode, long pages)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ if (sbinfo->max_blocks) {
+ percpu_counter_add(&sbinfo->used_blocks, -pages);
+ spin_lock(&inode->i_lock);
+ inode->i_blocks -= pages*BLOCKS_PER_PAGE;
+ spin_unlock(&inode->i_lock);
+ }
+}
+
static int shmem_reserve_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
@@ -207,7 +273,7 @@ static void shmem_free_inode(struct super_block *sb)
}
/**
- * shmem_recalc_inode - recalculate the block usage of an inode
+ * shmem_recalc_inode - recalculate the size of an inode
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
@@ -225,335 +291,474 @@ static void shmem_recalc_inode(struct inode *inode)
freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
if (freed > 0) {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_blocks)
- percpu_counter_add(&sbinfo->used_blocks, -freed);
info->alloced -= freed;
- inode->i_blocks -= freed * BLOCKS_PER_PAGE;
shmem_unacct_blocks(info->flags, freed);
+ shmem_free_blocks(inode, freed);
}
}
-/*
- * Replace item expected in radix tree by a new item, while holding tree lock.
- */
-static int shmem_radix_tree_replace(struct address_space *mapping,
- pgoff_t index, void *expected, void *replacement)
-{
- void **pslot;
- void *item = NULL;
-
- VM_BUG_ON(!expected);
- pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
- if (pslot)
- item = radix_tree_deref_slot_protected(pslot,
- &mapping->tree_lock);
- if (item != expected)
- return -ENOENT;
- if (replacement)
- radix_tree_replace_slot(pslot, replacement);
- else
- radix_tree_delete(&mapping->page_tree, index);
- return 0;
-}
-
-/*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+/**
+ * shmem_swp_entry - find the swap vector position in the info structure
+ * @info: info structure for the inode
+ * @index: index of the page to find
+ * @page: optional page to add to the structure. Has to be preset to
+ * all zeros
+ *
+ * If there is no space allocated yet it will return NULL when
+ * page is NULL, else it will use the page for the needed block,
+ * setting it to NULL on return to indicate that it has been used.
+ *
+ * The swap vector is organized the following way:
+ *
+ * There are SHMEM_NR_DIRECT entries directly stored in the
+ * shmem_inode_info structure. So small files do not need an additional
+ * allocation.
+ *
+ * For pages with index > SHMEM_NR_DIRECT there is the pointer
+ * i_indirect which points to a page which holds in the first half
+ * doubly indirect blocks, in the second half triple indirect blocks:
+ *
+ * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
+ * following layout (for SHMEM_NR_DIRECT == 16):
+ *
+ * i_indirect -> dir --> 16-19
+ * | +-> 20-23
+ * |
+ * +-->dir2 --> 24-27
+ * | +-> 28-31
+ * | +-> 32-35
+ * | +-> 36-39
+ * |
+ * +-->dir3 --> 40-43
+ * +-> 44-47
+ * +-> 48-51
+ * +-> 52-55
*/
-static int shmem_add_to_page_cache(struct page *page,
- struct address_space *mapping,
- pgoff_t index, gfp_t gfp, void *expected)
+static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
- int error = 0;
-
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(!PageSwapBacked(page));
+ unsigned long offset;
+ struct page **dir;
+ struct page *subdir;
- if (!expected)
- error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
- if (!error) {
- page_cache_get(page);
- page->mapping = mapping;
- page->index = index;
-
- spin_lock_irq(&mapping->tree_lock);
- if (!expected)
- error = radix_tree_insert(&mapping->page_tree,
- index, page);
- else
- error = shmem_radix_tree_replace(mapping, index,
- expected, page);
- if (!error) {
- mapping->nrpages++;
- __inc_zone_page_state(page, NR_FILE_PAGES);
- __inc_zone_page_state(page, NR_SHMEM);
- spin_unlock_irq(&mapping->tree_lock);
- } else {
- page->mapping = NULL;
- spin_unlock_irq(&mapping->tree_lock);
- page_cache_release(page);
+ if (index < SHMEM_NR_DIRECT) {
+ shmem_swp_balance_unmap();
+ return info->i_direct+index;
+ }
+ if (!info->i_indirect) {
+ if (page) {
+ info->i_indirect = *page;
+ *page = NULL;
}
- if (!expected)
- radix_tree_preload_end();
+ return NULL; /* need another page */
}
- if (error)
- mem_cgroup_uncharge_cache_page(page);
- return error;
-}
-
-/*
- * Like delete_from_page_cache, but substitutes swap for page.
- */
-static void shmem_delete_from_page_cache(struct page *page, void *radswap)
-{
- struct address_space *mapping = page->mapping;
- int error;
- spin_lock_irq(&mapping->tree_lock);
- error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
- page->mapping = NULL;
- mapping->nrpages--;
- __dec_zone_page_state(page, NR_FILE_PAGES);
- __dec_zone_page_state(page, NR_SHMEM);
- spin_unlock_irq(&mapping->tree_lock);
- page_cache_release(page);
- BUG_ON(error);
-}
-
-/*
- * Like find_get_pages, but collecting swap entries as well as pages.
- */
-static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
- pgoff_t start, unsigned int nr_pages,
- struct page **pages, pgoff_t *indices)
-{
- unsigned int i;
- unsigned int ret;
- unsigned int nr_found;
-
- rcu_read_lock();
-restart:
- nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
- (void ***)pages, indices, start, nr_pages);
- ret = 0;
- for (i = 0; i < nr_found; i++) {
- struct page *page;
-repeat:
- page = radix_tree_deref_slot((void **)pages[i]);
- if (unlikely(!page))
- continue;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page))
- goto restart;
- /*
- * Otherwise, we must be storing a swap entry
- * here as an exceptional entry: so return it
- * without attempting to raise page count.
- */
- goto export;
+ index -= SHMEM_NR_DIRECT;
+ offset = index % ENTRIES_PER_PAGE;
+ index /= ENTRIES_PER_PAGE;
+ dir = shmem_dir_map(info->i_indirect);
+
+ if (index >= ENTRIES_PER_PAGE/2) {
+ index -= ENTRIES_PER_PAGE/2;
+ dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
+ index %= ENTRIES_PER_PAGE;
+ subdir = *dir;
+ if (!subdir) {
+ if (page) {
+ *dir = *page;
+ *page = NULL;
+ }
+ shmem_dir_unmap(dir);
+ return NULL; /* need another page */
}
- if (!page_cache_get_speculative(page))
- goto repeat;
+ shmem_dir_unmap(dir);
+ dir = shmem_dir_map(subdir);
+ }
- /* Has the page moved? */
- if (unlikely(page != *((void **)pages[i]))) {
- page_cache_release(page);
- goto repeat;
+ dir += index;
+ subdir = *dir;
+ if (!subdir) {
+ if (!page || !(subdir = *page)) {
+ shmem_dir_unmap(dir);
+ return NULL; /* need a page */
}
-export:
- indices[ret] = indices[i];
- pages[ret] = page;
- ret++;
- }
- if (unlikely(!ret && nr_found))
- goto restart;
- rcu_read_unlock();
- return ret;
+ *dir = subdir;
+ *page = NULL;
+ }
+ shmem_dir_unmap(dir);
+ return shmem_swp_map(subdir) + offset;
}
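
To make the walk above concrete, here is how shmem_swp_entry() decomposes a single index under the artificial ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16 layout drawn in its comment (a worked example, not code from this patch):

	/*
	 * index = 30:
	 *   index -= SHMEM_NR_DIRECT    -> 14
	 *   offset = 14 % 4             -> 2
	 *   index  = 14 / 4             -> 3   (>= ENTRIES_PER_PAGE/2,
	 *                                       so the triple-indirect half)
	 *   index -= 2                  -> 1
	 *   dir   += 2 + 1/4            -> slot 2 of i_indirect, i.e. dir2
	 *   index %= 4                  -> 1   -> dir2's second leaf (28-31)
	 *
	 * The returned pointer is shmem_swp_map(leaf 28-31) + 2, the swap
	 * slot for page index 30.
	 */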
-/*
- * Remove swap entry from radix tree, free the swap and its page cache.
- */
-static int shmem_free_swap(struct address_space *mapping,
- pgoff_t index, void *radswap)
+static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
- int error;
-
- spin_lock_irq(&mapping->tree_lock);
- error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
- spin_unlock_irq(&mapping->tree_lock);
- if (!error)
- free_swap_and_cache(radix_to_swp_entry(radswap));
- return error;
-}
+ long incdec = value? 1: -1;
-/*
- * Pagevec may contain swap entries, so shuffle up pages before releasing.
- */
-static void shmem_deswap_pagevec(struct pagevec *pvec)
-{
- int i, j;
-
- for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- if (!radix_tree_exceptional_entry(page))
- pvec->pages[j++] = page;
+ entry->val = value;
+ info->swapped += incdec;
+ if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
+ struct page *page = kmap_atomic_to_page(entry);
+ set_page_private(page, page_private(page) + incdec);
}
- pvec->nr = j;
}
-/*
- * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
+/**
+ * shmem_swp_alloc - get the position of the swap entry for the page.
+ * @info: info structure for the inode
+ * @index: index of the page to find
+ * @sgp: check and recheck i_size? skip allocation?
+ *
+ * If the entry does not exist, allocate it.
*/
-void shmem_unlock_mapping(struct address_space *mapping)
+static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
- struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
- pgoff_t index = 0;
+ struct inode *inode = &info->vfs_inode;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ struct page *page = NULL;
+ swp_entry_t *entry;
- pagevec_init(&pvec, 0);
- /*
- * Minor point, but we might as well stop if someone else SHM_LOCKs it.
- */
- while (!mapping_unevictable(mapping)) {
+ if (sgp != SGP_WRITE &&
+ ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+ return ERR_PTR(-EINVAL);
+
+ while (!(entry = shmem_swp_entry(info, index, &page))) {
+ if (sgp == SGP_READ)
+ return shmem_swp_map(ZERO_PAGE(0));
/*
- * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
- * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+		 * Test used_blocks against 1 less than max_blocks, since we have 1 data
+ * page (and perhaps indirect index pages) yet to allocate:
+ * a waste to allocate index if we cannot allocate data.
*/
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- PAGEVEC_SIZE, pvec.pages, indices);
- if (!pvec.nr)
+ if (sbinfo->max_blocks) {
+ if (percpu_counter_compare(&sbinfo->used_blocks,
+ sbinfo->max_blocks - 1) >= 0)
+ return ERR_PTR(-ENOSPC);
+ percpu_counter_inc(&sbinfo->used_blocks);
+ spin_lock(&inode->i_lock);
+ inode->i_blocks += BLOCKS_PER_PAGE;
+ spin_unlock(&inode->i_lock);
+ }
+
+ spin_unlock(&info->lock);
+ page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
+ spin_lock(&info->lock);
+
+ if (!page) {
+ shmem_free_blocks(inode, 1);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (sgp != SGP_WRITE &&
+ ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+ entry = ERR_PTR(-EINVAL);
break;
- index = indices[pvec.nr - 1] + 1;
- shmem_deswap_pagevec(&pvec);
- check_move_unevictable_pages(pvec.pages, pvec.nr);
- pagevec_release(&pvec);
- cond_resched();
+ }
+ if (info->next_index <= index)
+ info->next_index = index + 1;
+ }
+ if (page) {
+ /* another task gave its page, or truncated the file */
+ shmem_free_blocks(inode, 1);
+ shmem_dir_free(page);
}
+ if (info->next_index <= index && !IS_ERR(entry))
+ info->next_index = index + 1;
+ return entry;
}
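
A minimal sketch of the calling convention shmem_swp_alloc() expects (assembled from the code above and from shmem_getpage() further down, not a verbatim excerpt): the caller already holds info->lock, treats an ERR_PTR as failure, and releases the returned pointer with shmem_swp_unmap():

	spin_lock(&info->lock);
	entry = shmem_swp_alloc(info, index, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		return PTR_ERR(entry);
	}
	swap = *entry;			/* snapshot the swap entry, if any */
	shmem_swp_unmap(entry);
	spin_unlock(&info->lock);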
-/*
- * Remove range of pages and swap entries from radix tree, and free them.
+/**
+ * shmem_free_swp - free some swap entries in a directory
+ * @dir: pointer to the directory
+ * @edir: pointer after last entry of the directory
+ * @punch_lock: pointer to spinlock when needed for the holepunch case
*/
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
+ spinlock_t *punch_lock)
+{
+ spinlock_t *punch_unlock = NULL;
+ swp_entry_t *ptr;
+ int freed = 0;
+
+ for (ptr = dir; ptr < edir; ptr++) {
+ if (ptr->val) {
+ if (unlikely(punch_lock)) {
+ punch_unlock = punch_lock;
+ punch_lock = NULL;
+ spin_lock(punch_unlock);
+ if (!ptr->val)
+ continue;
+ }
+ free_swap_and_cache(*ptr);
+ *ptr = (swp_entry_t){0};
+ freed++;
+ }
+ }
+ if (punch_unlock)
+ spin_unlock(punch_unlock);
+ return freed;
+}
+
+static int shmem_map_and_free_swp(struct page *subdir, int offset,
+ int limit, struct page ***dir, spinlock_t *punch_lock)
+{
+ swp_entry_t *ptr;
+ int freed = 0;
+
+ ptr = shmem_swp_map(subdir);
+ for (; offset < limit; offset += LATENCY_LIMIT) {
+ int size = limit - offset;
+ if (size > LATENCY_LIMIT)
+ size = LATENCY_LIMIT;
+ freed += shmem_free_swp(ptr+offset, ptr+offset+size,
+ punch_lock);
+ if (need_resched()) {
+ shmem_swp_unmap(ptr);
+ if (*dir) {
+ shmem_dir_unmap(*dir);
+ *dir = NULL;
+ }
+ cond_resched();
+ ptr = shmem_swp_map(subdir);
+ }
+ }
+ shmem_swp_unmap(ptr);
+ return freed;
+}
+
+static void shmem_free_pages(struct list_head *next)
+{
+ struct page *page;
+ int freed = 0;
+
+ do {
+ page = container_of(next, struct page, lru);
+ next = next->next;
+ shmem_dir_free(page);
+ freed++;
+ if (freed >= LATENCY_LIMIT) {
+ cond_resched();
+ freed = 0;
+ }
+ } while (next);
+}
+
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
- struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
- pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
- struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
+ unsigned long idx;
+ unsigned long size;
+ unsigned long limit;
+ unsigned long stage;
+ unsigned long diroff;
+ struct page **dir;
+ struct page *topdir;
+ struct page *middir;
+ struct page *subdir;
+ swp_entry_t *ptr;
+ LIST_HEAD(pages_to_free);
+ long nr_pages_to_free = 0;
long nr_swaps_freed = 0;
- pgoff_t index;
- int i;
-
- BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
-
- pagevec_init(&pvec, 0);
- index = start;
- while (index <= end) {
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
- pvec.pages, indices);
- if (!pvec.nr)
- break;
- mem_cgroup_uncharge_start();
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
+ int offset;
+ int freed;
+ int punch_hole;
+ spinlock_t *needs_lock;
+ spinlock_t *punch_lock;
+ unsigned long upper_limit;
- index = indices[i];
- if (index > end)
- break;
+ truncate_inode_pages_range(inode->i_mapping, start, end);
- if (radix_tree_exceptional_entry(page)) {
- nr_swaps_freed += !shmem_free_swap(mapping,
- index, page);
- continue;
- }
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (idx >= info->next_index)
+ return;
- if (!trylock_page(page))
- continue;
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
- }
- unlock_page(page);
+ spin_lock(&info->lock);
+ info->flags |= SHMEM_TRUNCATE;
+ if (likely(end == (loff_t) -1)) {
+ limit = info->next_index;
+ upper_limit = SHMEM_MAX_INDEX;
+ info->next_index = idx;
+ needs_lock = NULL;
+ punch_hole = 0;
+ } else {
+ if (end + 1 >= inode->i_size) { /* we may free a little more */
+ limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ upper_limit = SHMEM_MAX_INDEX;
+ } else {
+ limit = (end + 1) >> PAGE_CACHE_SHIFT;
+ upper_limit = limit;
}
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- mem_cgroup_uncharge_end();
- cond_resched();
- index++;
+ needs_lock = &info->lock;
+ punch_hole = 1;
}
- if (partial) {
- struct page *page = NULL;
- shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
- if (page) {
- zero_user_segment(page, partial, PAGE_CACHE_SIZE);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- }
+ topdir = info->i_indirect;
+ if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
+ info->i_indirect = NULL;
+ nr_pages_to_free++;
+ list_add(&topdir->lru, &pages_to_free);
}
+ spin_unlock(&info->lock);
- index = start;
- while (index <= end) {
- cond_resched();
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
- pvec.pages, indices);
- if (!pvec.nr) {
- /* If all gone or hole-punch, we're done */
- if (index == start || end != -1)
- break;
- /* But if truncating, restart to make sure all gone */
- index = start;
- continue;
- }
- mem_cgroup_uncharge_start();
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
+ if (info->swapped && idx < SHMEM_NR_DIRECT) {
+ ptr = info->i_direct;
+ size = limit;
+ if (size > SHMEM_NR_DIRECT)
+ size = SHMEM_NR_DIRECT;
+ nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
+ }
- index = indices[i];
- if (index > end)
- break;
+ /*
+ * If there are no indirect blocks or we are punching a hole
+ * below indirect blocks, nothing to be done.
+ */
+ if (!topdir || limit <= SHMEM_NR_DIRECT)
+ goto done2;
- if (radix_tree_exceptional_entry(page)) {
- if (shmem_free_swap(mapping, index, page)) {
- /* Swap was replaced by page: retry */
- index--;
- break;
- }
- nr_swaps_freed++;
- continue;
+ /*
+ * The truncation case has already dropped info->lock, and we're safe
+ * because i_size and next_index have already been lowered, preventing
+ * access beyond. But in the punch_hole case, we still need to take
+ * the lock when updating the swap directory, because there might be
+ * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
+ * shmem_writepage. However, whenever we find we can remove a whole
+ * directory page (not at the misaligned start or end of the range),
+ * we first NULLify its pointer in the level above, and then have no
+ * need to take the lock when updating its contents: needs_lock and
+ * punch_lock (either pointing to info->lock or NULL) manage this.
+ */
+
+ upper_limit -= SHMEM_NR_DIRECT;
+ limit -= SHMEM_NR_DIRECT;
+ idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
+ offset = idx % ENTRIES_PER_PAGE;
+ idx -= offset;
+
+ dir = shmem_dir_map(topdir);
+ stage = ENTRIES_PER_PAGEPAGE/2;
+ if (idx < ENTRIES_PER_PAGEPAGE/2) {
+ middir = topdir;
+ diroff = idx/ENTRIES_PER_PAGE;
+ } else {
+ dir += ENTRIES_PER_PAGE/2;
+ dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
+ while (stage <= idx)
+ stage += ENTRIES_PER_PAGEPAGE;
+ middir = *dir;
+ if (*dir) {
+ diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
+ ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
+ if (!diroff && !offset && upper_limit >= stage) {
+ if (needs_lock) {
+ spin_lock(needs_lock);
+ *dir = NULL;
+ spin_unlock(needs_lock);
+ needs_lock = NULL;
+ } else
+ *dir = NULL;
+ nr_pages_to_free++;
+ list_add(&middir->lru, &pages_to_free);
}
+ shmem_dir_unmap(dir);
+ dir = shmem_dir_map(middir);
+ } else {
+ diroff = 0;
+ offset = 0;
+ idx = stage;
+ }
+ }
- lock_page(page);
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
- } else {
- /* Page was replaced by swap: retry */
- unlock_page(page);
- index--;
- break;
+ for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
+ if (unlikely(idx == stage)) {
+ shmem_dir_unmap(dir);
+ dir = shmem_dir_map(topdir) +
+ ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
+ while (!*dir) {
+ dir++;
+ idx += ENTRIES_PER_PAGEPAGE;
+ if (idx >= limit)
+ goto done1;
}
- unlock_page(page);
+ stage = idx + ENTRIES_PER_PAGEPAGE;
+ middir = *dir;
+ if (punch_hole)
+ needs_lock = &info->lock;
+ if (upper_limit >= stage) {
+ if (needs_lock) {
+ spin_lock(needs_lock);
+ *dir = NULL;
+ spin_unlock(needs_lock);
+ needs_lock = NULL;
+ } else
+ *dir = NULL;
+ nr_pages_to_free++;
+ list_add(&middir->lru, &pages_to_free);
+ }
+ shmem_dir_unmap(dir);
+ cond_resched();
+ dir = shmem_dir_map(middir);
+ diroff = 0;
}
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- mem_cgroup_uncharge_end();
- index++;
+ punch_lock = needs_lock;
+ subdir = dir[diroff];
+ if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
+ if (needs_lock) {
+ spin_lock(needs_lock);
+ dir[diroff] = NULL;
+ spin_unlock(needs_lock);
+ punch_lock = NULL;
+ } else
+ dir[diroff] = NULL;
+ nr_pages_to_free++;
+ list_add(&subdir->lru, &pages_to_free);
+ }
+ if (subdir && page_private(subdir) /* has swap entries */) {
+ size = limit - idx;
+ if (size > ENTRIES_PER_PAGE)
+ size = ENTRIES_PER_PAGE;
+ freed = shmem_map_and_free_swp(subdir,
+ offset, size, &dir, punch_lock);
+ if (!dir)
+ dir = shmem_dir_map(middir);
+ nr_swaps_freed += freed;
+ if (offset || punch_lock) {
+ spin_lock(&info->lock);
+ set_page_private(subdir,
+ page_private(subdir) - freed);
+ spin_unlock(&info->lock);
+ } else
+ BUG_ON(page_private(subdir) != freed);
+ }
+ offset = 0;
+ }
+done1:
+ shmem_dir_unmap(dir);
+done2:
+ if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
+ /*
+ * Call truncate_inode_pages again: racing shmem_unuse_inode
+ * may have swizzled a page in from swap since
+ * truncate_pagecache or generic_delete_inode did it, before we
+ * lowered next_index. Also, though shmem_getpage checks
+ * i_size before adding to cache, no recheck after: so fix the
+ * narrow window there too.
+ */
+ truncate_inode_pages_range(inode->i_mapping, start, end);
}
spin_lock(&info->lock);
+ info->flags &= ~SHMEM_TRUNCATE;
info->swapped -= nr_swaps_freed;
+ if (nr_pages_to_free)
+ shmem_free_blocks(inode, nr_pages_to_free);
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ /*
+ * Empty swap vector directory pages to be freed?
+ */
+ if (!list_empty(&pages_to_free)) {
+ pages_to_free.prev->next = NULL;
+ shmem_free_pages(pages_to_free.next);
+ }
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
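
From a caller's point of view, the two cases distinguished above (end == -1 for a full truncate, a bounded end for hole punching) look roughly like this; the callers and offsets are illustrative, not taken from this patch:

	/* truncate the file down to newsize bytes */
	shmem_truncate_range(inode, newsize, (loff_t)-1);

	/* punch a hole without changing i_size */
	shmem_truncate_range(inode, hole_start, hole_end);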
@@ -569,7 +774,37 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
loff_t oldsize = inode->i_size;
loff_t newsize = attr->ia_size;
+ struct page *page = NULL;
+ if (newsize < oldsize) {
+ /*
+ * If truncating down to a partial page, then
+ * if that page is already allocated, hold it
+ * in memory until the truncation is over, so
+ * truncate_partial_page cannot miss it were
+ * it assigned to swap.
+ */
+ if (newsize & (PAGE_CACHE_SIZE-1)) {
+ (void) shmem_getpage(inode,
+ newsize >> PAGE_CACHE_SHIFT,
+ &page, SGP_READ, NULL);
+ if (page)
+ unlock_page(page);
+ }
+ /*
+ * Reset SHMEM_PAGEIN flag so that shmem_truncate can
+ * detect if any pages might have been added to cache
+ * after truncate_inode_pages. But we needn't bother
+ * if it's being fully truncated to zero-length: the
+ * nrpages check is efficient enough in that case.
+ */
+ if (newsize) {
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ spin_lock(&info->lock);
+ info->flags &= ~SHMEM_PAGEIN;
+ spin_unlock(&info->lock);
+ }
+ }
if (newsize != oldsize) {
i_size_write(inode, newsize);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -581,6 +816,8 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
/* unmap again to remove racily COWed private pages */
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}
+ if (page)
+ page_cache_release(page);
}
setattr_copy(inode, attr);
@@ -605,39 +842,117 @@ static void shmem_evict_inode(struct inode *inode)
list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
- } else
- kfree(info->symlink);
+ }
list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
kfree(xattr->name);
kfree(xattr);
}
- WARN_ON(inode->i_blocks);
+ BUG_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
end_writeback(inode);
}
-/*
- * If swap found in inode, free it and move page from swapcache to filecache.
- */
-static int shmem_unuse_inode(struct shmem_inode_info *info,
- swp_entry_t swap, struct page *page)
+static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
- struct address_space *mapping = info->vfs_inode.i_mapping;
- void *radswap;
- pgoff_t index;
+ swp_entry_t *ptr;
+
+ for (ptr = dir; ptr < edir; ptr++) {
+ if (ptr->val == entry.val)
+ return ptr - dir;
+ }
+ return -1;
+}
+
+static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
+{
+ struct address_space *mapping;
+ unsigned long idx;
+ unsigned long size;
+ unsigned long limit;
+ unsigned long stage;
+ struct page **dir;
+ struct page *subdir;
+ swp_entry_t *ptr;
+ int offset;
int error;
- radswap = swp_to_radix_entry(swap);
- index = radix_tree_locate_item(&mapping->page_tree, radswap);
- if (index == -1)
- return 0;
+ idx = 0;
+ ptr = info->i_direct;
+ spin_lock(&info->lock);
+ if (!info->swapped) {
+ list_del_init(&info->swaplist);
+ goto lost2;
+ }
+ limit = info->next_index;
+ size = limit;
+ if (size > SHMEM_NR_DIRECT)
+ size = SHMEM_NR_DIRECT;
+ offset = shmem_find_swp(entry, ptr, ptr+size);
+ if (offset >= 0) {
+ shmem_swp_balance_unmap();
+ goto found;
+ }
+ if (!info->i_indirect)
+ goto lost2;
+
+ dir = shmem_dir_map(info->i_indirect);
+ stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
+
+ for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
+ if (unlikely(idx == stage)) {
+ shmem_dir_unmap(dir-1);
+ if (cond_resched_lock(&info->lock)) {
+ /* check it has not been truncated */
+ if (limit > info->next_index) {
+ limit = info->next_index;
+ if (idx >= limit)
+ goto lost2;
+ }
+ }
+ dir = shmem_dir_map(info->i_indirect) +
+ ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
+ while (!*dir) {
+ dir++;
+ idx += ENTRIES_PER_PAGEPAGE;
+ if (idx >= limit)
+ goto lost1;
+ }
+ stage = idx + ENTRIES_PER_PAGEPAGE;
+ subdir = *dir;
+ shmem_dir_unmap(dir);
+ dir = shmem_dir_map(subdir);
+ }
+ subdir = *dir;
+ if (subdir && page_private(subdir)) {
+ ptr = shmem_swp_map(subdir);
+ size = limit - idx;
+ if (size > ENTRIES_PER_PAGE)
+ size = ENTRIES_PER_PAGE;
+ offset = shmem_find_swp(entry, ptr, ptr+size);
+ shmem_swp_unmap(ptr);
+ if (offset >= 0) {
+ shmem_dir_unmap(dir);
+ ptr = shmem_swp_map(subdir);
+ goto found;
+ }
+ }
+ }
+lost1:
+ shmem_dir_unmap(dir-1);
+lost2:
+ spin_unlock(&info->lock);
+ return 0;
+found:
+ idx += offset;
+ ptr += offset;
/*
* Move _head_ to start search for next from here.
* But be careful: shmem_evict_inode checks list_empty without taking
* mutex, and there's an instant in list_move_tail when info->swaplist
- * would appear empty, if it were the only one on shmem_swaplist.
+ * would appear empty, if it were the only one on shmem_swaplist. We
+ * could avoid doing it if inode NULL; or use this minor optimization.
*/
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
@@ -647,34 +962,42 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
- error = shmem_add_to_page_cache(page, mapping, index,
- GFP_NOWAIT, radswap);
+ mapping = info->vfs_inode.i_mapping;
+ error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
/* which does mem_cgroup_uncharge_cache_page on error */
- if (error != -ENOMEM) {
- /*
- * Truncation and eviction use free_swap_and_cache(), which
- * only does trylock page: if we raced, best clean up here.
- */
+ if (error == -EEXIST) {
+ struct page *filepage = find_get_page(mapping, idx);
+ error = 1;
+ if (filepage) {
+ /*
+ * There might be a more uptodate page coming down
+ * from a stacked writepage: forget our swappage if so.
+ */
+ if (PageUptodate(filepage))
+ error = 0;
+ page_cache_release(filepage);
+ }
+ }
+ if (!error) {
delete_from_swap_cache(page);
set_page_dirty(page);
- if (!error) {
- spin_lock(&info->lock);
- info->swapped--;
- spin_unlock(&info->lock);
- swap_free(swap);
- }
+ info->flags |= SHMEM_PAGEIN;
+ shmem_swp_set(info, ptr, 0);
+ swap_free(entry);
error = 1; /* not an error, but entry was found */
}
+ shmem_swp_unmap(ptr);
+ spin_unlock(&info->lock);
return error;
}
/*
- * Search through swapped inodes to find and replace swap by page.
+ * shmem_unuse() searches for a possibly swapped-out shmem page.
*/
-int shmem_unuse(swp_entry_t swap, struct page *page)
+int shmem_unuse(swp_entry_t entry, struct page *page)
{
- struct list_head *this, *next;
+ struct list_head *p, *next;
struct shmem_inode_info *info;
int found = 0;
int error;
@@ -683,25 +1006,32 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
* Charge page using GFP_KERNEL while we can wait, before taking
* the shmem_swaplist_mutex which might hold up shmem_writepage().
* Charged back to the user (not to caller) when swap account is used.
+ * add_to_page_cache() will be called with GFP_NOWAIT.
*/
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
if (error)
goto out;
- /* No radix_tree_preload: swap entry keeps a place for page in tree */
+ /*
+ * Try to preload while we can wait, to not make a habit of
+ * draining atomic reserves; but don't latch on to this cpu,
+ * it's okay if sometimes we get rescheduled after this.
+ */
+ error = radix_tree_preload(GFP_KERNEL);
+ if (error)
+ goto uncharge;
+ radix_tree_preload_end();
mutex_lock(&shmem_swaplist_mutex);
- list_for_each_safe(this, next, &shmem_swaplist) {
- info = list_entry(this, struct shmem_inode_info, swaplist);
- if (info->swapped)
- found = shmem_unuse_inode(info, swap, page);
- else
- list_del_init(&info->swaplist);
+ list_for_each_safe(p, next, &shmem_swaplist) {
+ info = list_entry(p, struct shmem_inode_info, swaplist);
+ found = shmem_unuse_inode(info, entry, page);
cond_resched();
if (found)
break;
}
mutex_unlock(&shmem_swaplist_mutex);
+uncharge:
if (!found)
mem_cgroup_uncharge_cache_page(page);
if (found < 0)
@@ -718,10 +1048,10 @@ out:
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
struct shmem_inode_info *info;
+ swp_entry_t *entry, swap;
struct address_space *mapping;
+ unsigned long index;
struct inode *inode;
- swp_entry_t swap;
- pgoff_t index;
BUG_ON(!PageLocked(page));
mapping = page->mapping;
@@ -730,52 +1060,84 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+ /*
+ * Modification for compcache
+	 * shmem_writepage can be a cause of kernel panics when swap is in use.
+	 * This modification prevents shmem from using swap.
+ */
+ goto redirty;
+#else
if (!total_swap_pages)
goto redirty;
+#endif
/*
* shmem_backing_dev_info's capabilities prevent regular writeback or
* sync from ever calling shmem_writepage; but a stacking filesystem
- * might use ->writepage of its underlying filesystem, in which case
+ * may use the ->writepage of its underlying filesystem, in which case
* tmpfs should write out to swap only in response to memory pressure,
- * and not for the writeback threads or sync.
+ * and not for the writeback threads or sync. However, in those cases,
+ * we do still want to check if there's a redundant swappage to be
+ * discarded.
*/
- if (!wbc->for_reclaim) {
- WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
- goto redirty;
- }
- swap = get_swap_page();
- if (!swap.val)
- goto redirty;
+ if (wbc->for_reclaim)
+ swap = get_swap_page();
+ else
+ swap.val = 0;
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
- * if it's not already there. Do it now before the page is
- * moved to swap cache, when its pagelock no longer protects
+ * if it's not already there. Do it now because we cannot take
+ * mutex while holding spinlock, and must do so before the page
+ * is moved to swap cache, when its pagelock no longer protects
* the inode from eviction. But don't unlock the mutex until
- * we've incremented swapped, because shmem_unuse_inode() will
- * prune a !swapped inode from the swaplist under this mutex.
+ * we've taken the spinlock, because shmem_unuse_inode() will
+ * prune a !swapped inode from the swaplist under both locks.
*/
- mutex_lock(&shmem_swaplist_mutex);
- if (list_empty(&info->swaplist))
- list_add_tail(&info->swaplist, &shmem_swaplist);
+ if (swap.val) {
+ mutex_lock(&shmem_swaplist_mutex);
+ if (list_empty(&info->swaplist))
+ list_add_tail(&info->swaplist, &shmem_swaplist);
+ }
- if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
- swap_shmem_alloc(swap);
- shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+ spin_lock(&info->lock);
+ if (swap.val)
+ mutex_unlock(&shmem_swaplist_mutex);
- spin_lock(&info->lock);
- info->swapped++;
- shmem_recalc_inode(inode);
- spin_unlock(&info->lock);
+ if (index >= info->next_index) {
+ BUG_ON(!(info->flags & SHMEM_TRUNCATE));
+ goto unlock;
+ }
+ entry = shmem_swp_entry(info, index, NULL);
+ if (entry->val) {
+ /*
+ * The more uptodate page coming down from a stacked
+ * writepage should replace our old swappage.
+ */
+ free_swap_and_cache(*entry);
+ shmem_swp_set(info, entry, 0);
+ }
+ shmem_recalc_inode(inode);
- mutex_unlock(&shmem_swaplist_mutex);
+ if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+ delete_from_page_cache(page);
+ shmem_swp_set(info, entry, swap.val);
+ shmem_swp_unmap(entry);
+ swap_shmem_alloc(swap);
+ spin_unlock(&info->lock);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
}
- mutex_unlock(&shmem_swaplist_mutex);
+ shmem_swp_unmap(entry);
+unlock:
+ spin_unlock(&info->lock);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear SWAP_HAS_CACHE flag.
+ */
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
@@ -812,19 +1174,19 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
}
#endif /* CONFIG_TMPFS */
-static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = index;
+ pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
- page = swapin_readahead(swap, gfp, &pvma, 0);
+ page = swapin_readahead(entry, gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
@@ -833,16 +1195,16 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
}
static struct page *shmem_alloc_page(gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = index;
+ pvma.vm_pgoff = idx;
pvma.vm_ops = NULL;
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
page = alloc_page_vma(gfp, &pvma, 0);
@@ -853,19 +1215,19 @@ static struct page *shmem_alloc_page(gfp_t gfp,
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */
-static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- return swapin_readahead(swap, gfp, NULL, 0);
+ return swapin_readahead(entry, gfp, NULL, 0);
}
static inline struct page *shmem_alloc_page(gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
+ struct shmem_inode_info *info, unsigned long idx)
{
return alloc_page(gfp);
}
@@ -879,195 +1241,311 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
#endif
/*
- * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
+ * shmem_getpage - either get the page from swap or allocate a new one
*
* If we allocate a new one we do not mark it dirty. That's up to the
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache
*/
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
+static int shmem_getpage(struct inode *inode, unsigned long idx,
+ struct page **pagep, enum sgp_type sgp, int *type)
{
struct address_space *mapping = inode->i_mapping;
- struct shmem_inode_info *info;
+ struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo;
- struct page *page;
+ struct page *filepage = *pagep;
+ struct page *swappage;
+ struct page *prealloc_page = NULL;
+ swp_entry_t *entry;
swp_entry_t swap;
+ gfp_t gfp;
int error;
- int once = 0;
- if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+ if (idx >= SHMEM_MAX_INDEX)
return -EFBIG;
-repeat:
- swap.val = 0;
- page = find_lock_page(mapping, index);
- if (radix_tree_exceptional_entry(page)) {
- swap = radix_to_swp_entry(page);
- page = NULL;
- }
- if (sgp != SGP_WRITE &&
- ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
- error = -EINVAL;
- goto failed;
- }
+ if (type)
+ *type = 0;
- if (page || (sgp == SGP_READ && !swap.val)) {
+ /*
+ * Normally, filepage is NULL on entry, and either found
+ * uptodate immediately, or allocated and zeroed, or read
+ * in under swappage, which is then assigned to filepage.
+ * But shmem_readpage (required for splice) passes in a locked
+ * filepage, which may be found not uptodate by other callers
+ * too, and may need to be copied from the swappage read in.
+ */
+repeat:
+ if (!filepage)
+ filepage = find_lock_page(mapping, idx);
+ if (filepage && PageUptodate(filepage))
+ goto done;
+ gfp = mapping_gfp_mask(mapping);
+ if (!filepage) {
/*
- * Once we can get the page lock, it must be uptodate:
- * if there were an error in reading back from swap,
- * the page would not be inserted into the filecache.
+ * Try to preload while we can wait, to not make a habit of
+ * draining atomic reserves; but don't latch on to this cpu.
*/
- BUG_ON(page && !PageUptodate(page));
- *pagep = page;
- return 0;
+ error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+ if (error)
+ goto failed;
+ radix_tree_preload_end();
+ if (sgp != SGP_READ && !prealloc_page) {
+ /* We don't care if this fails */
+ prealloc_page = shmem_alloc_page(gfp, info, idx);
+ if (prealloc_page) {
+ if (mem_cgroup_cache_charge(prealloc_page,
+ current->mm, GFP_KERNEL)) {
+ page_cache_release(prealloc_page);
+ prealloc_page = NULL;
+ }
+ }
+ }
}
+ error = 0;
- /*
- * Fast cache lookup did not find it:
- * bring it back from swap or allocate.
- */
- info = SHMEM_I(inode);
- sbinfo = SHMEM_SB(inode->i_sb);
+ spin_lock(&info->lock);
+ shmem_recalc_inode(inode);
+ entry = shmem_swp_alloc(info, idx, sgp);
+ if (IS_ERR(entry)) {
+ spin_unlock(&info->lock);
+ error = PTR_ERR(entry);
+ goto failed;
+ }
+ swap = *entry;
if (swap.val) {
/* Look it up and read it in.. */
- page = lookup_swap_cache(swap);
- if (!page) {
+ swappage = lookup_swap_cache(swap);
+ if (!swappage) {
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
/* here we actually do the io */
- if (fault_type)
- *fault_type |= VM_FAULT_MAJOR;
- page = shmem_swapin(swap, gfp, info, index);
- if (!page) {
- error = -ENOMEM;
- goto failed;
+ if (type)
+ *type |= VM_FAULT_MAJOR;
+ swappage = shmem_swapin(swap, gfp, info, idx);
+ if (!swappage) {
+ spin_lock(&info->lock);
+ entry = shmem_swp_alloc(info, idx, sgp);
+ if (IS_ERR(entry))
+ error = PTR_ERR(entry);
+ else {
+ if (entry->val == swap.val)
+ error = -ENOMEM;
+ shmem_swp_unmap(entry);
+ }
+ spin_unlock(&info->lock);
+ if (error)
+ goto failed;
+ goto repeat;
}
+ wait_on_page_locked(swappage);
+ page_cache_release(swappage);
+ goto repeat;
}
/* We have to do this with page locked to prevent races */
- lock_page(page);
- if (!PageUptodate(page)) {
- error = -EIO;
- goto failed;
+ if (!trylock_page(swappage)) {
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
+ wait_on_page_locked(swappage);
+ page_cache_release(swappage);
+ goto repeat;
}
- wait_on_page_writeback(page);
-
- /* Someone may have already done it for us */
- if (page->mapping) {
- if (page->mapping == mapping &&
- page->index == index)
- goto done;
- error = -EEXIST;
- goto failed;
+ if (PageWriteback(swappage)) {
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
+ wait_on_page_writeback(swappage);
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ goto repeat;
}
-
- error = mem_cgroup_cache_charge(page, current->mm,
- gfp & GFP_RECLAIM_MASK);
- if (!error)
- error = shmem_add_to_page_cache(page, mapping, index,
- gfp, swp_to_radix_entry(swap));
- if (error)
+ if (!PageUptodate(swappage)) {
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ error = -EIO;
goto failed;
+ }
- spin_lock(&info->lock);
- info->swapped--;
- shmem_recalc_inode(inode);
+ if (filepage) {
+ shmem_swp_set(info, entry, 0);
+ shmem_swp_unmap(entry);
+ delete_from_swap_cache(swappage);
+ spin_unlock(&info->lock);
+ copy_highpage(filepage, swappage);
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ flush_dcache_page(filepage);
+ SetPageUptodate(filepage);
+ set_page_dirty(filepage);
+ swap_free(swap);
+ } else if (!(error = add_to_page_cache_locked(swappage, mapping,
+ idx, GFP_NOWAIT))) {
+ info->flags |= SHMEM_PAGEIN;
+ shmem_swp_set(info, entry, 0);
+ shmem_swp_unmap(entry);
+ delete_from_swap_cache(swappage);
+ spin_unlock(&info->lock);
+ filepage = swappage;
+ set_page_dirty(filepage);
+ swap_free(swap);
+ } else {
+ shmem_swp_unmap(entry);
+ spin_unlock(&info->lock);
+ if (error == -ENOMEM) {
+ /*
+ * reclaim from proper memory cgroup and
+ * call memcg's OOM if needed.
+ */
+ error = mem_cgroup_shmem_charge_fallback(
+ swappage,
+ current->mm,
+ gfp);
+ if (error) {
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ goto failed;
+ }
+ }
+ unlock_page(swappage);
+ page_cache_release(swappage);
+ goto repeat;
+ }
+ } else if (sgp == SGP_READ && !filepage) {
+ shmem_swp_unmap(entry);
+ filepage = find_get_page(mapping, idx);
+ if (filepage &&
+ (!PageUptodate(filepage) || !trylock_page(filepage))) {
+ spin_unlock(&info->lock);
+ wait_on_page_locked(filepage);
+ page_cache_release(filepage);
+ filepage = NULL;
+ goto repeat;
+ }
spin_unlock(&info->lock);
-
- delete_from_swap_cache(page);
- set_page_dirty(page);
- swap_free(swap);
-
} else {
- if (shmem_acct_block(info->flags)) {
- error = -ENOSPC;
- goto failed;
- }
+ shmem_swp_unmap(entry);
+ sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
- sbinfo->max_blocks) >= 0) {
- error = -ENOSPC;
- goto unacct;
- }
+ sbinfo->max_blocks) >= 0 ||
+ shmem_acct_block(info->flags))
+ goto nospace;
percpu_counter_inc(&sbinfo->used_blocks);
- }
+ spin_lock(&inode->i_lock);
+ inode->i_blocks += BLOCKS_PER_PAGE;
+ spin_unlock(&inode->i_lock);
+ } else if (shmem_acct_block(info->flags))
+ goto nospace;
+
+ if (!filepage) {
+ int ret;
+
+ if (!prealloc_page) {
+ spin_unlock(&info->lock);
+ filepage = shmem_alloc_page(gfp, info, idx);
+ if (!filepage) {
+ shmem_unacct_blocks(info->flags, 1);
+ shmem_free_blocks(inode, 1);
+ error = -ENOMEM;
+ goto failed;
+ }
+ SetPageSwapBacked(filepage);
+
+ /*
+ * Precharge page while we can wait, compensate
+ * after
+ */
+ error = mem_cgroup_cache_charge(filepage,
+ current->mm, GFP_KERNEL);
+ if (error) {
+ page_cache_release(filepage);
+ shmem_unacct_blocks(info->flags, 1);
+ shmem_free_blocks(inode, 1);
+ filepage = NULL;
+ goto failed;
+ }
- page = shmem_alloc_page(gfp, info, index);
- if (!page) {
- error = -ENOMEM;
- goto decused;
- }
+ spin_lock(&info->lock);
+ } else {
+ filepage = prealloc_page;
+ prealloc_page = NULL;
+ SetPageSwapBacked(filepage);
+ }
- SetPageSwapBacked(page);
- __set_page_locked(page);
- error = mem_cgroup_cache_charge(page, current->mm,
- gfp & GFP_RECLAIM_MASK);
- if (!error)
- error = shmem_add_to_page_cache(page, mapping, index,
- gfp, NULL);
- if (error)
- goto decused;
- lru_cache_add_anon(page);
+ entry = shmem_swp_alloc(info, idx, sgp);
+ if (IS_ERR(entry))
+ error = PTR_ERR(entry);
+ else {
+ swap = *entry;
+ shmem_swp_unmap(entry);
+ }
+ ret = error || swap.val;
+ if (ret)
+ mem_cgroup_uncharge_cache_page(filepage);
+ else
+ ret = add_to_page_cache_lru(filepage, mapping,
+ idx, GFP_NOWAIT);
+ /*
+ * At add_to_page_cache_lru() failure, uncharge will
+ * be done automatically.
+ */
+ if (ret) {
+ spin_unlock(&info->lock);
+ page_cache_release(filepage);
+ shmem_unacct_blocks(info->flags, 1);
+ shmem_free_blocks(inode, 1);
+ filepage = NULL;
+ if (error)
+ goto failed;
+ goto repeat;
+ }
+ info->flags |= SHMEM_PAGEIN;
+ }
- spin_lock(&info->lock);
info->alloced++;
- inode->i_blocks += BLOCKS_PER_PAGE;
- shmem_recalc_inode(inode);
spin_unlock(&info->lock);
-
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
+ clear_highpage(filepage);
+ flush_dcache_page(filepage);
+ SetPageUptodate(filepage);
if (sgp == SGP_DIRTY)
- set_page_dirty(page);
+ set_page_dirty(filepage);
}
done:
- /* Perhaps the file has been truncated since we checked */
- if (sgp != SGP_WRITE &&
- ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
- error = -EINVAL;
- goto trunc;
- }
- *pagep = page;
- return 0;
+ *pagep = filepage;
+ error = 0;
+ goto out;
+nospace:
/*
- * Error recovery.
+ * Perhaps the page was brought in from swap between find_lock_page
+ * and taking info->lock? We allow for that at add_to_page_cache_lru,
+ * but must also avoid reporting a spurious ENOSPC while working on a
+ * full tmpfs. (When filepage has been passed in to shmem_getpage, it
+ * is already in page cache, which prevents this race from occurring.)
*/
-trunc:
- ClearPageDirty(page);
- delete_from_page_cache(page);
- spin_lock(&info->lock);
- info->alloced--;
- inode->i_blocks -= BLOCKS_PER_PAGE;
+ if (!filepage) {
+ struct page *page = find_get_page(mapping, idx);
+ if (page) {
+ spin_unlock(&info->lock);
+ page_cache_release(page);
+ goto repeat;
+ }
+ }
spin_unlock(&info->lock);
-decused:
- if (sbinfo->max_blocks)
- percpu_counter_add(&sbinfo->used_blocks, -1);
-unacct:
- shmem_unacct_blocks(info->flags, 1);
+ error = -ENOSPC;
failed:
- if (swap.val && error != -EINVAL) {
- struct page *test = find_get_page(mapping, index);
- if (test && !radix_tree_exceptional_entry(test))
- page_cache_release(test);
- /* Have another try if the entry has changed */
- if (test != swp_to_radix_entry(swap))
- error = -EEXIST;
+ if (*pagep != filepage) {
+ unlock_page(filepage);
+ page_cache_release(filepage);
}
- if (page) {
- unlock_page(page);
- page_cache_release(page);
- }
- if (error == -ENOSPC && !once++) {
- info = SHMEM_I(inode);
- spin_lock(&info->lock);
- shmem_recalc_inode(inode);
- spin_unlock(&info->lock);
- goto repeat;
+out:
+ if (prealloc_page) {
+ mem_cgroup_uncharge_cache_page(prealloc_page);
+ page_cache_release(prealloc_page);
}
- if (error == -EEXIST)
- goto repeat;
return error;
}
@@ -1075,132 +1553,36 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
int error;
- int ret = VM_FAULT_LOCKED;
-
- /*
- * Trinity finds that probing a hole which tmpfs is punching can
- * prevent the hole-punch from ever completing: which in turn
- * locks writers out with its hold on i_mutex. So refrain from
- * faulting pages into the hole while it's being punched. Although
- * shmem_truncate_range() does remove the additions, it may be unable to
- * keep up, as each new page needs its own unmap_mapping_range() call,
- * and the i_mmap tree grows ever slower to scan if new vmas are added.
- *
- * It does not matter if we sometimes reach this check just before the
- * hole-punch begins, so that one fault then races with the punch:
- * we just need to make racing faults a rare case.
- *
- * The implementation below would be much simpler if we just used a
- * standard mutex or completion: but we cannot take i_mutex in fault,
- * and bloating every shmem inode for this unlikely case would be sad.
- */
- if (unlikely(inode->i_private)) {
- struct shmem_falloc *shmem_falloc;
-
- spin_lock(&inode->i_lock);
- shmem_falloc = inode->i_private;
- if (shmem_falloc &&
- vmf->pgoff >= shmem_falloc->start &&
- vmf->pgoff < shmem_falloc->next) {
- wait_queue_head_t *shmem_falloc_waitq;
- DEFINE_WAIT(shmem_fault_wait);
-
- ret = VM_FAULT_NOPAGE;
- if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* It's polite to up mmap_sem if we can */
- up_read(&vma->vm_mm->mmap_sem);
- ret = VM_FAULT_RETRY;
- }
-
- shmem_falloc_waitq = shmem_falloc->waitq;
- prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
- TASK_UNINTERRUPTIBLE);
- spin_unlock(&inode->i_lock);
- schedule();
+ int ret;
- /*
- * shmem_falloc_waitq points into the vmtruncate_range()
- * stack of the hole-punching task: shmem_falloc_waitq
- * is usually invalid by the time we reach here, but
- * finish_wait() does not dereference it in that case;
- * though i_lock needed lest racing with wake_up_all().
- */
- spin_lock(&inode->i_lock);
- finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
- spin_unlock(&inode->i_lock);
- return ret;
- }
- spin_unlock(&inode->i_lock);
- }
+ if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+ return VM_FAULT_SIGBUS;
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
-
if (ret & VM_FAULT_MAJOR) {
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
}
- return ret;
-}
-
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
- */
- if (inode->i_op->truncate_range != shmem_truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- {
- struct shmem_falloc shmem_falloc;
- struct address_space *mapping = inode->i_mapping;
- loff_t unmap_start = round_up(lstart, PAGE_SIZE);
- loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
-
- shmem_falloc.waitq = &shmem_falloc_waitq;
- shmem_falloc.start = unmap_start >> PAGE_SHIFT;
- shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
- spin_lock(&inode->i_lock);
- inode->i_private = &shmem_falloc;
- spin_unlock(&inode->i_lock);
-
- if ((u64)unmap_end > (u64)unmap_start)
- unmap_mapping_range(mapping, unmap_start,
- 1 + unmap_end - unmap_start, 0);
- shmem_truncate_range(inode, lstart, lend);
- /* No need to unmap again: hole-punching leaves COWed pages */
-
- spin_lock(&inode->i_lock);
- inode->i_private = NULL;
- wake_up_all(&shmem_falloc_waitq);
- spin_unlock(&inode->i_lock);
- }
- mutex_unlock(&inode->i_mutex);
- return 0;
+ return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
-static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
- return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
+ struct inode *i = vma->vm_file->f_path.dentry->d_inode;
+ return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}
static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
- pgoff_t index;
+ struct inode *i = vma->vm_file->f_path.dentry->d_inode;
+ unsigned long idx;
- index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
+ idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif
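
/*
 * Illustrative sketch (not from this patch): an mbind() call on a MAP_SHARED
 * tmpfs mapping is what reaches shmem_set_policy(), which records the policy
 * in the inode's shared policy tree; later faults consult shmem_get_policy().
 * Needs libnuma (<numaif.h>, link with -lnuma); the path is a placeholder.
 */
#include <fcntl.h>
#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned long nodemask = 1;	/* node 0 only; adjust to the machine */
	int fd = open("/dev/shm/policy-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 4 * psz) < 0)
		return 1;
	void *p = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* interleave this file-backed range across the nodes in the mask */
	if (mbind(p, 4 * psz, MPOL_INTERLEAVE, &nodemask,
		  sizeof(nodemask) * 8, 0) < 0)
		perror("mbind");
	return 0;
}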
@@ -1221,6 +1603,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
mapping_clear_unevictable(file->f_mapping);
+ scan_mapping_unevictable_pages(file->f_mapping);
}
retval = 0;
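
/*
 * Illustrative sketch (not from this patch): SHM_LOCK/SHM_UNLOCK on a SysV
 * segment is the path into shmem_lock(); with this change, unlocking also
 * rescans the mapping so previously-unevictable pages return to the LRU.
 * SHM_LOCK may require CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	if (shmctl(id, SHM_LOCK, NULL) < 0)	/* pin: pages become unevictable */
		perror("SHM_LOCK");
	if (shmctl(id, SHM_UNLOCK, NULL) < 0)	/* unpin: mapping is rescanned */
		perror("SHM_UNLOCK");
	shmctl(id, IPC_RMID, NULL);
	return 0;
}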
@@ -1297,7 +1680,20 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
-static const struct inode_operations shmem_short_symlink_operations;
+static const struct inode_operations shmem_symlink_inline_operations;
+
+/*
+ * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
+ * but providing them allows a tmpfs file to be used for splice, sendfile, and
+ * below the loop driver, in the generic fashion that many filesystems support.
+ */
+static int shmem_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
+ unlock_page(page);
+ return error;
+}
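
/*
 * Illustrative sketch (not from this patch): with ->readpage provided,
 * generic_file_splice_read() can service tmpfs, so userspace can splice a
 * tmpfs file straight into a pipe.  The file path is a placeholder and is
 * assumed to exist.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int fd = open("/dev/shm/splice-demo", O_RDONLY);

	if (fd < 0 || pipe(pipefd) < 0)
		return 1;
	/* move up to 64kB of the tmpfs file into the pipe without copying
	 * through user memory */
	ssize_t n = splice(fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
	if (n < 0)
		perror("splice");
	else
		fprintf(stderr, "spliced %zd bytes\n", n);
	return 0;
}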
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
@@ -1306,6 +1702,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ *pagep = NULL;
return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
@@ -1330,8 +1727,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
- pgoff_t index;
- unsigned long offset;
+ unsigned long index, offset;
enum sgp_type sgp = SGP_READ;
/*
@@ -1347,8 +1743,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
for (;;) {
struct page *page = NULL;
- pgoff_t end_index;
- unsigned long nr, ret;
+ unsigned long end_index, nr, ret;
loff_t i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -1464,120 +1859,6 @@ static ssize_t shmem_file_aio_read(struct kiocb *iocb,
return retval;
}
-static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
-{
- struct address_space *mapping = in->f_mapping;
- struct inode *inode = mapping->host;
- unsigned int loff, nr_pages, req_pages;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct page *page;
- pgoff_t index, end_index;
- loff_t isize, left;
- int error, page_nr;
- struct splice_pipe_desc spd = {
- .pages = pages,
- .partial = partial,
- .nr_pages_max = PIPE_DEF_BUFFERS,
- .flags = flags,
- .ops = &page_cache_pipe_buf_ops,
- .spd_release = spd_release_page,
- };
-
- isize = i_size_read(inode);
- if (unlikely(*ppos >= isize))
- return 0;
-
- left = isize - *ppos;
- if (unlikely(left < len))
- len = left;
-
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
- index = *ppos >> PAGE_CACHE_SHIFT;
- loff = *ppos & ~PAGE_CACHE_MASK;
- req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- nr_pages = min(req_pages, pipe->buffers);
-
- spd.nr_pages = find_get_pages_contig(mapping, index,
- nr_pages, spd.pages);
- index += spd.nr_pages;
- error = 0;
-
- while (spd.nr_pages < nr_pages) {
- error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
- if (error)
- break;
- unlock_page(page);
- spd.pages[spd.nr_pages++] = page;
- index++;
- }
-
- index = *ppos >> PAGE_CACHE_SHIFT;
- nr_pages = spd.nr_pages;
- spd.nr_pages = 0;
-
- for (page_nr = 0; page_nr < nr_pages; page_nr++) {
- unsigned int this_len;
-
- if (!len)
- break;
-
- this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
- page = spd.pages[page_nr];
-
- if (!PageUptodate(page) || page->mapping != mapping) {
- error = shmem_getpage(inode, index, &page,
- SGP_CACHE, NULL);
- if (error)
- break;
- unlock_page(page);
- page_cache_release(spd.pages[page_nr]);
- spd.pages[page_nr] = page;
- }
-
- isize = i_size_read(inode);
- end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
- if (unlikely(!isize || index > end_index))
- break;
-
- if (end_index == index) {
- unsigned int plen;
-
- plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
- if (plen <= loff)
- break;
-
- this_len = min(this_len, plen - loff);
- len = this_len;
- }
-
- spd.partial[page_nr].offset = loff;
- spd.partial[page_nr].len = this_len;
- len -= this_len;
- loff = 0;
- spd.nr_pages++;
- index++;
- }
-
- while (page_nr < nr_pages)
- page_cache_release(spd.pages[page_nr++]);
-
- if (spd.nr_pages)
- error = splice_to_pipe(pipe, &spd);
-
- splice_shrink_spd(&spd);
-
- if (error > 0) {
- *ppos += error;
- file_accessed(in);
- }
- return error;
-}
-
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1587,9 +1868,8 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = NAME_MAX;
if (sbinfo->max_blocks) {
buf->f_blocks = sbinfo->max_blocks;
- buf->f_bavail =
- buf->f_bfree = sbinfo->max_blocks -
- percpu_counter_sum(&sbinfo->used_blocks);
+ buf->f_bavail = buf->f_bfree =
+ sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes;
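
/*
 * Illustrative sketch (not from this patch): statfs() on a tmpfs mount reports
 * the figures shmem_statfs() computes, i.e. f_bfree/f_bavail are max_blocks
 * minus the summed per-cpu count of blocks in use.  The mount point below is
 * a placeholder.
 */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs st;

	if (statfs("/dev/shm", &st) < 0) {
		perror("statfs");
		return 1;
	}
	printf("block size %ld, blocks %llu, free %llu, inodes %llu/%llu\n",
	       (long)st.f_bsize,
	       (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bfree,
	       (unsigned long long)st.f_ffree,
	       (unsigned long long)st.f_files);
	return 0;
}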
@@ -1611,7 +1891,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
- &dentry->d_name,
+ &dentry->d_name, NULL,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
@@ -1719,10 +1999,8 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
if (new_dentry->d_inode) {
(void) shmem_unlink(new_dir, new_dentry);
- if (they_are_dirs) {
- drop_nlink(new_dentry->d_inode);
+ if (they_are_dirs)
drop_nlink(old_dir);
- }
} else if (they_are_dirs) {
drop_nlink(old_dir);
inc_nlink(new_dir);
@@ -1741,7 +2019,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
int error;
int len;
struct inode *inode;
- struct page *page;
+ struct page *page = NULL;
char *kaddr;
struct shmem_inode_info *info;
@@ -1753,7 +2031,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (!inode)
return -ENOSPC;
- error = security_inode_init_security(inode, dir, &dentry->d_name,
+ error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
NULL, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
@@ -1765,13 +2043,10 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
info = SHMEM_I(inode);
inode->i_size = len-1;
- if (len <= SHORT_SYMLINK_LEN) {
- info->symlink = kmemdup(symname, len, GFP_KERNEL);
- if (!info->symlink) {
- iput(inode);
- return -ENOMEM;
- }
- inode->i_op = &shmem_short_symlink_operations;
+ if (len <= SHMEM_SYMLINK_INLINE_LEN) {
+ /* do it inline */
+ memcpy(info->inline_symlink, symname, len);
+ inode->i_op = &shmem_symlink_inline_operations;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
@@ -1794,17 +2069,17 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
return 0;
}
-static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
- nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
+ nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
return NULL;
}
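
/*
 * Illustrative sketch (not from this patch): whether a tmpfs symlink target is
 * stored inline in the inode (short target) or on a swappable page (long
 * target) is invisible to userspace; readlink() behaves identically either
 * way.  Paths are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	char longtarget[200];
	ssize_t n;

	memset(longtarget, 'a', sizeof(longtarget) - 1);
	longtarget[sizeof(longtarget) - 1] = '\0';

	symlink("short", "/dev/shm/lnk-short");		/* stored inline */
	symlink(longtarget, "/dev/shm/lnk-long");	/* stored on a page */

	n = readlink("/dev/shm/lnk-long", buf, sizeof(buf) - 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("target length %zd\n", n);
	}
	unlink("/dev/shm/lnk-short");
	unlink("/dev/shm/lnk-long");
	return 0;
}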
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
- int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
- nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
+ int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
+ nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
if (page)
unlock_page(page);
return page;
@@ -1915,6 +2190,7 @@ out:
return err;
}
+
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
&generic_acl_access_handler,
@@ -2044,9 +2320,9 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
#endif /* CONFIG_TMPFS_XATTR */
-static const struct inode_operations shmem_short_symlink_operations = {
+static const struct inode_operations shmem_symlink_inline_operations = {
.readlink = generic_readlink,
- .follow_link = shmem_follow_short_symlink,
+ .follow_link = shmem_follow_link_inline,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
@@ -2249,7 +2525,8 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
if (config.max_inodes < inodes)
goto out;
/*
- * Those tests disallow limited->unlimited while any are in use;
+ * Those tests also disallow limited->unlimited while any are in
+ * use, so i_blocks will always be zero when max_blocks is zero;
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
@@ -2346,7 +2623,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
sbinfo->free_inodes = sbinfo->max_inodes;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
@@ -2381,14 +2658,14 @@ static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
- struct shmem_inode_info *info;
- info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
- if (!info)
+ struct shmem_inode_info *p;
+ p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
+ if (!p)
return NULL;
- return &info->vfs_inode;
+ return &p->vfs_inode;
}
-static void shmem_destroy_callback(struct rcu_head *head)
+static void shmem_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
@@ -2397,26 +2674,29 @@ static void shmem_destroy_callback(struct rcu_head *head)
static void shmem_destroy_inode(struct inode *inode)
{
- if ((inode->i_mode & S_IFMT) == S_IFREG)
+ if ((inode->i_mode & S_IFMT) == S_IFREG) {
+ /* only struct inode is valid if it's an inline symlink */
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
- call_rcu(&inode->i_rcu, shmem_destroy_callback);
+ }
+ call_rcu(&inode->i_rcu, shmem_i_callback);
}
-static void shmem_init_inode(void *foo)
+static void init_once(void *foo)
{
- struct shmem_inode_info *info = foo;
- inode_init_once(&info->vfs_inode);
+ struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
+
+ inode_init_once(&p->vfs_inode);
}
-static int shmem_init_inodecache(void)
+static int init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
- 0, SLAB_PANIC, shmem_init_inode);
+ 0, SLAB_PANIC, init_once);
return 0;
}
-static void shmem_destroy_inodecache(void)
+static void destroy_inodecache(void)
{
kmem_cache_destroy(shmem_inode_cachep);
}
@@ -2425,6 +2705,7 @@ static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
+ .readpage = shmem_readpage,
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
#endif
@@ -2441,7 +2722,7 @@ static const struct file_operations shmem_file_operations = {
.aio_read = shmem_file_aio_read,
.aio_write = generic_file_aio_write,
.fsync = noop_fsync,
- .splice_read = shmem_file_splice_read,
+ .splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
#endif
};
@@ -2455,6 +2736,10 @@ static const struct inode_operations shmem_inode_operations = {
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
+#ifdef CONFIG_TMPFS_POSIX_ACL
+ .check_acl = generic_check_acl,
+#endif
+
};
static const struct inode_operations shmem_dir_inode_operations = {
@@ -2477,6 +2762,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2489,6 +2775,7 @@ static const struct inode_operations shmem_special_inode_operations = {
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2513,20 +2800,21 @@ static const struct vm_operations_struct shmem_vm_ops = {
#endif
};
+
static struct dentry *shmem_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, shmem_fill_super);
}
-static struct file_system_type shmem_fs_type = {
+static struct file_system_type tmpfs_fs_type = {
.owner = THIS_MODULE,
.name = "tmpfs",
.mount = shmem_mount,
.kill_sb = kill_litter_super,
};
-int __init shmem_init(void)
+int __init init_tmpfs(void)
{
int error;
@@ -2534,18 +2822,18 @@ int __init shmem_init(void)
if (error)
goto out4;
- error = shmem_init_inodecache();
+ error = init_inodecache();
if (error)
goto out3;
- error = register_filesystem(&shmem_fs_type);
+ error = register_filesystem(&tmpfs_fs_type);
if (error) {
printk(KERN_ERR "Could not register tmpfs\n");
goto out2;
}
- shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
- shmem_fs_type.name, NULL);
+ shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
+ tmpfs_fs_type.name, NULL);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -2554,9 +2842,9 @@ int __init shmem_init(void)
return 0;
out1:
- unregister_filesystem(&shmem_fs_type);
+ unregister_filesystem(&tmpfs_fs_type);
out2:
- shmem_destroy_inodecache();
+ destroy_inodecache();
out3:
bdi_destroy(&shmem_backing_dev_info);
out4:
@@ -2564,6 +2852,45 @@ out4:
return error;
}
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, its refcount is incremented. Callers must handle
+ * this refcount.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+ struct page **pagep, swp_entry_t *ent)
+{
+ swp_entry_t entry = { .val = 0 }, *ptr;
+ struct page *page = NULL;
+ struct shmem_inode_info *info = SHMEM_I(inode);
+
+ if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+ goto out;
+
+ spin_lock(&info->lock);
+ ptr = shmem_swp_entry(info, pgoff, NULL);
+#ifdef CONFIG_SWAP
+ if (ptr && ptr->val) {
+ entry.val = ptr->val;
+ page = find_get_page(&swapper_space, entry.val);
+ } else
+#endif
+ page = find_get_page(inode->i_mapping, pgoff);
+ if (ptr)
+ shmem_swp_unmap(ptr);
+ spin_unlock(&info->lock);
+out:
+ *pagep = page;
+ *ent = entry;
+}
+#endif
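
/*
 * Illustrative sketch (not from this patch): a hypothetical in-kernel caller
 * of mem_cgroup_get_shmem_target().  The helper hands back either a page
 * (with its refcount raised, so the caller must release it) or a bare swap
 * entry when the backing slot is on swap but not in the swap cache.
 */
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

static void example_probe_shmem_slot(struct inode *inode, pgoff_t pgoff)
{
	struct page *page;
	swp_entry_t ent;

	mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
	if (page) {
		/* resident in page cache or swap cache: inspect, then drop ref */
		page_cache_release(page);
	} else if (ent.val) {
		/* slot is out on swap: only the swap entry is available */
	}
}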
+
#else /* !CONFIG_SHMEM */
/*
@@ -2577,23 +2904,23 @@ out4:
#include <linux/ramfs.h>
-static struct file_system_type shmem_fs_type = {
+static struct file_system_type tmpfs_fs_type = {
.name = "tmpfs",
.mount = ramfs_mount,
.kill_sb = kill_litter_super,
};
-int __init shmem_init(void)
+int __init init_tmpfs(void)
{
- BUG_ON(register_filesystem(&shmem_fs_type) != 0);
+ BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
- shm_mnt = kern_mount(&shmem_fs_type);
+ shm_mnt = kern_mount(&tmpfs_fs_type);
BUG_ON(IS_ERR(shm_mnt));
return 0;
}
-int shmem_unuse(swp_entry_t swap, struct page *page)
+int shmem_unuse(swp_entry_t entry, struct page *page)
{
return 0;
}
@@ -2603,27 +2930,43 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
return 0;
}
-void shmem_unlock_mapping(struct address_space *mapping)
-{
-}
-
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
- truncate_inode_pages_range(inode->i_mapping, lstart, lend);
+ truncate_inode_pages_range(inode->i_mapping, start, end);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, its refcount is incremented. Callers must handle
+ * this refcount.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+ struct page **pagep, swp_entry_t *ent)
{
- /* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
- return -ENOSYS;
+ struct page *page = NULL;
+
+ if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+ goto out;
+ page = find_get_page(inode->i_mapping, pgoff);
+out:
+ *pagep = page;
+ *ent = (swp_entry_t){ .val = 0 };
}
+#endif
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
+#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
#endif /* CONFIG_SHMEM */
@@ -2647,7 +2990,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (IS_ERR(shm_mnt))
return (void *)shm_mnt;
- if (size < 0 || size > MAX_LFS_FILESIZE)
+ if (size < 0 || size > SHMEM_MAX_BYTES)
return ERR_PTR(-EINVAL);
if (shmem_acct_size(flags, size))
@@ -2670,7 +3013,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
d_instantiate(path.dentry, inode);
inode->i_size = size;
- clear_nlink(inode); /* It is unlinked */
+ inode->i_nlink = 0; /* It is unlinked */
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
if (error)
@@ -2693,6 +3036,15 @@ put_memory:
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -2706,11 +3058,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
+ shmem_set_file(vma, file);
return 0;
}
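
/*
 * Illustrative sketch (not from this patch): a MAP_SHARED | MAP_ANONYMOUS
 * mapping is set up through shmem_zero_setup(), which now delegates to
 * shmem_set_file(); the region behaves like an unlinked tmpfs file shared
 * with children across fork().
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (fork() == 0) {			/* child writes... */
		strcpy(p, "hello from the child");
		_exit(0);
	}
	wait(NULL);
	printf("%s\n", p);			/* ...parent reads it back */
	return 0;
}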
@@ -2726,29 +3074,13 @@ int shmem_zero_setup(struct vm_area_struct *vma)
* suit tmpfs, since it may have pages in swapcache, and needs to find those
* for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
*
- * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
- * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
*/
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
-#ifdef CONFIG_SHMEM
- struct inode *inode = mapping->host;
- struct page *page;
- int error;
-
- BUG_ON(mapping->a_ops != &shmem_aops);
- error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
- if (error)
- page = ERR_PTR(error);
- else
- unlock_page(page);
- return page;
-#else
- /*
- * The tiny !SHMEM case uses ramfs without swap
- */
return read_cache_page_gfp(mapping, index, gfp);
-#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
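
/*
 * Illustrative sketch (not from this patch): the kind of driver-side loop
 * (e.g. a GEM object backed by a shmem mapping) that the stub above serves;
 * the function name is hypothetical.  Each page comes back uptodate and with
 * a reference the caller must drop.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static int example_pin_object_pages(struct address_space *mapping,
				    struct page **pages, pgoff_t nr)
{
	pgoff_t i;

	for (i = 0; i < nr; i++) {
		struct page *page = shmem_read_mapping_page_gfp(mapping, i,
						mapping_gfp_mask(mapping));
		if (IS_ERR(page)) {
			while (i--)
				page_cache_release(pages[i]);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}
	return 0;
}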