author	Hugh Dickins <hugh@veritas.com>	2009-01-06 14:39:36 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 15:59:03 -0800
commit	a2c43eed8334e878702fca713b212ae2a11d84b9 (patch)
tree	4eb3f9b9153df5e7a638b15a6f7c9aa924066a94 /mm
parent	7b1fe59793e61f826bef053107b57b23954833bb (diff)
download	kernel_samsung_smdk4412-a2c43eed8334e878702fca713b212ae2a11d84b9.zip
	kernel_samsung_smdk4412-a2c43eed8334e878702fca713b212ae2a11d84b9.tar.gz
	kernel_samsung_smdk4412-a2c43eed8334e878702fca713b212ae2a11d84b9.tar.bz2
mm: try_to_free_swap replaces remove_exclusive_swap_page
remove_exclusive_swap_page(): its problem is in living up to its name.

It doesn't matter if someone else has a reference to the page (raised page_count); it doesn't matter if the page is mapped into userspace (raised page_mapcount - though that hints it may be worth keeping the swap): all that matters is that there be no more references to the swap (and no writeback in progress).

swapoff (try_to_unuse) has been removing pages from swapcache for years, with no concern for page count or page mapcount, and we used to have a comment in lookup_swap_cache() recognizing that: if you go for a page of swapcache, you'll get the right page, but it could have been removed from swapcache by the time you get page lock.

So, give up asking for exclusivity: get rid of remove_exclusive_swap_page(), and remove_exclusive_swap_page_ref() and remove_exclusive_swap_page_count() which were spawned for the recent LRU work: replace them by the simpler try_to_free_swap() which just checks page_swapcount().

Similarly, remove the page_count limitation from free_swap_and_cache(), but assume that it's worth holding on to the swap if the page is mapped and swap is nowhere near full.

Add a vm_swap_full() test in free_swap_cache()? It would be consistent, but I think we probably have enough for now.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
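For quick reference, a condensed sketch (assembled from the diff below, not a separate implementation) contrasting the old exclusivity test with the check try_to_free_swap() now makes:

/*
 * Old approach (remove_exclusive_swap_page_count): prove exclusivity
 * from the page's reference count, rechecked under
 * swapper_space.tree_lock, roughly:
 *
 *	if (page_count(page) != count)		(us + cache + ptes)
 *		return 0;
 *	...
 *	if (p->swap_map[swp_offset(entry)] == 1) { ... }
 *
 * New approach (try_to_free_swap): only ask whether the swap entry
 * itself still has users, regardless of page_count or page_mapcount.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapcount(page))	/* any remaining swap references? */
		return 0;

	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}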
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/page_io.c	2
-rw-r--r--	mm/swap.c	3
-rw-r--r--	mm/swap_state.c	8
-rw-r--r--	mm/swapfile.c	70
-rw-r--r--	mm/vmscan.c	2
6 files changed, 20 insertions(+), 67 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8f471ed..1a83fe5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2403,7 +2403,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
- remove_exclusive_swap_page(page);
+ try_to_free_swap(page);
unlock_page(page);
if (write_access) {
diff --git a/mm/page_io.c b/mm/page_io.c
index d277a80..dc6ce0a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -98,7 +98,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
struct bio *bio;
int ret = 0, rw = WRITE;
- if (remove_exclusive_swap_page(page)) {
+ if (try_to_free_swap(page)) {
unlock_page(page);
goto out;
}
diff --git a/mm/swap.c b/mm/swap.c
index ff0b290..ba2c0e8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -454,8 +454,7 @@ void pagevec_swap_free(struct pagevec *pvec)
struct page *page = pvec->pages[i];
if (PageSwapCache(page) && trylock_page(page)) {
- if (PageSwapCache(page))
- remove_exclusive_swap_page_ref(page);
+ try_to_free_swap(page);
unlock_page(page);
}
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e793fde..bcb4727 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -195,14 +195,14 @@ void delete_from_swap_cache(struct page *page)
* If we are the only user, then try to free up the swap cache.
*
* Its ok to check for PageSwapCache without the page lock
- * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
+ * here because we are going to recheck again inside
+ * try_to_free_swap() _with_ the lock.
* - Marcelo
*/
static inline void free_swap_cache(struct page *page)
{
- if (PageSwapCache(page) && trylock_page(page)) {
- remove_exclusive_swap_page(page);
+ if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
+ try_to_free_swap(page);
unlock_page(page);
}
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bfd4ee5..f436018 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -348,68 +348,23 @@ int reuse_swap_page(struct page *page)
}
/*
- * Work out if there are any other processes sharing this
- * swap cache page. Free it if you can. Return success.
+ * If swap is getting full, or if there are no more mappings of this page,
+ * then try_to_free_swap is called to free its swap space.
*/
-static int remove_exclusive_swap_page_count(struct page *page, int count)
+int try_to_free_swap(struct page *page)
{
- int retval;
- struct swap_info_struct * p;
- swp_entry_t entry;
-
VM_BUG_ON(!PageLocked(page));
if (!PageSwapCache(page))
return 0;
if (PageWriteback(page))
return 0;
- if (page_count(page) != count) /* us + cache + ptes */
- return 0;
-
- entry.val = page_private(page);
- p = swap_info_get(entry);
- if (!p)
+ if (page_swapcount(page))
return 0;
- /* Is the only swap cache user the cache itself? */
- retval = 0;
- if (p->swap_map[swp_offset(entry)] == 1) {
- /* Recheck the page count with the swapcache lock held.. */
- spin_lock_irq(&swapper_space.tree_lock);
- if ((page_count(page) == count) && !PageWriteback(page)) {
- __delete_from_swap_cache(page);
- SetPageDirty(page);
- retval = 1;
- }
- spin_unlock_irq(&swapper_space.tree_lock);
- }
- spin_unlock(&swap_lock);
-
- if (retval) {
- swap_free(entry);
- page_cache_release(page);
- }
-
- return retval;
-}
-
-/*
- * Most of the time the page should have two references: one for the
- * process and one for the swap cache.
- */
-int remove_exclusive_swap_page(struct page *page)
-{
- return remove_exclusive_swap_page_count(page, 2);
-}
-
-/*
- * The pageout code holds an extra reference to the page. That raises
- * the reference count to test for to 2 for a page that is only in the
- * swap cache plus 1 for each process that maps the page.
- */
-int remove_exclusive_swap_page_ref(struct page *page)
-{
- return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ return 1;
}
/*
@@ -436,13 +391,12 @@ void free_swap_and_cache(swp_entry_t entry)
spin_unlock(&swap_lock);
}
if (page) {
- int one_user;
-
- one_user = (page_count(page) == 2);
- /* Only cache user (+us), or swap space full? Free it! */
- /* Also recheck PageSwapCache after page is locked (above) */
+ /*
+ * Not mapped elsewhere, or swap space full? Free it!
+ * Also recheck PageSwapCache now page is locked (above).
+ */
if (PageSwapCache(page) && !PageWriteback(page) &&
- (one_user || vm_swap_full())) {
+ (!page_mapped(page) || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d196f46..c8601dd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -759,7 +759,7 @@ cull_mlocked:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && vm_swap_full())
- remove_exclusive_swap_page_ref(page);
+ try_to_free_swap(page);
VM_BUG_ON(PageActive(page));
SetPageActive(page);
pgactivate++;