From 8413ac9d8c9a1366a4f57880723126cd24e5a5c3 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 18 Oct 2008 20:26:59 -0700
Subject: mm: page lock use lock bitops

trylock_page, unlock_page open and close a critical section.  Hence,
we can use the lock bitops to get the desired memory ordering.

Also, mark trylock as likely to succeed (and remove the annotation from
callers).

Signed-off-by: Nick Piggin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/pagemap.h |  2 +-
 mm/filemap.c            | 13 +++++--------
 mm/swapfile.c           |  2 +-
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7334b2b..709742b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -311,7 +311,7 @@ static inline void __clear_page_locked(struct page *page)
 
 static inline int trylock_page(struct page *page)
 {
-	return !test_and_set_bit(PG_locked, &page->flags);
+	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index a1ddd25..e1b23fd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -573,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * mechananism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * test_and_set_bit() to lock the page; the second mb is necessary to enforce
- * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
- * races with a parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
-	smp_mb__before_clear_bit();
-	if (!test_and_clear_bit(PG_locked, &page->flags))
-		BUG();
-	smp_mb__after_clear_bit();
+	VM_BUG_ON(!PageLocked(page));
+	clear_bit_unlock(PG_locked, &page->flags);
+	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2a97faf..90cb67a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -422,7 +422,7 @@ void free_swap_and_cache(swp_entry_t entry)
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
-			if (page && unlikely(!trylock_page(page))) {
+			if (page && !trylock_page(page)) {
 				page_cache_release(page);
 				page = NULL;
 			}
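
For context, a minimal sketch (not part of the patch) of the acquire/release
pairing the lock bitops provide, which is what trylock_page()/unlock_page()
rely on after this change: test_and_set_bit_lock() takes the bit with acquire
ordering, and clear_bit_unlock() drops it with release ordering, so a flag bit
used this way behaves as a try-lock.  The wrapper names my_trylock()/my_unlock()
and the bit number MY_LOCK_BIT below are hypothetical, chosen only for
illustration.

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define MY_LOCK_BIT	0	/* hypothetical bit index within a flags word */

/* Try to take the bit-lock; acquire ordering on success (as trylock_page
 * now gets from test_and_set_bit_lock()). */
static inline bool my_trylock(unsigned long *flags)
{
	return likely(!test_and_set_bit_lock(MY_LOCK_BIT, flags));
}

/* Drop the bit-lock; release ordering closes the critical section (as
 * unlock_page now gets from clear_bit_unlock()). */
static inline void my_unlock(unsigned long *flags)
{
	clear_bit_unlock(MY_LOCK_BIT, flags);
}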