about summary refs log tree commit diff stats
path: root/mm/mlock.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- mm/mlock.c | 17
1 files changed, 3 insertions, 14 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 39b3a7d..048260c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -14,7 +14,7 @@
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
@@ -78,7 +78,6 @@ void __clear_page_mlock(struct page *page)
*/
void mlock_vma_page(struct page *page)
{
- /* Serialize with page migration */
BUG_ON(!PageLocked(page));
if (!TestSetPageMlocked(page)) {
@@ -106,21 +105,12 @@ void mlock_vma_page(struct page *page)
*/
void munlock_vma_page(struct page *page)
{
- /* For try_to_munlock() and to serialize with page migration */
BUG_ON(!PageLocked(page));
if (TestClearPageMlocked(page)) {
dec_zone_page_state(page, NR_MLOCK);
if (!isolate_lru_page(page)) {
- int ret = SWAP_AGAIN;
-
- /*
- * Optimization: if the page was mapped just once,
- * that's our mapping and we don't need to check all the
- * other vmas.
- */
- if (page_mapcount(page) > 1)
- ret = try_to_munlock(page);
+ int ret = try_to_munlock(page);
/*
* did try_to_unlock() succeed or punt?
*/
@@ -559,8 +549,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
if (!can_do_mlock())
goto out;
- if (flags & MCL_CURRENT)
- lru_add_drain_all(); /* flush pagevec */
+ lru_add_drain_all(); /* flush pagevec */
down_write(&current->mm->mmap_sem);