Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c      |  5
-rw-r--r--  mm/mlock.c        | 16
-rw-r--r--  mm/mmap.c         |  2
-rw-r--r--  mm/page_alloc.c   |  4
-rw-r--r--  mm/page_cgroup.c  |  4
5 files changed, 16 insertions, 15 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d143ab6..6058b53 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1796,6 +1796,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{
+ struct hstate *h = hstate_vma(vma);
struct vm_area_struct *iter_vma;
struct address_space *mapping;
struct prio_tree_iter iter;
@@ -1805,7 +1806,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
* from page cache lookup which is in HPAGE_SIZE units.
*/
- address = address & huge_page_mask(hstate_vma(vma));
+ address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ (vma->vm_pgoff >> PAGE_SHIFT);
mapping = (struct address_space *)page_private(page);
@@ -1824,7 +1825,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
unmap_hugepage_range(iter_vma,
- address, address + HPAGE_SIZE,
+ address, address + huge_page_size(h),
page);
}
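
The hugetlb.c hunks above cache the vma's hstate and derive both the alignment mask and the unmap length from it, rather than from the compile-time HPAGE_SIZE, so the unmapped range stays consistent with the vma's actual huge page size. A minimal userspace sketch of that arithmetic follows; the page sizes and the address are illustrative assumptions, not values taken from the patch.

/*
 * Hedged userspace sketch (not part of the patch): with more than one
 * huge page size, the alignment mask and the unmap length must both
 * come from the vma's hstate.  Sizes and address below are examples.
 */
#include <stdio.h>

static void show(unsigned long huge_size, unsigned long address)
{
	unsigned long mask  = ~(huge_size - 1);   /* huge_page_mask(h)  */
	unsigned long start = address & mask;     /* aligned start      */
	unsigned long end   = start + huge_size;  /* huge_page_size(h)  */

	printf("size %#10lx: unmap [%#lx, %#lx)\n", huge_size, start, end);
}

int main(void)
{
	unsigned long address = 0x60345678UL;     /* arbitrary example  */

	show(2UL << 20, address);                 /* 2 MiB huge pages   */
	show(1UL << 30, address);                 /* 1 GiB huge pages   */
	return 0;
}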
diff --git a/mm/mlock.c b/mm/mlock.c
index 008ea70..a6da2ae 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
putback_lru_page(page);
} else {
/*
- * Page not on the LRU yet. Flush all pagevecs and retry.
+ * We lost the race. The page was already moved to the evictable list.
*/
- lru_add_drain_all();
- if (!isolate_lru_page(page))
- putback_lru_page(page);
- else if (PageUnevictable(page))
+ if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
-
}
}
@@ -187,8 +183,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_WRITE)
gup_flags |= GUP_FLAGS_WRITE;
- lru_add_drain_all(); /* push cached pages to LRU */
-
while (nr_pages > 0) {
int i;
@@ -251,8 +245,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
ret = 0;
}
- lru_add_drain_all(); /* to update stats */
-
return ret; /* count entire vma as locked_vm */
}
@@ -546,6 +538,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
if (!can_do_mlock())
return -EPERM;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
@@ -612,6 +606,8 @@ asmlinkage long sys_mlockall(int flags)
if (!can_do_mlock())
goto out;
+ lru_add_drain_all(); /* flush pagevec */
+
down_write(&current->mm->mmap_sem);
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
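
The mlock.c hunks move lru_add_drain_all() out of __mlock_vma_pages_range(), which is called with mmap_sem held, and instead call it once per system call before the semaphore is taken, so pages still sitting in per-CPU pagevecs are pushed to the LRU up front rather than drained and retried later in __clear_page_mlock(). A condensed sketch of the resulting sys_mlock() flow is below; the do_mlock() helper and the RLIMIT_MEMLOCK check come from mm/mlock.c and are not shown by this diff, so their exact form here is approximate.

asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush per-CPU pagevecs before taking mmap_sem */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	/* ... RLIMIT_MEMLOCK / CAP_IPC_LOCK check ... */
	error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}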
diff --git a/mm/mmap.c b/mm/mmap.c
index de14ac2..d4855a6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1704,7 +1704,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
- if (expand_stack(prev, addr))
+ if (!prev || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED) {
if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54069e6..d8ac014 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1561,6 +1561,10 @@ nofail_alloc:
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
+ /*
+ * The task's cpuset might have expanded its set of allowable nodes
+ */
+ cpuset_update_task_memory_state();
p->flags |= PF_MEMALLOC;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index f59d797..1223d92 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -165,7 +165,7 @@ int online_page_cgroup(unsigned long start_pfn,
unsigned long start, end, pfn;
int fail = 0;
- start = start_pfn & (PAGES_PER_SECTION - 1);
+ start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
@@ -188,7 +188,7 @@ int offline_page_cgroup(unsigned long start_pfn,
{
unsigned long start, end, pfn;
- start = start_pfn & (PAGES_PER_SECTION - 1);
+ start = start_pfn & ~(PAGES_PER_SECTION - 1);
end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
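
The page_cgroup.c fix flips the mask: pfn & (PAGES_PER_SECTION - 1) yields the offset within a section, while rounding the start pfn down to a section boundary needs the complemented mask used by the new code. A small userspace sketch of the difference, assuming PAGES_PER_SECTION is a power of two; the value 32768 and the start pfn are only example numbers.

/*
 * Hedged userspace sketch of the mask fix above.  PAGES_PER_SECTION is
 * assumed to be a power of two; 32768 is an illustrative value only.
 */
#include <stdio.h>

#define PAGES_PER_SECTION 32768UL	/* e.g. 128 MiB sections with 4 KiB pages */

int main(void)
{
	unsigned long start_pfn = 100000UL;	/* arbitrary, not section aligned */

	unsigned long offset  = start_pfn & (PAGES_PER_SECTION - 1);	/* old code: offset in section */
	unsigned long aligned = start_pfn & ~(PAGES_PER_SECTION - 1);	/* new code: round down */

	printf("start_pfn=%lu offset-in-section=%lu section-aligned=%lu\n",
	       start_pfn, offset, aligned);
	return 0;
}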