author    Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 14:34:23 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-28 14:34:23 -0800
commit    6aab341e0a28aff100a09831c5300a2994b8b986
tree      1af3908275aa5e1b16e80efee554a9a7504c56d4
parent    458af5439fe7ae7d95ca14106844e61f0795166c
mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNMAPPED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
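For context on the "it just works" claim above, here is a minimal sketch of the driver side this enables: a character-device ->mmap() handler built on remap_pfn_range(). The identifiers mydev_phys_base and mydev_mmap are hypothetical names for illustration; remap_pfn_range() itself is the real kernel API this commit re-architects.

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long mydev_phys_base;	/* hypothetical physical base of device memory */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() fills the range with raw pfn mappings and,
	 * as of this commit, marks the vma VM_PFNMAP, so the core VM
	 * never treats these ptes as normal pages: no page refcounting
	 * and, per the commit message, no need to mark the backing
	 * pages reserved.
	 */
	if (remap_pfn_range(vma, vma->vm_start,
			    mydev_phys_base >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}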
Diffstat (limited to 'mm/msync.c')
-rw-r--r--	mm/msync.c	12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
index b3f4caf..1b5b6f6 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -27,7 +27,6 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 again:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
-		unsigned long pfn;
 		struct page *page;
 
 		if (progress >= 64) {
@@ -40,13 +39,9 @@ again:
 			continue;
 		if (!pte_maybe_dirty(*pte))
 			continue;
-		pfn = pte_pfn(*pte);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, *pte, addr);
+		page = vm_normal_page(vma, addr, *pte);
+		if (!page)
 			continue;
-		}
-		page = pfn_to_page(pfn);
-
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
 		    page_test_and_clear_dirty(page))
 			set_page_dirty(page);
@@ -97,9 +92,8 @@ static void msync_page_range(struct vm_area_struct *vma,
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
 	 * to do anything more on an msync().
-	 * Can't do anything with VM_UNPAGED regions either.
 	 */
-	if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED))
+	if (vma->vm_flags & VM_HUGETLB)
 		return;
 
 	BUG_ON(addr >= end);
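The key new helper used in the first hunk is vm_normal_page(). A simplified sketch of the idea, assuming the VM_PFNMAP scheme this commit introduces (not the verbatim mm/memory.c implementation):

/*
 * Sketch: in a VM_PFNMAP vma, a pfn that follows the vma's linear
 * pfn layout is a raw remapped entry with no struct page behind it,
 * so callers such as msync_pte_range() get NULL and skip it.
 */
struct page *vm_normal_page(struct vm_area_struct *vma,
			    unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (vma->vm_flags & VM_PFNMAP) {
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			return NULL;	/* remapped range: hands off */
	}
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		return NULL;
	}
	return pfn_to_page(pfn);	/* a normal, refcounted page */
}

This is why msync_pte_range() no longer needs its own pfn_valid()/print_bad_pte() dance: the single NULL check covers both the remapped-range case and the bad-pte case.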