-rw-r--r--  include/asm-generic/pgtable.h        17
-rw-r--r--  include/asm-i386/pgtable.h            8
-rw-r--r--  include/asm-ia64/pgtable.h           25
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h  12
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h  12
-rw-r--r--  include/asm-ppc/pgtable.h            12
-rw-r--r--  include/asm-s390/pgtable.h            7
-rw-r--r--  include/asm-sparc/pgtable.h          11
-rw-r--r--  include/asm-x86_64/pgtable.h         14
-rw-r--r--  mm/hugetlb.c                          7
-rw-r--r--  mm/memory.c                          13
11 files changed, 92 insertions(+), 46 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99e..7d7bcf9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@ do { \
* Largely same as above, but only sets the access flags (dirty,
* accessed, and writable). Furthermore, we know it always gets set
* to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c, so we changed this macro slightly.
*/
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __changed; \
+})
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
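For readers following the change: the macro moves from a do { } while (0) statement to a
GCC statement expression precisely so it can yield a value. A minimal inline-function
sketch of what the generic definition amounts to (illustrative only, not part of the
patch; the helper name is hypothetical, and the dirty argument is unused in the generic
version):

static inline int generic_ptep_set_access_flags(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep, pte_t entry,
						int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	/* Tell the caller whether update_mmu_cache() etc. are still needed. */
	return changed;
}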
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d62bdb0..628fa77 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -285,13 +285,15 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
*/
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
-do { \
- if (dirty) { \
+({ \
+ int __changed = !pte_same(*(ptep), entry); \
+ if (__changed && dirty) { \
(ptep)->pte_low = (entry).pte_low; \
pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
-} while (0)
+ __changed; \
+})
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 670b706..6580f31 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -533,16 +533,23 @@ extern void lazy_mmu_prot_update (pte_t pte);
* daccess_bit in ivt.S).
*/
#ifdef CONFIG_SMP
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
-do { \
- if (__safely_writable) { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __addr); \
- } \
-} while (0)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed && __safely_writable) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __addr); \
+ } \
+ __changed; \
+})
#else
-# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
- ptep_establish(__vma, __addr, __ptep, __entry)
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) \
+ ptep_establish(__vma, __addr, __ptep, __entry); \
+ __changed; \
+})
#endif
# ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index c863bdb..7fb730c 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -673,10 +673,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
/*
* Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 704c4e6..3cfd98f 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -413,10 +413,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
/*
* Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index bed452d..9d0ce9f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -694,10 +694,14 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } \
+ __changed; \
+})
/*
* Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 8fe8d42..0a307bb 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -744,7 +744,12 @@ ptep_establish(struct vm_area_struct *vma,
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- ptep_establish(__vma, __address, __ptep, __entry)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) \
+ ptep_establish(__vma, __address, __ptep, __entry); \
+ __changed; \
+})
/*
* Test and clear dirty bit in storage key.
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 4f0a5ba..59229ae 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -446,6 +446,17 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ (sparc_cpu_model == sun4c) || __changed; \
+})
+
#include <asm-generic/pgtable.h>
#endif /* !(__ASSEMBLY__) */
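The sparc definition is the one arch-specific twist: on sun4c the result is forced to
true even when the PTE is unchanged, so callers such as handle_pte_fault() still call
update_mmu_cache(), which is where sun4c services its minor faults. A rough
inline-function rendering (a sketch with a hypothetical name, not part of the patch):

static inline int sparc_ptep_set_access_flags(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep, pte_t entry,
					      int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	/* sun4c wants update_mmu_cache() even for an unchanged PTE. */
	return (sparc_cpu_model == sun4c) || changed;
}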
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 08b9831..0a71e0b 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -395,12 +395,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- if (__dirty) { \
- set_pte(__ptep, __entry); \
- flush_tlb_page(__vma, __address); \
- } \
- } while (0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed && __dirty) { \
+ set_pte(__ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __changed; \
+})
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
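On i386 and x86_64 the update is only written back on a write fault (the dirty
argument), since the hardware maintains the accessed/dirty bits itself; the return
value is still simply whether the PTE differed. An illustrative rendering of the
x86_64 variant (sketch only, hypothetical helper name):

static inline int x86_ptep_set_access_flags(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep, pte_t entry,
					    int dirty)
{
	int changed = !pte_same(*ptep, entry);

	/* Only write back for write faults; hardware handles the rest. */
	if (changed && dirty) {
		set_pte(ptep, entry);
		flush_tlb_page(vma, address);
	}
	return changed;
}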
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb7180d..a45d1f0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -326,9 +326,10 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
pte_t entry;
entry = pte_mkwrite(pte_mkdirty(*ptep));
- ptep_set_access_flags(vma, address, ptep, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
+ if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ }
}
diff --git a/mm/memory.c b/mm/memory.c
index cb94488..f64cbf9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1691,9 +1691,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
+ if (ptep_set_access_flags(vma, address, page_table, entry, 1)) {
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ }
ret |= VM_FAULT_WRITE;
goto unlock;
}
@@ -2525,10 +2526,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
pte_t *pte, pmd_t *pmd, int write_access)
{
pte_t entry;
- pte_t old_entry;
spinlock_t *ptl;
- old_entry = entry = *pte;
+ entry = *pte;
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
@@ -2561,8 +2561,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (!pte_same(old_entry, entry)) {
- ptep_set_access_flags(vma, address, pte, entry, write_access);
+ if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
} else {