 include/linux/mm.h |  1 +
 mm/memory.c        | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 92a055b..b465c7f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1574,6 +1574,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NO_CMA	0x200	/* avoid putting pages to CMA regions */
+#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/mm/memory.c b/mm/memory.c
index 067a4f3..29c8112 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1436,6 +1436,16 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 /**
  * follow_page - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1518,7 +1528,7 @@ split_fallthrough:
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1832,7 +1842,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			 */
 			if ((ret & VM_FAULT_WRITE) &&
 			    !(vma->vm_flags & VM_WRITE))
-				foll_flags &= ~FOLL_WRITE;
+				foll_flags |= FOLL_COW;
 
 			cond_resched();
 		}
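The change above is the fix for the "Dirty COW" race (CVE-2016-5195). Previously, once the COW fault had been serviced, __get_user_pages() retried the page lookup with FOLL_WRITE cleared; a racing madvise(MADV_DONTNEED) could drop the private COW copy, and the forced write would then land on the original page (for a file mapping, the shared page-cache page). With FOLL_COW the retry keeps FOLL_WRITE, and can_follow_write_pte() only accepts a pte that is still dirty, i.e. still the private COW copy installed by do_wp_page(); if that copy has been zapped, GUP simply takes another COW fault instead of writing through.

For illustration, here is a minimal userspace sketch (not part of the patch) that drives exactly the path this patch hardens: writing through /proc/self/mem to a PROT_READ private mapping goes through __get_user_pages() with FOLL_FORCE, so the first follow_page() lookup fails, the fault handler breaks COW, and the retried lookup is let through by can_follow_write_pte(). It assumes a Linux system that permits a process to write its own /proc/self/mem.

/*
 * Sketch only: demonstrates the FOLL_FORCE write path, not the race.
 * A direct store to 'map' would SIGSEGV; the kernel-side write via
 * /proc/self/mem succeeds and lands in a private COW copy of the page.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Read-only private mapping: its pte is never pte_write(). */
	char *map = mmap(NULL, 4096, PROT_READ,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return 1;

	int fd = open("/proc/self/mem", O_RDWR);
	if (fd < 0)
		return 1;

	/* /proc/<pid>/mem is addressed by virtual address, so the file
	 * offset is the mapping address itself. */
	if (pwrite(fd, "x", 1, (off_t)(uintptr_t)map) != 1)
		perror("pwrite");

	printf("map[0] = '%c'\n", map[0]);	/* prints 'x' */
	close(fd);
	return 0;
}

Keying the check on pte_dirty() works because the pte installed by the COW fault is marked dirty (pte_mkdirty() in do_wp_page(), left read-only by maybe_mkwrite() since the vma lacks VM_WRITE). Anything that zaps that pte between the fault and the retried lookup makes can_follow_write_pte() fail, forcing a fresh COW fault rather than a write to the original page.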