aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorEspen Fjellvær Olsen <espen@mrfjo.org>2013-01-08 21:30:59 +0100
committerEspen Fjellvær Olsen <espen@mrfjo.org>2013-01-09 18:46:56 +0100
commit49017aa9e80dbdb44cbfe8f4aa3b5edd9466705c (patch)
tree8a22b8d121d4ce41c2f4a590eb9fbf186af5f368 /mm/memory.c
parent5528b5d5c43f0c38903db0e672581ec4176ae523 (diff)
downloadkernel_samsung_smdk4412-49017aa9e80dbdb44cbfe8f4aa3b5edd9466705c.zip
kernel_samsung_smdk4412-49017aa9e80dbdb44cbfe8f4aa3b5edd9466705c.tar.gz
kernel_samsung_smdk4412-49017aa9e80dbdb44cbfe8f4aa3b5edd9466705c.tar.bz2
Update to the exynos-mem security issue from Samsung I9300 Update7
p2: includes the secmem changes as well as Andrei's MFC addition Change-Id: I144c2b42586f07b737fba09742315683cbab36ef
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c110
1 file changed, 106 insertions, 4 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 326492a..159985d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -49,6 +49,7 @@
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
@@ -57,6 +58,7 @@
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
+#include <linux/migrate.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -1592,6 +1594,25 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
stack_guard_page_end(vma, addr+PAGE_SIZE);
}
+#ifdef CONFIG_DMA_CMA
+static inline int __replace_cma_page(struct page *page, struct page **res)
+{
+ struct page *newpage;
+ int ret;
+
+ ret = migrate_replace_cma_page(page, &newpage);
+ if (ret == 0) {
+ *res = newpage;
+ return 0;
+ }
+ /*
+ * Migration errors in case of get_user_pages() might not
+ * be fatal to CMA itself, so better don't fail here.
+ */
+ return 0;
+}
+#endif
+
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
@@ -1742,6 +1763,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
+#ifdef CONFIG_DMA_CMA
+ if (gup_flags & FOLL_NO_CMA)
+ fault_flags = FAULT_FLAG_NO_CMA;
+#endif
+
/* For mlock, just skip the stack guard page. */
if (foll_flags & FOLL_MLOCK) {
if (stack_guard_page(vma, start))
@@ -1807,6 +1833,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
+
+#ifdef CONFIG_DMA_CMA
+ if ((gup_flags & FOLL_NO_CMA)
+ && is_cma_pageblock(page)) {
+ int rc = __replace_cma_page(page, &page);
+ if (rc)
+ return i ? i : rc;
+ }
+#endif
+
if (pages) {
pages[i] = page;
@@ -1950,6 +1986,26 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_DMA_CMA
+int get_user_pages_nocma(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = FOLL_TOUCH | FOLL_NO_CMA;
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+ NULL);
+}
+EXPORT_SYMBOL(get_user_pages_nocma);
+#endif
+
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
@@ -2485,7 +2541,11 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
+#ifdef CONFIG_DMA_CMA
+ spinlock_t *ptl, pte_t orig_pte, unsigned int flags)
+#else
spinlock_t *ptl, pte_t orig_pte)
+#endif
__releases(ptl)
{
struct page *old_page, *new_page;
@@ -2657,11 +2717,25 @@ gotten:
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
- new_page = alloc_zeroed_user_highpage_movable(vma, address);
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ new_page = alloc_zeroed_user_highpage(vma, address);
+ else
+#endif
+ new_page =
+ alloc_zeroed_user_highpage_movable(vma, address);
+
if (!new_page)
goto oom;
} else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ else
+#endif
+ new_page =
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
@@ -2888,6 +2962,16 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_to_swp_entry(orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
+#ifdef CONFIG_DMA_CMA
+ /*
+ * FIXME: mszyprow: cruel, brute-force method for
+ * letting cma/migration to finish it's job without
+ * stealing the lock migration_entry_wait() and creating
+ * a live-lock on the faulted page
+ * (page->_count == 2 migration failure issue)
+ */
+ mdelay(10);
+#endif
migration_entry_wait(mm, pmd, address);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
@@ -3021,7 +3105,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
if (flags & FAULT_FLAG_WRITE) {
+#ifdef CONFIG_DMA_CMA
+ ret |= do_wp_page(mm, vma, address, page_table,
+ pmd, ptl, pte, flags);
+#else
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+#endif
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
@@ -3213,8 +3302,16 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
- vma, address);
+
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ page = alloc_page_vma(GFP_HIGHUSER,
+ vma, address);
+ else
+#endif
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+ vma, address);
+
if (!page) {
ret = VM_FAULT_OOM;
goto out;
@@ -3422,8 +3519,13 @@ int handle_pte_fault(struct mm_struct *mm,
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
+#ifdef CONFIG_DMA_CMA
+ return do_wp_page(mm, vma, address,
+ pte, pmd, ptl, entry, flags);
+#else
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
+#endif
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);