author    Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-09 18:24:35 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-02-09 18:24:40 +0100
commit    146e4b3c8b92071b18f0b2e6f47165bad4f9e825
tree      7e9db61cacca0f55ce34db089f27fc22a56ebbdd /include
parent    0c1f1dcd8c7792aeff6ef62e9508b0041928ab87
[S390] 1K/2K page table pages.

This patch implements 1K/2K page table pages for s390.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
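
For orientation: with PTRS_PER_PTE fixed at 256 by this patch, a process
page table occupies 1K on 31-bit (4-byte entries) and 2K on 64-bit
(8-byte entries) instead of a full 4K page, and when the noexec emulation
is active the shadow copy sits directly behind the primary table,
addressable as ptep[PTRS_PER_PTE] (see the set_pte_at() and pte_clear()
hunks below). A minimal userspace sketch of that layout, assuming 64-bit
entry sizes; the sketch_* names are hypothetical:

#include <stdlib.h>

#define PTRS_PER_PTE 256			/* fixed by this patch */

typedef struct { unsigned long pte; } pte_t;

/* one allocation holds the table, plus its shadow when noexec is on */
static pte_t *sketch_page_table_alloc(int noexec)
{
	return calloc(PTRS_PER_PTE * (noexec ? 2 : 1), sizeof(pte_t));
}

/*
 * The shadow entry lives PTRS_PER_PTE slots behind the primary one;
 * the real set_pte_at() additionally downgrades it to read-only or empty.
 */
static void sketch_set_pte(pte_t *ptep, pte_t entry, int noexec)
{
	*ptep = entry;
	if (noexec)
		ptep[PTRS_PER_PTE] = entry;
}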
Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/elf.h           13
-rw-r--r--  include/asm-s390/mmu.h            8
-rw-r--r--  include/asm-s390/mmu_context.h   14
-rw-r--r--  include/asm-s390/page.h          36
-rw-r--r--  include/asm-s390/pgalloc.h       79
-rw-r--r--  include/asm-s390/pgtable.h      105
-rw-r--r--  include/asm-s390/tlb.h            6
-rw-r--r--  include/asm-s390/tlbflush.h      11
8 files changed, 114 insertions(+), 158 deletions(-)
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index b73a424..8181ca5 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -115,6 +115,7 @@ typedef s390_regs elf_gregset_t;
#include <linux/sched.h> /* for task_struct */
#include <asm/system.h> /* for save_access_regs */
+#include <asm/mmu_context.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -214,4 +215,16 @@ do { \
} while (0)
#endif /* __s390x__ */
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack) \
+({ \
+ if (current->mm->context.noexec && \
+ executable_stack != EXSTACK_DISABLE_X) \
+ disable_noexec(current->mm, current); \
+ current->mm->context.noexec == 0; \
+})
+
#endif
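
A hedged restatement of the statement-expression semantics above
(userspace C, sketch_* names hypothetical): the first request for an
executable stack tears the noexec shadow down once, and the expression's
value is the final noexec state, so READ_IMPLIES_EXEC is set exactly for
mms running without shadow tables:

static int sketch_noexec;	/* stands in for current->mm->context.noexec */

static void sketch_disable_noexec(void)
{
	sketch_noexec = 0;	/* the real disable_noexec() also drops shadows */
}

static int sketch_read_implies_exec(int exec_stack_disabled)
{
	if (sketch_noexec && !exec_stack_disabled)
		sketch_disable_noexec();
	return sketch_noexec == 0;
}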
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index ccd36d2..13ec421 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -1,7 +1,11 @@
#ifndef __MMU_H
#define __MMU_H
-/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+ struct list_head crst_list;
+ struct list_head pgtable_list;
+ unsigned long asce_bits;
+ int noexec;
+} mm_context_t;
#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index a77d4ba..3eaac5e 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -10,15 +10,17 @@
#define __S390_MMU_CONTEXT_H
#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+ mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
- mm->context |= _ASCE_TYPE_REGION3;
+ mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
+ mm->context.noexec = s390_noexec;
return 0;
}
@@ -32,11 +34,13 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
- S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
+ pgd_t *pgd = mm->pgd;
+
+ S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
if (switch_amode) {
/* Load primary space page table origin. */
- pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
- S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
+ pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
+ S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
asm volatile(LCTL_OPCODE" 1,1,%0\n"
: : "m" (S390_lowcore.user_exec_asce) );
} else
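
A compact sketch of the ASCE composition done by update_mm() above
(userspace C, sketch_* names hypothetical): the low asce_bits are OR'ed
into the physical table origin, and the exec space is pointed at the
shadow pgd only when noexec is active:

struct sketch_context { unsigned long asce_bits; int noexec; };

static void sketch_update_mm(struct sketch_context *ctx,
			     unsigned long pgd_phys,
			     unsigned long shadow_pgd_phys,
			     unsigned long *user_asce,
			     unsigned long *user_exec_asce)
{
	*user_asce = ctx->asce_bits | pgd_phys;
	/* the exec space runs on the read-only shadow tables */
	*user_exec_asce = ctx->asce_bits |
			  (ctx->noexec ? shadow_pgd_phys : pgd_phys);
}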
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 7f29a98..fe7f92b 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -74,43 +74,17 @@ static inline void copy_page(void *to, void *from)
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;
-
-#define pte_val(x) ((x).pte)
-#define pgprot_val(x) ((x).pgprot)
-
-#ifndef __s390x__
-
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
-typedef struct {
- unsigned long pgd0;
- unsigned long pgd1;
- unsigned long pgd2;
- unsigned long pgd3;
- } pgd_t;
-
-#define pmd_val(x) ((x).pmd)
-#define pud_val(x) ((x).pud)
-#define pgd_val(x) ((x).pgd0)
-
-#else /* __s390x__ */
-
-typedef struct {
- unsigned long pmd0;
- unsigned long pmd1;
- } pmd_t;
-typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
+typedef pte_t *pgtable_t;
-#define pmd_val(x) ((x).pmd0)
-#define pmd_val1(x) ((x).pmd1)
+#define pgprot_val(x) ((x).pgprot)
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
-#endif /* __s390x__ */
-
-typedef struct page *pgtable_t;
-
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
@@ -167,7 +141,7 @@ static inline int pfn_valid(unsigned long pfn)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 900d448..af4aee8 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -20,10 +20,11 @@
#define check_pgt_cache() do {} while (0)
unsigned long *crst_table_alloc(struct mm_struct *, int);
-void crst_table_free(unsigned long *);
+void crst_table_free(struct mm_struct *, unsigned long *);
-unsigned long *page_table_alloc(int);
-void page_table_free(unsigned long *);
+unsigned long *page_table_alloc(struct mm_struct *);
+void page_table_free(struct mm_struct *, unsigned long *);
+void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
@@ -80,12 +81,12 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
- unsigned long *crst = crst_table_alloc(mm, s390_noexec);
- if (crst)
- crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
- return (pmd_t *) crst;
+ unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+ if (table)
+ crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+ return (pmd_t *) table;
}
-#define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd)
+#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
@@ -98,63 +99,55 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
- pud_t *shadow_pud = get_shadow_table(pud);
- pmd_t *shadow_pmd = get_shadow_table(pmd);
-
- if (shadow_pud && shadow_pmd)
- pud_populate_kernel(mm, shadow_pud, shadow_pmd);
pud_populate_kernel(mm, pud, pmd);
+ if (mm->context.noexec) {
+ pud = get_shadow_table(pud);
+ pmd = get_shadow_table(pmd);
+ pud_populate_kernel(mm, pud, pmd);
+ }
}
#endif /* __s390x__ */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+ unsigned long *crst;
+
+ INIT_LIST_HEAD(&mm->context.crst_list);
+ INIT_LIST_HEAD(&mm->context.pgtable_list);
+ crst = crst_table_alloc(mm, s390_noexec);
if (crst)
crst_table_init(crst, pgd_entry_type(mm));
return (pgd_t *) crst;
}
-#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
+#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
-static inline void
-pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
{
-#ifndef __s390x__
- pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
- pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
- pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
- pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
-#else /* __s390x__ */
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
- pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
-#endif /* __s390x__ */
}
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
{
- pte_t *pte = (pte_t *)page_to_phys(page);
- pmd_t *shadow_pmd = get_shadow_table(pmd);
- pte_t *shadow_pte = get_shadow_pte(pte);
-
pmd_populate_kernel(mm, pmd, pte);
- if (shadow_pmd && shadow_pte)
- pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
+ if (mm->context.noexec) {
+ pmd = get_shadow_table(pmd);
+ pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
+ }
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define pmd_pgtable(pmd) \
+ (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
* page table entry allocation/free routines.
*/
-#define pte_alloc_one_kernel(mm, vmaddr) \
- ((pte_t *) page_table_alloc(s390_noexec))
-#define pte_alloc_one(mm, vmaddr) \
- virt_to_page(page_table_alloc(s390_noexec))
-
-#define pte_free_kernel(mm, pte) \
- page_table_free((unsigned long *) pte)
-#define pte_free(mm, pte) \
- page_table_free((unsigned long *) page_to_phys((struct page *) pte))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+
+#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
+#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
#endif /* _S390_PGALLOC_H */
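
The new pmd_pgtable() recovers the pte-table origin purely by masking:
sizeof(pte_t) * PTRS_PER_PTE is 2048 on 64-bit, so the negated constant
is an all-ones mask with the low 11 bits clear, stripping the
segment-entry flag bits from pmd_val(). A standalone check of that
arithmetic (the flag value 0x230 is an arbitrary stand-in):

#include <assert.h>

int main(void)
{
	unsigned long origin = 0x12345800UL;	    /* 2K-aligned pte table */
	unsigned long seg_entry = origin | 0x230UL; /* origin + flag bits */
	unsigned long mask = -(8UL * 256UL);	    /* == ~0x7ffUL */

	assert((seg_entry & mask) == origin);
	return 0;
}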
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65d3338..4fc9377 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -57,11 +57,11 @@ extern char empty_zero_page[PAGE_SIZE];
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#ifndef __s390x__
-# define PMD_SHIFT 22
-# define PUD_SHIFT 22
-# define PGDIR_SHIFT 22
+# define PMD_SHIFT 20
+# define PUD_SHIFT 20
+# define PGDIR_SHIFT 20
#else /* __s390x__ */
-# define PMD_SHIFT 21
+# define PMD_SHIFT 20
# define PUD_SHIFT 31
# define PGDIR_SHIFT 31
#endif /* __s390x__ */
@@ -79,17 +79,14 @@ extern char empty_zero_page[PAGE_SIZE];
* for S390 segment-table entries are combined to one PGD
* that leads to 1024 pte per pgd
*/
+#define PTRS_PER_PTE 256
#ifndef __s390x__
-# define PTRS_PER_PTE 1024
-# define PTRS_PER_PMD 1
-# define PTRS_PER_PUD 1
-# define PTRS_PER_PGD 512
+#define PTRS_PER_PMD 1
#else /* __s390x__ */
-# define PTRS_PER_PTE 512
-# define PTRS_PER_PMD 1024
-# define PTRS_PER_PUD 1
-# define PTRS_PER_PGD 2048
+#define PTRS_PER_PMD 2048
#endif /* __s390x__ */
+#define PTRS_PER_PUD 1
+#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0
@@ -376,24 +373,6 @@ extern char empty_zero_page[PAGE_SIZE];
# define PxD_SHADOW_SHIFT 2
#endif /* __s390x__ */
-static inline struct page *get_shadow_page(struct page *page)
-{
- if (s390_noexec && page->index)
- return virt_to_page((void *)(addr_t) page->index);
- return NULL;
-}
-
-static inline void *get_shadow_pte(void *table)
-{
- unsigned long addr, offset;
- struct page *page;
-
- addr = (unsigned long) table;
- offset = addr & (PAGE_SIZE - 1);
- page = virt_to_page((void *)(addr ^ offset));
- return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
-}
-
static inline void *get_shadow_table(void *table)
{
unsigned long addr, offset;
@@ -411,17 +390,16 @@ static inline void *get_shadow_table(void *table)
* hook is made available.
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pteptr, pte_t pteval)
+ pte_t *ptep, pte_t entry)
{
- pte_t *shadow_pte = get_shadow_pte(pteptr);
-
- *pteptr = pteval;
- if (shadow_pte) {
- if (!(pte_val(pteval) & _PAGE_INVALID) &&
- (pte_val(pteval) & _PAGE_SWX))
- pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
+ *ptep = entry;
+ if (mm->context.noexec) {
+ if (!(pte_val(entry) & _PAGE_INVALID) &&
+ (pte_val(entry) & _PAGE_SWX))
+ pte_val(entry) |= _PAGE_RO;
else
- pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+ pte_val(entry) = _PAGE_TYPE_EMPTY;
+ ptep[PTRS_PER_PTE] = entry;
}
}
@@ -536,14 +514,6 @@ static inline int pte_young(pte_t pte)
#define pgd_clear(pgd) do { } while (0)
#define pud_clear(pud) do { } while (0)
-static inline void pmd_clear_kernel(pmd_t * pmdp)
-{
- pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
- pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
- pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
- pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
-}
-
#else /* __s390x__ */
#define pgd_clear(pgd) do { } while (0)
@@ -562,30 +532,27 @@ static inline void pud_clear(pud_t * pud)
pud_clear_kernel(shadow);
}
+#endif /* __s390x__ */
+
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
- pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
-#endif /* __s390x__ */
-
-static inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t *pmd)
{
- pmd_t *shadow_pmd = get_shadow_table(pmdp);
+ pmd_t *shadow = get_shadow_table(pmd);
- pmd_clear_kernel(pmdp);
- if (shadow_pmd)
- pmd_clear_kernel(shadow_pmd);
+ pmd_clear_kernel(pmd);
+ if (shadow)
+ pmd_clear_kernel(shadow);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_t *shadow_pte = get_shadow_pte(ptep);
-
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
- if (shadow_pte)
- pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
+ if (mm->context.noexec)
+ pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
/*
@@ -666,7 +633,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
- /* S390 has 1mb segments, we are emulating 4MB segments */
+ /* pto must point to the start of the segment table */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
/* ipte in zarch mode can do the math */
@@ -680,12 +647,12 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
-static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
+static inline void ptep_invalidate(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
{
__ptep_ipte(address, ptep);
- ptep = get_shadow_pte(ptep);
- if (ptep)
- __ptep_ipte(address, ptep);
+ if (mm->context.noexec)
+ __ptep_ipte(address, ptep + PTRS_PER_PTE);
}
/*
@@ -707,7 +674,7 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
pte_t __pte = *(__ptep); \
if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
- ptep_invalidate(__address, __ptep); \
+ ptep_invalidate(__mm, __address, __ptep); \
else \
pte_clear((__mm), (__address), (__ptep)); \
__pte; \
@@ -718,7 +685,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
- ptep_invalidate(address, ptep);
+ ptep_invalidate(vma->vm_mm, address, ptep);
return pte;
}
@@ -739,7 +706,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
if (full)
pte_clear(mm, addr, ptep);
else
- ptep_invalidate(addr, ptep);
+ ptep_invalidate(mm, addr, ptep);
return pte;
}
@@ -750,7 +717,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
if (pte_write(__pte)) { \
if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
- ptep_invalidate(__addr, __ptep); \
+ ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
} \
})
@@ -760,7 +727,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
({ \
int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \
- ptep_invalidate(__addr, __ptep); \
+ ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
} \
__changed; \
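
The PMD_SHIFT change above follows from the smaller tables: 256 pte
entries of 4K pages map 2^20 bytes, so one segment-table entry now covers
1M rather than the 4M/2M regions of the old layout. A one-line arithmetic
check:

#include <assert.h>

int main(void)
{
	unsigned long page_size = 1UL << 12;		/* 4K pages */
	unsigned long ptrs_per_pte = 256;

	assert(ptrs_per_pte * page_size == 1UL << 20);	/* PMD_SHIFT == 20 */
	return 0;
}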
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 3c8177f..ecac75e 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -95,14 +95,14 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
-static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
+static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
{
if (!tlb->fullmm) {
- tlb->array[tlb->nr_ptes++] = page;
+ tlb->array[tlb->nr_ptes++] = pte;
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pte_free(tlb->mm, page);
+ pte_free(tlb->mm, pte);
}
/*
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 70fa5ae..35fb4f9 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -61,11 +61,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE) {
- pgd_t *shadow = get_shadow_table(mm->pgd);
-
- if (shadow)
- __tlb_flush_idte((unsigned long) shadow | mm->context);
- __tlb_flush_idte((unsigned long) mm->pgd | mm->context);
+ if (mm->context.noexec)
+ __tlb_flush_idte((unsigned long)
+ get_shadow_table(mm->pgd) |
+ mm->context.asce_bits);
+ __tlb_flush_idte((unsigned long) mm->pgd |
+ mm->context.asce_bits);
return;
}
preempt_disable();