author    Jan Glauber <jang@linux.vnet.ibm.com>  2011-05-19 16:55:26 -0600
committer Rusty Russell <rusty@rustcorp.com.au>  2011-05-19 16:55:26 +0930
commit    448694a1d50432be63aafccb42d6f54d8cf3d02c (patch)
tree      bd066b7cdb04e8bdc61efdeaa0e358269f185f7e
parent    4d10380e720a3ce19dbe88d0133f66ded07b6a8f (diff)
module: undo module RONX protection correctly.
While debugging I stumbled over two problems in the code that protects module pages.

The first issue is that disabling the protection before freeing the init section or unloading a module is not symmetric with enabling it. For instance, when pages are set to RO, the range from module_core to module_core + core_ro_size is protected; but when the module is unloaded, the range from module_core to module_core + core_size is set back to RW, so pages that were never set to RO are also changed to RW. This is not critical, but IMHO it should be symmetric.

The second issue is that while set_memory_rw and set_memory_ro are used for the RO/RW changes, only set_memory_nx is used on the NX/X side. One would expect the inverse function to be called when the NX protection is removed, which is not the case here, unless I'm missing something.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  arch/s390/include/asm/cacheflush.h |  1
-rw-r--r--  arch/s390/mm/pageattr.c            |  5
-rw-r--r--  kernel/module.c                    | 25
3 files changed, 19 insertions(+), 12 deletions(-)
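Both branches of the kernel/module.c fix funnel the undo through the existing set_page_attributes() helper, which applies a set_memory_*() callback to the whole pages of an address range. A minimal sketch of such a helper, with the signature inferred from the calls in the hunk below (illustrative only, the real implementation may differ in detail):

	static void set_page_attributes(void *start, void *end,
					int (*set)(unsigned long start, int num_pages))
	{
		unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
		unsigned long end_pfn = PFN_DOWN((unsigned long)end);

		/* Apply the attribute change to every full page in [start, end). */
		if (end_pfn > begin_pfn)
			set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}

Passing set_memory_rw or set_memory_x as the callback keeps the undo limited to exactly the pages that were protected in the first place.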
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 43a5c78..3e20383 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -11,5 +11,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
 
 #endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 0607e4b..f05edcc 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -54,3 +54,8 @@ int set_memory_nx(unsigned long addr, int numpages)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+	return 0;
+}
diff --git a/kernel/module.c b/kernel/module.c
index 92112c9..b99dceb 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1607,22 +1607,23 @@ static void set_section_ro_nx(void *base,
 	}
 }
 
-/* Setting memory back to RW+NX before releasing it */
+/* Setting memory back to W+X before releasing it */
 void unset_section_ro_nx(struct module *mod, void *module_region)
 {
-	unsigned long total_pages;
-
 	if (mod->module_core == module_region) {
-		/* Set core as NX+RW */
-		total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
-		set_memory_nx((unsigned long)mod->module_core, total_pages);
-		set_memory_rw((unsigned long)mod->module_core, total_pages);
-
+		set_page_attributes(mod->module_core + mod->core_text_size,
+			mod->module_core + mod->core_size,
+			set_memory_x);
+		set_page_attributes(mod->module_core,
+			mod->module_core + mod->core_ro_size,
+			set_memory_rw);
 	} else if (mod->module_init == module_region) {
-		/* Set init as NX+RW */
-		total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
-		set_memory_nx((unsigned long)mod->module_init, total_pages);
-		set_memory_rw((unsigned long)mod->module_init, total_pages);
+		set_page_attributes(mod->module_init + mod->init_text_size,
+			mod->module_init + mod->init_size,
+			set_memory_x);
+		set_page_attributes(mod->module_init,
+			mod->module_init + mod->init_ro_size,
+			set_memory_rw);
 	}
 }
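With this change the undo mirrors the ranges used when protection is applied. For comparison, the enable side covers roughly the following ranges (an illustrative sketch based on the section layout implied by this patch, where core_ro_size spans text plus rodata and core_text_size spans text only; not a verbatim copy of set_section_ro_nx()):

	/* enable: text + rodata become RO, everything after the text becomes NX */
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_ro);
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_nx);

The undo above calls set_memory_rw and set_memory_x over exactly the same two ranges, which is the symmetry the commit message asks for.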