about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorHuang, Ying <ying.huang@intel.com>2008-06-02 14:26:18 +0800
committerThomas Gleixner <tglx@linutronix.de>2008-06-05 15:10:02 +0200
commitd0ec2c6f2c2f0478b34ae78b3e65f60a561ac807 (patch)
tree4d4f6d1e11aa45bc4084e733f52d402e9582d1c7 /arch
parentd3fbe5ea9518b46a68e6b278974e92e2c3acef4a (diff)
downloadkernel_samsung_smdk4412-d0ec2c6f2c2f0478b34ae78b3e65f60a561ac807.zip
kernel_samsung_smdk4412-d0ec2c6f2c2f0478b34ae78b3e65f60a561ac807.tar.gz
kernel_samsung_smdk4412-d0ec2c6f2c2f0478b34ae78b3e65f60a561ac807.tar.bz2
x86: reserve highmem pages via reserve_early
This patch makes early reserved highmem pages become reserved pages. This can be used for highmem pages allocated by bootloader such as EFI memory map, linked list of setup_data, etc.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: andi@firstfloor.org
Cc: mingo@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/e820.c11
-rw-r--r--arch/x86/mm/init_32.c3
2 files changed, 13 insertions, 1 deletion
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ac5e9eb..a706e90 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -612,6 +612,17 @@ void __init free_early(u64 start, u64 end)
early_res[j - 1].end = 0;
}
+int __init page_is_reserved_early(unsigned long pagenr)
+{
+ u64 start = (u64)pagenr << PAGE_SHIFT;
+ int i;
+ struct early_res *r;
+
+ i = find_overlapped_early(start, start + PAGE_SIZE);
+ r = &early_res[i];
+ return (i < MAX_EARLY_RES && r->end);
+}
+
void __init early_res_to_bootmem(u64 start, u64 end)
{
int i;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ec30d10..0e7bb5e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -289,7 +289,8 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
- if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn)) &&
+ !page_is_reserved_early(pfn)) {
ClearPageReserved(page);
init_page_count(page);
__free_page(page);