author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-06 17:24:58 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-02-06 17:24:58 -0800
commit     0670afdf0e69e5e73c8358da9c39bf3a8807b03e (patch)
tree       671e6030b1ad9c38513c1cba9a9116f9dee8d41a /arch
parent     dda2ac15d23b38e4335e858848aa8c9a6710304f (diff)
parent     51099005ab8e09d68a13fea8d55bc739c1040ca6 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
  [IA64] swiotlb abstraction (e.g. for Xen)
  [IA64] swiotlb cleanup
  [IA64] make swiotlb use bus_to_virt/virt_to_bus
  [IA64] swiotlb bug fixes
  [IA64] Hook up getcpu system call for IA64
  [IA64] clean up sparsemem memory_present call
  [IA64] show_mem() for IA64 sparsemem NUMA
  [IA64] missing exports hwsw_sync_...
  [IA64] virt_to_page() can be called with NULL arg
  [IA64] alignment bug in ldscript
  [IA64] register memory ranges in a consistent manner
  [IA64] Enable SWIOTLB only when needed
  [IA64-SGI] Check for TIO errors on shub2 Altix
  [IA64] remove bogus prototype ia64_esi_init()
  [IA64] Clear IRQ affinity when unregistered
  [IA64] fix ACPI Kconfig issues
  [IA64] Fix NULL-pointer dereference in ia64_machine_kexec()
  [IA64] find thread for user rbs address
  [IA64] use snprintf() on features field of /proc/cpuinfo
  [IA64] enable singlestep on system call
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Kconfig                    9
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c     4
-rw-r--r--  arch/ia64/kernel/crash.c            16
-rw-r--r--  arch/ia64/kernel/crash_dump.c        3
-rw-r--r--  arch/ia64/kernel/efi.c               2
-rw-r--r--  arch/ia64/kernel/entry.S             2
-rw-r--r--  arch/ia64/kernel/iosapic.c           5
-rw-r--r--  arch/ia64/kernel/machine_kexec.c    15
-rw-r--r--  arch/ia64/kernel/process.c          16
-rw-r--r--  arch/ia64/kernel/ptrace.c           14
-rw-r--r--  arch/ia64/kernel/setup.c            31
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       1
-rw-r--r--  arch/ia64/mm/contig.c               76
-rw-r--r--  arch/ia64/mm/discontig.c            46
-rw-r--r--  arch/ia64/mm/init.c                 38
-rw-r--r--  arch/ia64/sn/kernel/huberror.c      16
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c     2
17 files changed, 183 insertions, 113 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe2..f1d2899 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
config IA64
bool
+ select PCI if (!IA64_HP_SIM)
+ select ACPI if (!IA64_HP_SIM)
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
config SWIOTLB
bool
- default y
config RWSEM_XCHGADD_ALGORITHM
bool
@@ -84,10 +85,9 @@ choice
config IA64_GENERIC
bool "generic"
- select ACPI
- select PCI
select NUMA
select ACPI_NUMA
+ select SWIOTLB
help
This selects the system type of your hardware. A "generic" kernel
will run on any supported IA-64 system. However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
config IA64_DIG
bool "DIG-compliant"
+ select SWIOTLB
config IA64_HP_ZX1
bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
+ select SWIOTLB
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
config IA64_HP_SIM
bool "Ski-simulator"
+ select SWIOTLB
endchoice
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637..2153bca 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
EXPORT_SYMBOL(hwsw_dma_supported);
EXPORT_SYMBOL(hwsw_alloc_coherent);
EXPORT_SYMBOL(hwsw_free_coherent);
+EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_single_for_device);
+EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d..9d92097 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -79,6 +79,7 @@ crash_save_this_cpu()
final_note(buf);
}
+#ifdef CONFIG_SMP
static int
kdump_wait_cpu_freeze(void)
{
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
}
return 1;
}
+#endif
void
machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
static void
machine_kdump_on_init(void)
{
+ if (!ia64_kimage) {
+ printk(KERN_NOTICE "machine_kdump_on_init(): "
+ "kdump not configured\n");
+ return;
+ }
local_irq_disable();
kexec_disable_iosapic();
machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
atomic_inc(&kdump_cpu_freezed);
kdump_status[cpuid] = 1;
mb();
- if (cpuid == 0) {
- for (;;)
- cpu_relax();
- } else
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpuid != 0)
ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#endif
+ for (;;)
+ cpu_relax();
}
static int
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91..da60e90 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d..6c03928 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
#endif
return __va(md->phys_addr);
}
- printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+ printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__);
return NULL;
}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed..e7873ee 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
data8 sys_sync_file_range // 1300
data8 sys_tee
data8 sys_vmsplice
+ data8 sys_ni_syscall // reserved for move_pages
+ data8 sys_getcpu
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7..d6aab40 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
/* Clear the interrupt controller descriptor */
idesc->chip = &no_irq_type;
+#ifdef CONFIG_SMP
+ /* Clear affinity */
+ cpus_setall(idesc->affinity);
+#endif
+
/* Clear the interrupt information */
memset(&iosapic_intr_info[vector], 0,
sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f..4f0f3b8 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
#include <linux/kexec.h>
#include <linux/cpu.h>
#include <linux/irq.h>
+#include <linux/efi.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
-void machine_shutdown(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- cpu_down(cpu);
- }
- kexec_disable_iosapic();
-}
-
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
-extern void *efi_get_pal_addr(void);
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
unsigned long vector;
int ii;
+ BUG_ON(!image);
if (image->type == KEXEC_TYPE_CRASH) {
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
void machine_kexec(struct kimage *image)
{
+ BUG_ON(!image);
unw_init_running(ia64_machine_kexec, image);
for(;;);
}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685ab..ae96d41 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
#include <asm/ia32.h>
#include <asm/irq.h>
#include <asm/kdebug.h>
+#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
ia64_pal_halt(min_power_state);
}
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ cpu_down(cpu);
+ }
+#endif
+#ifdef CONFIG_KEXEC
+ kexec_disable_iosapic();
+#endif
+}
+
void
machine_restart (char *restart_cmd)
{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e4..3f89187 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
*/
list_for_each_safe(this, next, &current->children) {
p = list_entry(this, struct task_struct, sibling);
- if (p->mm != mm)
+ if (p->tgid != child->tgid)
continue;
if (thread_matches(p, addr)) {
child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child_psr->ss = 0;
child_psr->tb = 0;
}
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
* Make sure the single step/taken-branch trap bits
* are not set:
*/
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
ia64_psr(pt)->ss = 0;
ia64_psr(pt)->tb = 0;
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
goto out_tsk;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (request == PTRACE_SINGLESTEP) {
ia64_psr(pt)->ss = 1;
} else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
}
-void
+static void
syscall_trace (void)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
/*
* The 0x80 provides a way for the tracing parent to
* distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
audit_syscall_exit(success, result);
}
- if (test_thread_flag(TIF_SYSCALL_TRACE)
+ if ((test_thread_flag(TIF_SYSCALL_TRACE)
+ || test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
syscall_trace();
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d..83c2629 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
{ 1UL << 1, "spontaneous deferral"},
{ 1UL << 2, "16-byte atomic ops" }
};
- char features[128], *cp, sep;
+ char features[128], *cp, *sep;
struct cpuinfo_ia64 *c = v;
unsigned long mask;
unsigned long proc_freq;
- int i;
+ int i, size;
mask = c->features;
/* build the feature string: */
- memcpy(features, " standard", 10);
+ memcpy(features, "standard", 9);
cp = features;
- sep = 0;
- for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+ size = sizeof(features);
+ sep = "";
+ for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
if (mask & feature_bits[i].mask) {
- if (sep)
- *cp++ = sep;
- sep = ',';
- *cp++ = ' ';
- strcpy(cp, feature_bits[i].feature_name);
- cp += strlen(feature_bits[i].feature_name);
+ cp += snprintf(cp, size, "%s%s", sep,
+ feature_bits[i].feature_name),
+ sep = ", ";
mask &= ~feature_bits[i].mask;
+ size = sizeof(features) - (cp - features);
}
}
- if (mask) {
- /* print unknown features as a hex value: */
- if (sep)
- *cp++ = sep;
- sprintf(cp, " 0x%lx", mask);
+ if (mask && size > 1) {
+ /* print unknown features as a hex value */
+ snprintf(cp, size, "%s0x%lx", sep, mask);
}
proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
"model name : %s\n"
"revision : %u\n"
"archrev : %u\n"
- "features :%s\n" /* don't change this---it _is_ right! */
+ "features : %s\n"
"cpu number : %lu\n"
"cpu regs : %u\n"
"cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0..8f3d006 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -157,6 +157,7 @@ SECTIONS
}
#endif
+ . = ALIGN(8);
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551..63e6d49 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
#endif
/**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
*
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
*/
-void
-show_mem (void)
+void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
+ int i, total_reserved = 0;
+ int total_shared = 0, total_cached = 0;
+ unsigned long total_present = 0;
+ pg_data_t *pgdat;
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
-
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- for (i = 0; i < max_mapnr; i++) {
- if (!pfn_valid(i)) {
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+ int shared = 0, cached = 0, reserved = 0;
+
+ pgdat_resize_lock(pgdat, &flags);
+ present = pgdat->node_present_pages;
+ for(i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ if (pfn_valid(pgdat->node_start_pfn + i))
+ page = pfn_to_page(pgdat->node_start_pfn + i);
+ else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
- if (max_gap < LARGE_GAP)
- continue;
- i = vmemmap_find_next_valid_pfn(0, i) - 1;
+ if (max_gap < LARGE_GAP)
+ continue;
#endif
- continue;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
+ continue;
+ }
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (page_count(page))
+ shared += page_count(page)-1;
}
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
+ pgdat_resize_unlock(pgdat, &flags);
+ total_present += present;
+ total_reserved += reserved;
+ total_cached += cached;
+ total_shared += shared;
+ printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
+ "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+ present, reserved, shared, cached);
}
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
- printk(KERN_INFO "%ld pages in page table cache\n",
+ printk(KERN_INFO "%ld pages of RAM\n", total_present);
+ printk(KERN_INFO "%d reserved pages\n", total_reserved);
+ printk(KERN_INFO "%d pages shared\n", total_shared);
+ printk(KERN_INFO "%d pages swap cached\n", total_cached);
+ printk(KERN_INFO "Total of %ld pages in page table cache\n",
pgtable_quicklist_total_size());
+ printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}
+
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@@ -177,7 +199,7 @@ find_memory (void)
#ifdef CONFIG_CRASH_DUMP
/* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is * reset. */
+ * size before original memory map is reset. */
saved_max_pfn = max_pfn;
#endif
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb..6eae596 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
return;
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
- void *arg)
-{
- int nid;
-
- start = __pa(start) >> PAGE_SHIFT;
- end = __pa(end) >> PAGE_SHIFT;
- nid = early_pfn_to_nid(start);
- memory_present(nid, start, end);
-
- return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
- efi_memmap_walk(register_sparse_mem, NULL);
- sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
/**
* find_memory - walk the EFI memory map and setup the bootmem allocator
*
@@ -473,6 +442,9 @@ void __init find_memory(void)
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
+
+ efi_memmap_walk(register_active_ranges, NULL);
+
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
max_pfn = max_low_pfn;
find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we still need to know the real mem
+ * size before original memory map is reset. */
+ saved_max_pfn = max_pfn;
+#endif
}
#ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
- add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
mem_data[node].num_physpages += len >> PAGE_SHIFT;
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- arch_sparse_init();
-
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
+
#ifdef CONFIG_VIRTUAL_MEM_MAP
vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae..faaca21 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
#include <asm/a.out.h>
#include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+ unsigned long pg_addr, end;
+
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+ struct page *page = virt_to_page(pg_addr);
+ set_bit(PG_arch_1, &page->flags);
+ pg_addr += PAGE_SIZE;
+ }
+}
+
inline void
ia64_set_rbs_bot (void)
{
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
- add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+ int nid = paddr_to_nid(__pa(start));
+
+ if (nid < 0)
+ nid = 0;
+#ifdef CONFIG_KEXEC
+ if (start > crashk_res.start && start < crashk_res.end)
+ start = crashk_res.end;
+ if (end > crashk_res.start && end < crashk_res.end)
+ end = crashk_res.start;
+#endif
+
+ if (start < end)
+ add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+ __pa(end) >> PAGE_SHIFT);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd..fcf7f93 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
- panic("hubii_eint_handler(): Fatal TIO Error");
+ panic("%s: Fatal %s Error", __FUNCTION__,
+ ((nasid & 1) ? "TIO" : "HUBII"));
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
(void)hubiio_crb_error_handler(hubdev_info);
- } else
- bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
+ } else
+ if (nasid & 1) { /* TIO errors */
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+ (u64) nasid, 0, 0, 0, 0, 0, 0);
+
+ if ((int)ret_stuff.v0)
+ panic("%s: Fatal TIO Error", __FUNCTION__);
+ } else
+ bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
return IRQ_HANDLED;
}
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa..eb18be5 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
.dma_supported = NULL,
};
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)