Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/bitops.h    1
-rw-r--r--  arch/tile/include/asm/highmem.h  10
-rw-r--r--  arch/tile/include/asm/irqflags.h 36
-rw-r--r--  arch/tile/include/asm/pgtable.h   5
-rw-r--r--  arch/tile/kernel/hardwall.c       1
-rw-r--r--  arch/tile/kernel/irq.c            4
-rw-r--r--  arch/tile/mm/highmem.c           85
7 files changed, 51 insertions, 91 deletions
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 6832b4b..6d4f0ff 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -120,6 +120,7 @@ static inline unsigned long __arch_hweight64(__u64 w)
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix.h>
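Note: the new include makes the generic find_first_bit()/find_next_bit() declarations explicit for this arch rather than relying on them arriving indirectly through other headers. A minimal usage sketch of those helpers (illustrative only, not part of this diff):

	#include <linux/kernel.h>
	#include <linux/bitops.h>

	static void example_walk(const unsigned long *map, unsigned long nbits)
	{
		unsigned long bit;

		/* Visit every set bit using the generic find helpers. */
		for (bit = find_first_bit(map, nbits);
		     bit < nbits;
		     bit = find_next_bit(map, nbits, bit + 1))
			pr_info("bit %lu is set\n", bit);
	}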
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index d155db6..e0f7ee18 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,12 +60,12 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
void kmap_atomic_fix_kpte(struct page *page, int finished);
#define flush_cache_kmaps() do { } while (0)
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 6ebdd7d..641e4ff 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -103,55 +103,57 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
/* Disable interrupts. */
-#define raw_local_irq_disable() \
+#define arch_local_irq_disable() \
interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
/* Disable all interrupts, including NMIs. */
-#define raw_local_irq_disable_all() \
+#define arch_local_irq_disable_all() \
interrupt_mask_set_mask(-1UL)
/* Re-enable all maskable interrupts. */
-#define raw_local_irq_enable() \
+#define arch_local_irq_enable() \
interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask))
/* Disable or enable interrupts based on flag argument. */
-#define raw_local_irq_restore(disabled) do { \
+#define arch_local_irq_restore(disabled) do { \
if (disabled) \
- raw_local_irq_disable(); \
+ arch_local_irq_disable(); \
else \
- raw_local_irq_enable(); \
+ arch_local_irq_enable(); \
} while (0)
/* Return true if "flags" argument means interrupts are disabled. */
-#define raw_irqs_disabled_flags(flags) ((flags) != 0)
+#define arch_irqs_disabled_flags(flags) ((flags) != 0)
/* Return true if interrupts are currently disabled. */
-#define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
+#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)
/* Save whether interrupts are currently disabled. */
-#define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled())
+#define arch_local_save_flags() arch_irqs_disabled()
/* Save whether interrupts are currently disabled, then disable them. */
-#define raw_local_irq_save(flags) \
- do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0)
+#define arch_local_irq_save() ({ \
+ unsigned long __flags = arch_local_save_flags(); \
+ arch_local_irq_disable(); \
+ __flags; })
/* Prevent the given interrupt from being enabled next time we enable irqs. */
-#define raw_local_irq_mask(interrupt) \
+#define arch_local_irq_mask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
/* Prevent the given interrupt from being enabled immediately. */
-#define raw_local_irq_mask_now(interrupt) do { \
- raw_local_irq_mask(interrupt); \
+#define arch_local_irq_mask_now(interrupt) do { \
+ arch_local_irq_mask(interrupt); \
interrupt_mask_set(interrupt); \
} while (0)
/* Allow the given interrupt to be enabled next time we enable irqs. */
-#define raw_local_irq_unmask(interrupt) \
+#define arch_local_irq_unmask(interrupt) \
(__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
-#define raw_local_irq_unmask_now(interrupt) do { \
- raw_local_irq_unmask(interrupt); \
+#define arch_local_irq_unmask_now(interrupt) do { \
+ arch_local_irq_unmask(interrupt); \
if (!irqs_disabled()) \
interrupt_mask_reset(interrupt); \
} while (0)
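Note: the raw_* to arch_* rename tracks the generic irqflags consolidation: the common local_irq_* macros are now defined once in include/linux/irqflags.h on top of a small arch_* contract, and arch_local_irq_save() returns the flags by value instead of assigning through a macro argument. A simplified sketch of how the generic layer builds on these (tracing hooks omitted; see include/linux/irqflags.h for the real definitions):

	#define local_irq_save(flags)					\
		do {							\
			typecheck(unsigned long, flags);		\
			flags = arch_local_irq_save();			\
		} while (0)

	#define local_irq_restore(flags)				\
		do {							\
			typecheck(unsigned long, flags);		\
			arch_local_irq_restore(flags);			\
		} while (0)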
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index b336737..dc4ccdd 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -347,15 +347,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
#define pte_offset_map(dir, address) \
_pte_offset_map(dir, address, KM_PTE0)
-#define pte_offset_map_nested(dir, address) \
- _pte_offset_map(dir, address, KM_PTE1)
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
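Note: with explicit km_type slots gone, the _nested variants are redundant. The per-cpu kmap stack introduced in mm/highmem.c below hands out slots in LIFO order, so two simultaneous PTE mappings nest naturally under the one API. An illustrative caller-side sketch (assumption: loosely modeled on mm/memory.c-style copy loops, with locking elided):

	src_pte = pte_offset_map(src_pmd, addr);
	dst_pte = pte_offset_map(dst_pmd, addr);  /* was pte_offset_map_nested() */
	/* ... copy or compare entries ... */
	pte_unmap(dst_pte);                       /* unmap in LIFO order */
	pte_unmap(src_pte);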
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 584b965..1e54a78 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -774,6 +774,7 @@ static const struct file_operations dev_hardwall_fops = {
#endif
.flush = hardwall_flush,
.release = hardwall_release,
+ .llseek = noop_llseek,
};
static struct cdev hardwall_dev;
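Note: the explicit .llseek initializer is part of the tree-wide llseek push-down: with the BKL removed from the default seek path, every file_operations must now state its seek behavior. noop_llseek() "succeeds" without moving the position, which suits a char device like this where seeking is meaningless but should not fail. For reference, its upstream implementation (fs/read_write.c) is a one-liner:

	loff_t noop_llseek(struct file *file, loff_t offset, int origin)
	{
		/* Report success but leave f_pos untouched. */
		return file->f_pos;
	}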
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 394d554..e639176 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
}
static struct irq_chip tile_irq_chip = {
- .typename = "tile_irq_chip",
+ .name = "tile_irq_chip",
.ack = tile_irq_chip_ack,
.eoi = tile_irq_chip_eoi,
.mask = tile_irq_chip_mask,
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 12ab137..8ef6595 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -56,50 +56,6 @@ void kunmap(struct page *page)
}
EXPORT_SYMBOL(kunmap);
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- static unsigned warn_count = 10;
-
- if (unlikely(warn_count == 0))
- return;
-
- if (unlikely(in_interrupt())) {
- if (in_irq()) {
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_BIO_SRC_IRQ &&
- /* type != KM_BIO_DST_IRQ && */
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (!irqs_disabled()) { /* softirq */
- if (type != KM_IRQ0 && type != KM_IRQ1 &&
- type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
- type != KM_SKB_SUNRPC_DATA &&
- type != KM_SKB_DATA_SOFTIRQ &&
- type != KM_BOUNCE_READ) {
- WARN_ON(1);
- warn_count--;
- }
- }
- }
-
- if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
- type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
- if (!irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
- if (irq_count() == 0 && !irqs_disabled()) {
- WARN_ON(1);
- warn_count--;
- }
- }
-#endif
-}
-
/*
* Describe a single atomic mapping of a page on a given cpu at a
* given address, and allow it to be linked into a list.
@@ -240,10 +196,10 @@ void kmap_atomic_fix_kpte(struct page *page, int finished)
* When holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
- enum fixed_addresses idx;
unsigned long vaddr;
+ int idx, type;
pte_t *pte;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
@@ -255,8 +211,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
if (!PageHighMem(page))
return page_address(page);
- debug_kmap_atomic_prot(type);
-
+ type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
pte = kmap_get_pte(vaddr);
@@ -269,25 +224,31 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
}
EXPORT_SYMBOL(kmap_atomic_prot);
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
{
/* PAGE_NONE is a magic value that tells us to check immutability. */
- return kmap_atomic_prot(page, type, PAGE_NONE);
+ return kmap_atomic_prot(page, PAGE_NONE);
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
- /*
- * Force other mappings to Oops if they try to access this pte without
- * first remapping it. Keeping stale mappings around is a bad idea.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+ if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+ vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
pte_t *pte = kmap_get_pte(vaddr);
pte_t pteval = *pte;
+ int idx, type;
+
+ type = kmap_atomic_idx_pop();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+
+ /*
+ * Force other mappings to Oops if they try to access this pte
+ * without first remapping it. Keeping stale mappings around
+ * is a bad idea.
+ */
BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
kmap_atomic_unregister(pte_page(pteval), vaddr);
kpte_clear_flush(pte, vaddr);
@@ -300,19 +261,19 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
arch_flush_lazy_mmu_mode();
pagefault_enable();
}
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This API is supposed to allow us to map memory without a "struct page".
* Currently we don't support this, though this may change in the future.
*/
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
{
- return kmap_atomic(pfn_to_page(pfn), type);
+ return kmap_atomic(pfn_to_page(pfn));
}
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
- return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+ return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
struct page *kmap_atomic_to_page(void *ptr)
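Note: the crux of this highmem.c rework is replacing caller-chosen km_type slots with a per-cpu LIFO stack. kmap_atomic_prot() claims the next free slot via kmap_atomic_idx_push(), and __kunmap_atomic() recovers it via kmap_atomic_idx_pop(), which is why the unmap path no longer needs a type argument and can get by with a range check of vaddr against the fixmap kmap window. A sketch of the stack helpers (assumption: modeled on the generic versions added to include/linux/highmem.h by the same series, with the CONFIG_DEBUG_HIGHMEM sanity checks omitted):

	DECLARE_PER_CPU(int, __kmap_atomic_idx);

	static inline int kmap_atomic_idx_push(void)
	{
		/* Claim the next slot for this cpu; callers have already
		 * raised the preempt count (pagefault_disable() in
		 * kmap_atomic_prot()), so this cannot race. */
		return __get_cpu_var(__kmap_atomic_idx)++;
	}

	static inline int kmap_atomic_idx_pop(void)
	{
		/* Release the most recently claimed slot (LIFO order)
		 * and return its index. */
		return --__get_cpu_var(__kmap_atomic_idx);
	}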