commit		f571eff0a24ed97a919f2b61bb4afdeab4b43002
tree		e9d6c597fafca02720f000cf795a37f2d163f10f /arch/mips
parent		bbf25010f1a6b761914430f5fca081ec8c7accd1
author		Kevin D. Kissell <kevink@mips.com>	2007-08-03 19:38:03 +0200
committer	Ralf Baechle <ralf@linux-mips.org>	2007-10-11 23:45:57 +0100
[MIPS] IRQ Affinity Support for SMTC on Malta Platform
Signed-off-by: Kevin D. Kissell <kevink@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
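The option added below exposes the standard /proc/irq/*/smp_affinity interface on SMTC kernels. As a rough illustration only (not part of this patch), a user-space program can steer an interrupt to a particular TC by writing a hexadecimal CPU bitmask to that file; the IRQ number and mask in this sketch are made-up examples.

/*
 * Illustrative sketch, assuming IRQ 16 exists and CPU 1 (TC 1) is a
 * legal target on this board.  Writes "2" (bitmask for CPU 1) to the
 * procfs affinity file that CONFIG_MIPS_MT_SMTC_IRQAFF makes effective.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/16/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* smp_affinity takes a hex CPU bitmask; 0x2 selects CPU 1 */
	fprintf(f, "2\n");
	fclose(f);
	return EXIT_SUCCESS;
}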
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/Kconfig				| 13
-rw-r--r--	arch/mips/kernel/i8259.c			|  3
-rw-r--r--	arch/mips/kernel/smtc.c				| 63
-rw-r--r--	arch/mips/mips-boards/malta/malta_smtc.c	| 50
4 files changed, 129 insertions, 0 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3b807b4..b09eee2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1378,6 +1378,19 @@ config MIPS_MT_SMTC_IM_BACKSTOP
 	  impact on interrupt service overhead.  Disable it only if you know
 	  what you are doing.
 
+config MIPS_MT_SMTC_IRQAFF
+	bool "Support IRQ affinity API"
+	depends on MIPS_MT_SMTC
+	default n
+	help
+	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
+	  for SMTC Linux kernel. Requires platform support, of which
+	  an example can be found in the MIPS kernel i8259 and Malta
+	  platform code.  It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
+	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
+	  interrupt dispatch, and should be used only if you know what
+	  you are doing.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 3a2d255..4f4359b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -39,6 +39,9 @@ static struct irq_chip i8259A_chip = {
 	.disable	= disable_8259A_irq,
 	.unmask		= enable_8259A_irq,
 	.mask_ack	= mask_and_ack_8259A,
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	.set_affinity	= plat_set_irq_affinity,
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
 /*
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f094043..fe22387 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -606,6 +606,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 	return setup_irq(irq, new);
 }
 
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	/*
+	 * If a "fast path" cache of quickly decodable affinity state
+	 * is maintained, this is where it gets done, on a call up
+	 * from the platform affinity code.
+	 */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+	int target;
+
+	/*
+	 * OK wise guy, now figure out how to get the IRQ
+	 * to be serviced on an authorized "CPU".
+	 *
+	 * Ideally, to handle the situation where an IRQ has multiple
+	 * eligible CPUS, we would maintain state per IRQ that would
+	 * allow a fair distribution of service requests.  Since the
+	 * expected use model is any-or-only-one, for simplicity
+	 * and efficiency, we just pick the easiest one to find.
+	 */
+
+	target = first_cpu(irq_desc[irq].affinity);
+
+	/*
+	 * We depend on the platform code to have correctly processed
+	 * IRQ affinity change requests to ensure that the IRQ affinity
+	 * mask has been purged of bits corresponding to nonexistent and
+	 * offline "CPUs", and to TCs bound to VPEs other than the VPE
+	 * connected to the physical interrupt input for the interrupt
+	 * in question.  Otherwise we have a nasty problem with interrupt
+	 * mask management.  This is best handled in non-performance-critical
+	 * platform IRQ affinity setting code, to minimize interrupt-time
+	 * checks.
+	 */
+
+	/* If no one is eligible, service locally */
+	if (target >= NR_CPUS) {
+		do_IRQ_no_affinity(irq);
+		return;
+	}
+
+	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+
 /*
  * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
  * Within a VPE one TC can interrupt another by different approaches.
@@ -830,6 +884,15 @@ void ipi_decode(struct smtc_ipi *pipi)
 			break;
 		}
 		break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+	case IRQ_AFFINITY_IPI:
+		/*
+		 * Accept a "forwarded" interrupt that was initially
+		 * taken by a TC who doesn't have affinity for the IRQ.
+		 */
+		do_IRQ_no_affinity((int)arg_copy);
+		break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 	default:
 		printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 		break;
diff --git a/arch/mips/mips-boards/malta/malta_smtc.c b/arch/mips/mips-boards/malta/malta_smtc.c
index ae05d05..5c980f4 100644
--- a/arch/mips/mips-boards/malta/malta_smtc.c
+++ b/arch/mips/mips-boards/malta/malta_smtc.c
@@ -88,3 +88,53 @@ void __cpuinit prom_smp_finish(void)
 void prom_cpus_done(void)
 {
 }
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * IRQ affinity hook
+ */
+
+
+void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+	cpumask_t tmask = affinity;
+	int cpu = 0;
+	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
+
+	/*
+	 * On the legacy Malta development board, all I/O interrupts
+	 * are routed through the 8259 and combined in a single signal
+	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
+	 * that signal is brought to IP2 of both VPEs. To avoid racing
+	 * concurrent interrupt service events, IP2 is enabled only on
+	 * one VPE, by convention VPE0.  So long as no bits are ever
+	 * cleared in the affinity mask, there will never be any
+	 * interrupt forwarding.  But as soon as a program or operator
+	 * sets affinity for one of the related IRQs, we need to make
+	 * sure that we don't ever try to forward across the VPE boundary,
+	 * at least not until we engineer a system where the interrupt
+	 * _ack() or _end() function can somehow know that it corresponds
+	 * to an interrupt taken on another VPE, and perform the appropriate
+	 * restoration of Status.IM state using MFTR/MTTR instead of the
+	 * normal local behavior.  We also ensure that no attempt will
+	 * be made to forward to an offline "CPU".
+	 */
+
+	for_each_cpu_mask(cpu, affinity) {
+		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
+			cpu_clear(cpu, tmask);
+	}
+	irq_desc[irq].affinity = tmask;
+
+	if (cpus_empty(tmask))
+		/*
+		 * We could restore a default mask here, but the
+		 * runtime code can anyway deal with the null set
+		 */
+		printk(KERN_WARNING
+		       "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
+
+	/* Do any generic SMTC IRQ affinity setup */
+	smtc_set_irq_affinity(irq, tmask);
+}
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
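For a platform other than Malta, the wiring would follow the same pattern as the i8259 change above: the board's irq_chip gains a .set_affinity hook pointing at a platform handler which, like plat_set_irq_affinity() here, prunes the requested mask and then calls smtc_set_irq_affinity(). The sketch below is illustrative only; the my_board_* identifiers are hypothetical and not part of this patch.

/*
 * Minimal sketch of hooking the SMTC affinity API into another board's
 * interrupt controller.  Assumes the platform provides
 * plat_set_irq_affinity() as malta_smtc.c does; my_board_* are
 * hypothetical handlers, not functions from this patch.
 */
#include <linux/irq.h>

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity);
#endif

static struct irq_chip my_board_irq_chip = {
	.name		= "MYBOARD",		/* hypothetical controller */
	.mask		= my_board_mask_irq,	/* hypothetical */
	.unmask		= my_board_unmask_irq,	/* hypothetical */
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
	/* Route /proc/irq/*/smp_affinity writes to the SMTC-aware hook */
	.set_affinity	= plat_set_irq_affinity,
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
};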