From aa88dea2270f685349ab7b92169600452fe73b62 Mon Sep 17 00:00:00 2001
From: Theodore Ts'o
Date: Mon, 2 Jul 2012 07:52:16 -0400
Subject: random: make 'add_interrupt_randomness()' do something sane

commit 775f4b297b780601e61787b766f306ed3e1d23eb upstream.

We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.

This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool.  Also, we make sure that the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool.  This
ensures that /dev/urandom is returning unpredictable data as soon as
possible.

(Based on an original patch by Linus, but significantly modified by
tytso.)

Tested-by: Eric Wustrow
Reported-by: Eric Wustrow
Reported-by: Nadia Heninger
Reported-by: Zakir Durumeric
Reported-by: J. Alex Halderman
Signed-off-by: Linus Torvalds
Signed-off-by: "Theodore Ts'o"
Signed-off-by: Greg Kroah-Hartman
---
 kernel/irq/handle.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'kernel/irq')

diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 470d08c..10e0772 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,7 +117,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int random = 0, irq = desc->irq_data.irq;
+	unsigned int flags = 0, irq = desc->irq_data.irq;
 
 	do {
 		irqreturn_t res;
@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
-			random |= action->flags;
+			flags |= action->flags;
 			break;
 
 		default:
@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-	if (random & IRQF_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
--
cgit v1.1
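
[Editor's note: the hunks above only update the caller in kernel/irq;
the buffering itself lives in drivers/char/random.c, which this
'kernel/irq'-filtered view does not show. Below is a minimal C sketch
of the scheme the commit message describes: stage interrupt timing
data in a per-CPU pool and mix it into the input pool only once a
second or after 64 interrupts, whichever comes first. The struct
layout and helper names are illustrative assumptions, not the actual
random.c code.]

	/* Per-CPU staging area for interrupt entropy samples (sketch). */
	struct fast_pool {
		__u32		pool[4];
		unsigned long	last;	/* jiffies of the last mix */
		unsigned short	count;	/* samples since the last mix */
	};

	static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

	void add_interrupt_randomness(int irq, int irq_flags)
	{
		struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
		unsigned long now = jiffies;

		/* Cheap on every interrupt: stir the sample into the pool. */
		fast_pool->pool[fast_pool->count++ & 3] ^= get_cycles() ^ irq;

		/*
		 * The expensive part is rate limited: bail out unless 64
		 * samples have accumulated or a second has passed.
		 */
		if ((fast_pool->count & 63) &&
		    !time_after(now, fast_pool->last + HZ))
			return;

		fast_pool->last = now;
		/* mix_pool_bytes()/input_pool as named here are assumed. */
		mix_pool_bytes(&input_pool, fast_pool->pool,
			       sizeof(fast_pool->pool));
	}
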
From b6b847a93be87fc9974d8232984668a5a59754df Mon Sep 17 00:00:00 2001
From: Theodore Ts'o
Date: Sat, 14 Jul 2012 20:27:52 -0400
Subject: random: remove rand_initialize_irq()

commit c5857ccf293968348e5eb4ebedc68074de3dcda6 upstream.

With the new interrupt sampling system, we are no longer using the
timer_rand_state structure in the irq descriptor, so we can stop
initializing it now.

[ Merged in fixes from Sedat to find some last missing references to
  rand_initialize_irq() ]

Signed-off-by: "Theodore Ts'o"
Signed-off-by: Sedat Dilek
Signed-off-by: Greg Kroah-Hartman
---
 kernel/irq/manage.c | 17 -----------------
 1 file changed, 17 deletions(-)

(limited to 'kernel/irq')

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index df8136f..fa4a70e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -886,22 +886,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	if (desc->irq_data.chip == &no_irq_chip)
 		return -ENOSYS;
-	/*
-	 * Some drivers like serial.c use request_irq() heavily,
-	 * so we have to be careful not to interfere with a
-	 * running system.
-	 */
-	if (new->flags & IRQF_SAMPLE_RANDOM) {
-		/*
-		 * This function might sleep, we want to call it first,
-		 * outside of the atomic block.
-		 * Yes, this might clear the entropy pool if the wrong
-		 * driver is attempted to be loaded, without actually
-		 * installing a new handler, but is this really a problem,
-		 * only the sysadmin is able to do this.
-		 */
-		rand_initialize_irq(irq);
-	}
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
@@ -1325,7 +1309,6 @@ EXPORT_SYMBOL(free_irq);
  *	Flags:
  *
  *	IRQF_SHARED		Interrupt is shared
- *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
  *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
  */
--
cgit v1.1

From d0389110863a993f19ba11bdf9ac639df713a1f2 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 3 Nov 2012 11:52:09 +0100
Subject: genirq: Always force thread affinity

commit 04aa530ec04f61875b99c12721162e2964e3318c upstream.

Sankara reported that the genirq core code fails to adjust the
affinity of an interrupt thread in several cases:

 1) On request/setup_irq() the call to setup_affinity() happens before
    the new action is registered, so the new thread is not notified.

 2) For secondary shared interrupts nothing notifies the new thread to
    change its affinity.

 3) Interrupts which have the IRQ_NO_BALANCE flag set do not move the
    thread either.

Fix this by setting the thread affinity flag right at thread creation
time. This ensures that under all circumstances the thread moves to
the right place. Requires a check in irq_thread_check_affinity for an
existing affinity mask (CONFIG_CPU_MASK_OFFSTACK=y).

Reported-and-tested-by: Sankara Muthukrishnan
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1209041738200.2754@ionos
Signed-off-by: Thomas Gleixner
Signed-off-by: Greg Kroah-Hartman
---
 kernel/irq/manage.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

(limited to 'kernel/irq')

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa4a70e..3e1bdf9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -698,6 +698,7 @@ static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 {
 	cpumask_var_t mask;
+	bool valid = true;
 
 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 		return;
@@ -712,10 +713,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}
 
 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->irq_data.affinity);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (desc->irq_data.affinity)
+		cpumask_copy(mask, desc->irq_data.affinity);
+	else
+		valid = false;
 	raw_spin_unlock_irq(&desc->lock);
 
-	set_cpus_allowed_ptr(current, mask);
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
 }
 #else
@@ -925,6 +934,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
+		/*
+		 * Tell the thread to set its affinity. This is
+		 * important for shared interrupt handlers as we do
+		 * not invoke setup_affinity() for the secondary
+		 * handlers as everything is already set up. Even for
+		 * interrupts marked with IRQF_NO_BALANCE this is
+		 * correct as we want the thread to move to the cpu(s)
+		 * on which the requesting code placed the interrupt.
+		 */
+		set_bit(IRQTF_AFFINITY, &new->thread_flags);
 	}
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
--
cgit v1.1
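
[Editor's note: a short usage sketch of the case the patch above
fixes. Device and function names are illustrative. With a threaded
handler registered via request_threaded_irq(), __setup_irq() now sets
IRQTF_AFFINITY when the handler thread is created, so the thread
migrates to the interrupt's affinity mask even for shared/secondary
handlers and for interrupts marked as non-balancing.]

	static irqreturn_t mydev_quick_check(int irq, void *dev_id)
	{
		/* Hard-irq context: ack the device, defer the real work. */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
	{
		/*
		 * Runs in the irq/<nr>-mydev kthread. With the fix above,
		 * this thread follows the interrupt's affinity, e.g. a
		 * mask written to /proc/irq/<nr>/smp_affinity.
		 */
		return IRQ_HANDLED;
	}

	ret = request_threaded_irq(irq, mydev_quick_check, mydev_thread_fn,
				   IRQF_SHARED, "mydev", dev);
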