path: root/kernel/rcutree_plugin.h
author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-01-12 14:10:23 -0800
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-05 23:16:54 -0700
commit     a26ac2455ffcf3be5c6ef92bc6df7182700f2114 (patch)
tree       601697c7c3fd152a1b3f29f43d3a028fc8fefd5f /kernel/rcutree_plugin.h
parent     12f5f524cafef3ab689929b118f2dfb8bf2be321 (diff)
rcu: move TREE_RCU from softirq to kthread
If RCU priority boosting is to be meaningful, callback invocation must be boosted in addition to preempted RCU readers. Otherwise, in the presence of CPU-bound real-time threads, the grace period ends but the callbacks don't get invoked. If the callbacks don't get invoked, the associated memory doesn't get freed, so the system is still subject to OOM.

But it is not reasonable to priority-boost RCU_SOFTIRQ, so this commit moves the callback invocations to a kthread, which can be boosted easily.

Also add comments and properly synchronize all accesses to rcu_cpu_kthread_task, as suggested by Lai Jiangshan.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
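For context, the overall shape of the softirq-to-kthread conversion is sketched below. This is an illustrative sketch only, not the code added by this commit: the rcu_cpu_has_work flag, the stubbed-out rcu_process_callbacks(), and the simplified wakeup/loop logic are stand-ins, and the real patch additionally handles kthread creation, CPU binding, hotplug, and priority setup.

#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* Per-CPU kthread that invokes RCU callbacks, taking over from RCU_SOFTIRQ. */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
static DEFINE_PER_CPU(unsigned int, rcu_cpu_has_work);

/* Stub standing in for the existing callback-invocation machinery. */
static void rcu_process_callbacks(void)
{
	/* The real function advances the RCU state machine and invokes
	 * any callbacks whose grace period has already completed. */
}

/*
 * Wake this CPU's callback kthread; called where raise_softirq(RCU_SOFTIRQ)
 * used to be called.  Safe to call with irqs disabled.
 */
static void invoke_rcu_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL)
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Per-CPU kthread main loop: sleep until woken, then invoke callbacks.
 * Because this runs as an ordinary task (bound to its CPU by setup code
 * not shown here), it can be priority-boosted, e.g. made SCHED_FIFO, so
 * that CPU-bound real-time threads cannot starve callback invocation.
 */
static int rcu_cpu_kthread(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!__this_cpu_read(rcu_cpu_has_work))
			schedule();
		__set_current_state(TASK_RUNNING);
		__this_cpu_write(rcu_cpu_has_work, 0);
		rcu_process_callbacks();
	}
	return 0;
}

The diff reproduced below covers only the kernel/rcutree_plugin.h part of the change: the two places in this file that previously raised RCU_SOFTIRQ now call invoke_rcu_kthread() instead.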
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 774f010..b9bd69a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1206,7 +1206,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
*
* Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do a
- * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
+ * invoke_rcu_kthread() to cause rcu_process_callbacks() to be invoked later.
* The per-cpu rcu_dyntick_drain variable controls the sequencing.
*/
int rcu_needs_cpu(int cpu)
@@ -1257,7 +1257,7 @@ int rcu_needs_cpu(int cpu)
	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_kthread();
	return c;
}