Diffstat (limited to 'kernel')
-rw-r--r--  kernel/hrtimer.c        |  1
-rw-r--r--  kernel/irq/Makefile     |  3
-rw-r--r--  kernel/irq/migration.c  |  5
-rw-r--r--  kernel/panic.c          |  1
-rw-r--r--  kernel/sched.c          | 62
-rw-r--r--  kernel/sys_ni.c         | 12
-rw-r--r--  kernel/timer.c          | 29
7 files changed, 79 insertions(+), 34 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f181ff4..d2a7296 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
+ cpu_relax();
}
}
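For context: hrtimer_cancel() spins until hrtimer_try_to_cancel() stops reporting that the callback is running (the "ret >= 0" check above), and this hunk adds a cpu_relax() to that busy-wait. The snippet below is a minimal userspace sketch of the same try-then-relax pattern, assuming an x86 toolchain for the _mm_pause() hint; every name in it is invented and it is not the kernel code.

/* Userspace analogue of a try-then-relax cancel loop (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <immintrin.h>

static atomic_bool callback_running = false;

/* Pretend "try to cancel": succeeds only while no callback is in flight. */
static bool try_to_cancel(void)
{
	return !atomic_load_explicit(&callback_running, memory_order_acquire);
}

static void cancel_sync(void)
{
	for (;;) {
		if (try_to_cancel())
			return;
		/*
		 * Stand-in for cpu_relax(): hint that we are spinning, so the
		 * thread running the callback is not starved of the cache line
		 * (on x86 this emits a PAUSE instruction).
		 */
		_mm_pause();
	}
}

int main(void)
{
	cancel_sync();	/* returns immediately; no callback is running here */
	puts("cancelled");
	return 0;
}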
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2b33f85..9f77f50 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,4 +1,5 @@
-obj-y := handle.o manage.o spurious.o migration.o
+obj-y := handle.o manage.o spurious.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 52a8655..134f9f2 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,6 +1,5 @@
-#include <linux/irq.h>
-#if defined(CONFIG_GENERIC_PENDING_IRQ)
+#include <linux/irq.h>
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
@@ -61,5 +60,3 @@ void move_native_irq(int irq)
}
cpus_clear(pending_irq_cpumask[irq]);
}
-
-#endif
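The two hunks above replace an in-source #if defined(CONFIG_GENERIC_PENDING_IRQ) guard around the whole of migration.c with a conditional make rule, so the file is simply not compiled when the option is off. Below is a minimal sketch of the in-file style that was removed, using a made-up FEATURE_X switch (compile with -DFEATURE_X); the patch's point is that migration.c no longer needs such a guard at all, because the equivalent decision now lives in the obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o rule.

/*
 * Illustration of an in-source compile-time switch, with invented names.
 * The patch moves this kind of decision out of the .c file and into the
 * build system, which also avoids compiling an empty translation unit.
 */
#include <stdio.h>

#ifdef FEATURE_X			/* old style: the file guards itself */
static void feature_x(void) { puts("feature_x built in"); }
#else
static void feature_x(void) { puts("feature_x compiled out"); }
#endif

int main(void)
{
	feature_x();
	return 0;
}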
diff --git a/kernel/panic.c b/kernel/panic.c
index f895c7c..cc2a4c9 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,6 @@ static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
diff --git a/kernel/sched.c b/kernel/sched.c
index dd153d6..365f0b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,13 +665,55 @@ static int effective_prio(task_t *p)
}
/*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switches decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+ int limit;
+
+ /*
+ * Arrays were recently switched, all is well
+ */
+ if (!rq->expired_timestamp)
+ return 0;
+
+ limit = STARVATION_LIMIT * rq->nr_running;
+
+ /*
+ * It's time to switch arrays
+ */
+ if (jiffies - rq->expired_timestamp >= limit)
+ return 1;
+
+ /*
+ * There's a better selection in the expired array
+ */
+ if (rq->curr->static_prio > rq->best_expired_prio)
+ return 1;
+
+ /*
+ * All is well
+ */
+ return 0;
+}
+
+/*
* __activate_task - move a task to the runqueue.
*/
static void __activate_task(task_t *p, runqueue_t *rq)
{
prio_array_t *target = rq->active;
- if (batch_task(p))
+ if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
target = rq->expired;
enqueue_task(p, target);
rq->nr_running++;
@@ -2490,22 +2532,6 @@ unsigned long long current_sched_time(const task_t *tsk)
}
/*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
- ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
- (jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
- ((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
if (!rq->expired_timestamp)
rq->expired_timestamp = jiffies;
- if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+ if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
enqueue_task(p, rq->expired);
if (p->static_prio < rq->best_expired_prio)
rq->best_expired_prio = p->static_prio;
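The sched.c changes turn the EXPIRED_STARVING() macro (removed above) into the expired_starving() helper and also consult it in __activate_task(), so a newly woken interactive task is parked on the expired array rather than further delaying already-starving expired tasks (realtime tasks excepted). The standalone model below restates the three checks with invented names and a made-up STARVATION_LIMIT value, purely to make the load-dependent deadline arithmetic concrete; it is not the scheduler's code.

/*
 * Standalone model of the expired_starving() logic; all names and the
 * STARVATION_LIMIT value are illustrative.
 */
#include <stdio.h>

#define STARVATION_LIMIT 128	/* hypothetical: jiffies of grace per runnable task */

struct rq_model {
	unsigned long jiffies_now;	/* current time in jiffies */
	unsigned long expired_timestamp;/* 0 means the arrays were just switched */
	unsigned int nr_running;
	int curr_static_prio;		/* static priority of the running task */
	int best_expired_prio;		/* best static priority waiting in expired */
};

static int expired_starving_model(const struct rq_model *rq)
{
	if (!rq->expired_timestamp)
		return 0;	/* arrays recently switched: nobody is waiting */

	/* deadline grows with load: more runnable tasks, longer grace period */
	if (rq->jiffies_now - rq->expired_timestamp >=
	    (unsigned long)STARVATION_LIMIT * rq->nr_running)
		return 1;

	/* a better (lower static_prio) task is already waiting in expired */
	if (rq->curr_static_prio > rq->best_expired_prio)
		return 1;

	return 0;
}

int main(void)
{
	/* 4 runnable tasks => deadline of 4 * 128 = 512 jiffies in this model */
	struct rq_model rq = {
		.jiffies_now = 1000, .expired_timestamp = 400,
		.nr_running = 4, .curr_static_prio = 120, .best_expired_prio = 125,
	};
	printf("starving: %d\n", expired_starving_model(&rq)); /* 1: 600 >= 512 */
	return 0;
}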
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d82864c..5433195 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -120,3 +120,15 @@ cond_syscall(sys32_sysctl);
cond_syscall(ppc_rtas);
cond_syscall(sys_spu_run);
cond_syscall(sys_spu_create);
+
+/* mmu-dependent weak syscall entries */
+cond_syscall(sys_mprotect);
+cond_syscall(sys_msync);
+cond_syscall(sys_mlock);
+cond_syscall(sys_munlock);
+cond_syscall(sys_mlockall);
+cond_syscall(sys_munlockall);
+cond_syscall(sys_mincore);
+cond_syscall(sys_madvise);
+cond_syscall(sys_mremap);
+cond_syscall(sys_remap_file_pages);
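These cond_syscall() entries let configurations that do not provide the memory-management syscalls (e.g. no-MMU builds) still link, with the missing entries falling back to sys_ni_syscall() and returning -ENOSYS; cond_syscall() is typically implemented by making the symbol a weak alias of sys_ni_syscall. The sketch below shows the underlying weak-symbol idea in plain userspace C with an invented my_mlock(); it is not the kernel macro.

/*
 * Weak-symbol fallback in the spirit of cond_syscall(), with made-up names.
 * If nothing else defines my_mlock(), the weak stub is linked in and calls
 * fail with ENOSYS; a strong definition elsewhere silently wins.
 */
#include <errno.h>
#include <stdio.h>

/* Weak default: "not implemented". */
__attribute__((weak)) long my_mlock(const void *addr, unsigned long len)
{
	(void)addr;
	(void)len;
	return -ENOSYS;
}

int main(void)
{
	long ret = my_mlock((void *)0, 4096);
	if (ret == -ENOSYS)
		puts("my_mlock not implemented in this build");
	return 0;
}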
diff --git a/kernel/timer.c b/kernel/timer.c
index 471ab87..8837737 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
+ static char __devinitdata tvec_base_done[NR_CPUS];
- base = per_cpu(tvec_bases, cpu);
- if (!base) {
+ if (!tvec_base_done[cpu]) {
static char boot_done;
- /*
- * Cannot do allocation in init_timers as that runs before the
- * allocator initializes (and would waste memory if there are
- * more possible CPUs than will ever be installed/brought up).
- */
if (boot_done) {
+ /*
+ * The APs use this path later in boot
+ */
base = kmalloc_node(sizeof(*base), GFP_KERNEL,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
memset(base, 0, sizeof(*base));
+ per_cpu(tvec_bases, cpu) = base;
} else {
- base = &boot_tvec_bases;
+ /*
+ * This is for the boot CPU - we use compile-time
+ * static initialisation because per-cpu memory isn't
+ * ready yet and because the memory allocators are not
+ * initialised either.
+ */
boot_done = 1;
+ base = &boot_tvec_bases;
}
- per_cpu(tvec_bases, cpu) = base;
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
}
+
spin_lock_init(&base->lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
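The timer.c change statically initialises every CPU's tvec_bases pointer to boot_tvec_bases so timers are usable before the allocators are up, and init_timers_cpu() then gives each secondary CPU (the "APs") its own kmalloc_node() base, with tvec_base_done[] making the setup run once per CPU. Below is a much-simplified userspace model of that boot-CPU/secondary-CPU split; the names, the fixed NR_CPUS_MODEL, and the plain array standing in for real per-CPU data are all assumptions made for illustration.

/*
 * Simplified model of the boot-CPU/secondary-CPU split in init_timers_cpu().
 * NR_CPUS_MODEL, base_model and friends are illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS_MODEL 4

struct base_model {
	int initialised;
	/* real code: timer wheel vectors, lock, running timer, ... */
};

/* Compile-time instance for the boot CPU, usable before malloc() works. */
static struct base_model boot_base;

/* Stand-in for the per-CPU pointer; every slot starts at boot_base. */
static struct base_model *bases[NR_CPUS_MODEL] = {
	[0 ... NR_CPUS_MODEL - 1] = &boot_base,	/* GNU range initialiser */
};

static char base_done[NR_CPUS_MODEL];

static int init_timers_cpu_model(int cpu)
{
	struct base_model *base;
	static int boot_done;

	if (!base_done[cpu]) {
		if (boot_done) {
			/* secondary CPUs: allocate their own base */
			base = malloc(sizeof(*base));
			if (!base)
				return -1;
			memset(base, 0, sizeof(*base));
			bases[cpu] = base;
		} else {
			/* boot CPU: keep the static instance */
			boot_done = 1;
			base = &boot_base;
		}
		base_done[cpu] = 1;
	} else {
		base = bases[cpu];	/* CPU set up before: reuse its base */
	}

	base->initialised = 1;	/* stands in for lock/list initialisation */
	return 0;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++) {
		if (init_timers_cpu_model(cpu))
			return 1;
		printf("cpu %d uses %s base\n", cpu,
		       bases[cpu] == &boot_base ? "the static boot" : "an allocated");
	}
	return 0;
}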