From c6da2cfeb05178a11c6d062a06f8078150ee492f Mon Sep 17 00:00:00 2001 From: codeworkx Date: Sat, 2 Jun 2012 13:09:29 +0200 Subject: samsung update 1 --- kernel/time/Kconfig | 4 ++ kernel/time/Makefile | 2 +- kernel/time/timekeeping.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 136 insertions(+), 1 deletion(-) (limited to 'kernel/time') diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index f06a8a3..689fe69 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -27,3 +27,7 @@ config GENERIC_CLOCKEVENTS_BUILD default y depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR +# Selectable by architectures which want to reuse the clocksource as +# sched_clock +config HAVE_CLKSRC_SCHED_CLOCK + bool diff --git a/kernel/time/Makefile b/kernel/time/Makefile index e2fd74b..cae2ad7 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -1,5 +1,5 @@ obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o -obj-y += timeconv.o posix-clock.o alarmtimer.o +obj-y += timeconv.o posix-clock.o #alarmtimer.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5f45831..c5406f9 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -21,6 +21,9 @@ #include #include +static void notrace sched_clock_clksrc_install(struct clocksource *clock); +static void notrace sched_clock_clksrc_update(void); + /* Structure holding internal timekeeping values. */ struct timekeeper { /* Current clocksource used for timekeeping. 
*/ @@ -66,6 +69,9 @@ static void timekeeper_setup_internals(struct clocksource *clock) cycle_t interval; u64 tmp, ntpinterval; + if (clock->flags & CLOCK_SOURCE_SCHED_CLOCK) + sched_clock_clksrc_install(clock); + timekeeper.clock = clock; clock->cycle_last = clock->read(clock); @@ -608,6 +614,12 @@ static struct timespec timekeeping_suspend_time; */ static void __timekeeping_inject_sleeptime(struct timespec *delta) { + if (!timespec_valid(delta)) { + printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " + "sleep delta value!\n"); + return; + } + xtime = timespec_add(xtime, *delta); wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta); total_sleep_time = timespec_add(total_sleep_time, *delta); @@ -1079,6 +1091,7 @@ void do_timer(unsigned long ticks) { jiffies_64 += ticks; update_wall_time(); + sched_clock_clksrc_update(); calc_global_load(ticks); } @@ -1129,3 +1142,121 @@ void xtime_update(unsigned long ticks) do_timer(ticks); write_sequnlock(&xtime_lock); } + +/** + * struct sched_clksrc - clocksource based sched_clock + * @clock: Pointer to the clocksource + * @nsecs: Nanoseconds base value + * @seqcnt: Sequence counter for sched_clock + * @last_update: Counter value at last update + * @mult: Multiplier for nsec conversion + * @shift: Shift value (divisor) for nsec conversion + * @mask: Mask for the delta + * @update_cycles: Cycles after which we update nsecs and last_update + * @update_nsecs: Nanoseconds value corresponding to @update_cycles + */ +struct sched_clksrc { + struct clocksource *clock; + u64 nsecs; + struct seqcount seqcnt; + u64 last_update; + u32 mult; + u32 shift; + u64 mask; + u64 update_cycles; + u64 update_nsecs; +}; + +static struct sched_clksrc sched_clksrc; + +/* + * Called from clocksource code when a clocksource usable for + * sched_clock is installed. 
+ */ +static void notrace sched_clock_clksrc_install(struct clocksource *clock) +{ + u64 nsecs, cyc = clock->mask & CLOCKSOURCE_MASK(32); + + if (sched_clksrc.clock) + return; + + /* Make sure we get the wraparounds */ + cyc >>= 2; + + /* Use the raw mult/shift values */ + sched_clksrc.mult = clock->mult; + sched_clksrc.shift = clock->shift; + sched_clksrc.mask = clock->mask; + sched_clksrc.update_cycles = cyc; + nsecs = clocksource_cyc2ns(cyc, sched_clksrc.mult, sched_clksrc.shift); + sched_clksrc.update_nsecs = nsecs; + /* Establish the base line */ + sched_clksrc.nsecs = (u64)(jiffies - INITIAL_JIFFIES) * + (NSEC_PER_SEC / HZ); + sched_clksrc.last_update = clock->read(clock) & sched_clksrc.mask; + sched_clksrc.clock = clock; +} + +/* + * Called from timekeeping code with xtime lock held and interrupts + * disabled, so we have only one updater at a time. Note that readers + * of sched_clock are _NOT_ affected by xtime_lock. We have our own + * sequence counter for sched_clksrc. + */ +static void notrace sched_clock_clksrc_update(void) +{ + struct clocksource *clock = sched_clksrc.clock; + u64 delta; + + if (!clock) + return; + + delta = clock->read(clock) - sched_clksrc.last_update; + delta &= sched_clksrc.mask; + while (delta >= sched_clksrc.update_cycles) { + delta -= sched_clksrc.update_cycles; + write_seqcount_begin(&sched_clksrc.seqcnt); + sched_clksrc.last_update += sched_clksrc.update_cycles; + sched_clksrc.nsecs += sched_clksrc.update_nsecs; + write_seqcount_end(&sched_clksrc.seqcnt); + } +} + +/* + * Scheduler clock clocksource based - returns current time in nanosec units. + * + * Can be called from the default implementation below or from + * architecture code if it overrides the default implementation. 
+ */ +unsigned long long notrace sched_clock_clksrc(void) +{ + struct clocksource *clock = sched_clksrc.clock; + unsigned int seq; + u64 nsecs, last, delta; + + if (!sched_clksrc.clock) + return (unsigned long long)(jiffies - INITIAL_JIFFIES) * + (NSEC_PER_SEC / HZ); + + do { + seq = read_seqcount_begin(&sched_clksrc.seqcnt); + last = sched_clksrc.last_update; + nsecs = sched_clksrc.nsecs; + } while (read_seqcount_retry(&sched_clksrc.seqcnt, seq)); + + delta = (clock->read(clock) - last) & sched_clksrc.mask; + + return nsecs + clocksource_cyc2ns(delta, sched_clksrc.mult, + sched_clksrc.shift); +} + +/* + * Scheduler clock - returns current time in nanosec units. + * This is the default implementation. + * Architectures and sub-architectures can override this. + */ +unsigned long long __attribute__((weak)) sched_clock(void) +{ + return sched_clock_clksrc(); +} +EXPORT_SYMBOL_GPL(sched_clock); -- cgit v1.1