From b18ae08deac23187e4a22a8c94a1a473be8e8c93 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 3 Jan 2011 03:49:25 +0000
Subject: powerpc/cell: Use system_wq in cpufreq_spudemand

With cmwq, there's no reason to use a separate workqueue in
cpufreq_spudemand.  Use system_wq instead.  The work items are already
sync canceled on stop, so it's already guaranteed that no work is
running when spu_gov_exit() is entered.

Signed-off-by: Tejun Heo
Cc: Arnd Bergmann
Cc: linuxppc-dev@lists.ozlabs.org
Cc: Dave Jones
Cc: cpufreq@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/time.c                      | 25 ++++++++++++++++++++-----
 arch/powerpc/platforms/cell/cpufreq_spudemand.c | 20 +++-----------------
 2 files changed, 23 insertions(+), 22 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09e4dea..09d31db 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -265,11 +265,26 @@ void accumulate_stolen_time(void)
 {
 	u64 sst, ust;
 
-	sst = scan_dispatch_log(get_paca()->starttime_user);
-	ust = scan_dispatch_log(get_paca()->starttime);
-	get_paca()->system_time -= sst;
-	get_paca()->user_time -= ust;
-	get_paca()->stolen_time += ust + sst;
+	u8 save_soft_enabled = local_paca->soft_enabled;
+	u8 save_hard_enabled = local_paca->hard_enabled;
+
+	/* We are called early in the exception entry, before
+	 * soft/hard_enabled are sync'ed to the expected state
+	 * for the exception. We are hard disabled but the PACA
+	 * needs to reflect that so various debug stuff doesn't
+	 * complain
+	 */
+	local_paca->soft_enabled = 0;
+	local_paca->hard_enabled = 0;
+
+	sst = scan_dispatch_log(local_paca->starttime_user);
+	ust = scan_dispatch_log(local_paca->starttime);
+	local_paca->system_time -= sst;
+	local_paca->user_time -= ust;
+	local_paca->stolen_time += ust + sst;
+
+	local_paca->soft_enabled = save_soft_enabled;
+	local_paca->hard_enabled = save_hard_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 968c1c0..d809836 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -39,8 +39,6 @@ struct spu_gov_info_struct {
 };
 static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
 
-static struct workqueue_struct *kspugov_wq;
-
 static int calc_freq(struct spu_gov_info_struct *info)
 {
 	int cpu;
@@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work)
 	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
 
 	delay = usecs_to_jiffies(info->poll_int);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
 	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@@ -152,27 +150,15 @@ static int __init spu_gov_init(void)
 {
 	int ret;
 
-	kspugov_wq = create_workqueue("kspugov");
-	if (!kspugov_wq) {
-		printk(KERN_ERR "creation of kspugov failed\n");
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = cpufreq_register_governor(&spu_governor);
-	if (ret) {
+	if (ret)
 		printk(KERN_ERR "registration of governor failed\n");
-		destroy_workqueue(kspugov_wq);
-		goto out;
-	}
-out:
 	return ret;
 }
 
 static void __exit spu_gov_exit(void)
 {
 	cpufreq_unregister_governor(&spu_governor);
-	destroy_workqueue(kspugov_wq);
 }
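
For context, the pattern the patch relies on looks roughly like the sketch
below.  This is not part of the patch: the names poll_state, poll_work_fn,
poll_start and poll_stop are made up for illustration.  A delayed work item
re-arms itself on the shared system workqueue via schedule_delayed_work_on(),
and cancel_delayed_work_sync() on the stop path both prevents re-arming and
waits for a running instance to finish, so there is no private workqueue left
to flush or destroy on module exit.

/* Illustrative sketch only -- not from the patch. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poll_state {
	struct delayed_work work;
	unsigned int poll_int;	/* polling interval in usecs */
	int cpu;
};

static void poll_work_fn(struct work_struct *work)
{
	struct poll_state *ps = container_of(to_delayed_work(work),
					     struct poll_state, work);

	/* ... periodic work goes here ... */

	/* re-arm on the same CPU, using the shared system workqueue */
	schedule_delayed_work_on(ps->cpu, &ps->work,
				 usecs_to_jiffies(ps->poll_int));
}

static void poll_start(struct poll_state *ps)
{
	INIT_DELAYED_WORK(&ps->work, poll_work_fn);
	schedule_delayed_work_on(ps->cpu, &ps->work,
				 usecs_to_jiffies(ps->poll_int));
}

static void poll_stop(struct poll_state *ps)
{
	/*
	 * Handles self-requeueing work: stops further re-arming and waits
	 * for a running instance to finish, so nothing else is needed on
	 * the exit path.
	 */
	cancel_delayed_work_sync(&ps->work);
}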