path: root/base/time_win.cc
author	mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>	2010-06-29 04:58:15 +0000
committer	mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>	2010-06-29 04:58:15 +0000
commit	57f030a503ed96f974a4edcb8c65c982ea8fd765 (patch)
tree	07036d200a6f22c529fda6db035e874ef7f5f3d5 /base/time_win.cc
parent	13729e7753dfdaf4cc90f5050827a8ebc9875390 (diff)
Change chrome from statically enabling high resolution timers on windows
to enabling them dynamically - only when the application really needs them.

I am working on some test cases for this and will add them, but I wanted to send out the concept for review.

In this implementation, I modify the message loop to detect when the application has requested high resolution timers. Note that there are multiple MessageLoops active in a single process. After a period of time, we simply shut the high resolution timers off again. We could have set a timer, kept a count of active timers, or used any number of more complex algorithms, but I think this algorithm is very simple and good enough. If an application continues to need high resolution timers for more than 1s, we'll simply turn them back on again.

One last change - since we've implemented the clamp at 4ms, there isn't much point in using 1ms for timeBeginPeriod. I've modified that to 2ms (half of 4ms, our target minimal interval).

BUG=46531
TEST=MessageLoop.HighResolutionTimers
Review URL: http://codereview.chromium.org/2822035

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@51102 0039d316-1c4b-4281-b951-d872f2087c98
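[Editor's note: a minimal sketch of the policy described in the commit message, assuming a 4ms delay clamp and a roughly 1s burst window. The class and method names below are invented for illustration and are not the names used in the patch.]

#include <windows.h>
#include <mmsystem.h>  // timeBeginPeriod / timeEndPeriod; link against winmm.lib

// Hypothetical per-message-loop policy object.
class HighResolutionTimerGuard {
 public:
  // Called whenever a delayed task is posted to the loop.
  void OnDelayedTaskPosted(DWORD delay_ms) {
    const DWORD kShortDelayMs = 4;        // assumed 4ms clamp from the description
    const DWORD kBurstDurationMs = 1000;  // the ~1s window mentioned above
    DWORD now = GetTickCount();
    if (delay_ms <= kShortDelayMs) {
      if (!raised_) {
        timeBeginPeriod(2);  // 2ms: half of the 4ms target minimal interval
        raised_ = true;
      }
      last_short_request_ms_ = now;
    } else if (raised_ && now - last_short_request_ms_ > kBurstDurationMs) {
      // No short-delay requests for about a second: restore the default resolution.
      timeEndPeriod(2);
      raised_ = false;
    }
  }

 private:
  bool raised_ = false;
  DWORD last_short_request_ms_ = 0;
};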
Diffstat (limited to 'base/time_win.cc')
-rw-r--r--	base/time_win.cc	|	33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/base/time_win.cc b/base/time_win.cc
index f44c7f1..d4da0f4 100644
--- a/base/time_win.cc
+++ b/base/time_win.cc
@@ -98,6 +98,8 @@ void InitializeClock() {
// static
const int64 Time::kTimeTToMicrosecondsOffset = GG_INT64_C(11644473600000000);
+bool Time::high_resolution_timer_enabled_ = false;
+
// static
Time Time::Now() {
if (initial_time == 0)
@@ -148,18 +150,31 @@ FILETIME Time::ToFileTime() const {
}
// static
-bool Time::UseHighResolutionTimer(bool use) {
- // TODO(mbelshe): Make sure that switching the system timer resolution
- // doesn't break Timer firing order etc. An example test would be to have
- // two threads. One would have a bunch of timers, and another would turn the
- // high resolution timer on and off.
+void Time::EnableHighResolutionTimer(bool enable) {
+ // Test for single-threaded access.
+ static PlatformThreadId my_thread = PlatformThread::CurrentId();
+ DCHECK(PlatformThread::CurrentId() == my_thread);
+
+ if (high_resolution_timer_enabled_ == enable)
+ return;
+
+ high_resolution_timer_enabled_ = enable;
+}
+
+// static
+bool Time::ActivateHighResolutionTimer(bool activate) {
+ if (!high_resolution_timer_enabled_)
+ return false;
+ // Using anything other than 1ms makes timers granular
+ // to that interval.
+ const int kMinTimerIntervalMs = 1;
MMRESULT result;
- if (use)
- result = timeBeginPeriod(1);
+ if (activate)
+ result = timeBeginPeriod(kMinTimerIntervalMs);
else
- result = timeEndPeriod(1);
- return (result == TIMERR_NOERROR);
+ result = timeEndPeriod(kMinTimerIntervalMs);
+ return result == TIMERR_NOERROR;
}
// static
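[Editor's note: a rough, hypothetical illustration of how the split API in the diff is meant to be used. EnableHighResolutionTimer() records a process-wide policy bit (e.g. set once by the message loop), while ActivateHighResolutionTimer() actually raises or lowers the Windows timer resolution and returns false without doing anything when the policy is off. The ScheduleShortBurst() caller below is invented and is not part of the patch.]

#include "base/time.h"

void ScheduleShortBurst() {
  // Somewhere early on, the embedder opts in to high resolution timers:
  //   base::Time::EnableHighResolutionTimer(true);
  //
  // Later, while short-delay timers are actually pending, a caller raises the
  // system timer resolution and drops it again when the burst is over.
  bool raised = base::Time::ActivateHighResolutionTimer(true);

  // ... arm and run the short-delay timers ...

  if (raised)
    base::Time::ActivateHighResolutionTimer(false);
}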