Diffstat (limited to 'base/time_unittest.cc')
-rw-r--r--  base/time_unittest.cc  33
1 file changed, 25 insertions, 8 deletions
diff --git a/base/time_unittest.cc b/base/time_unittest.cc
index 0076e10..21e6f89 100644
--- a/base/time_unittest.cc
+++ b/base/time_unittest.cc
@@ -125,18 +125,35 @@ TEST(TimeTicks, HighResNow) {
     return;
 #endif

-  TimeTicks ticks_start = TimeTicks::HighResNow();
+  // Why do we loop here?
+  // We're trying to measure that intervals increment in a VERY small amount
+  // of time -- less than 15ms. Unfortunately, if we happen to have a
+  // context switch in the middle of our test, the context switch could easily
+  // exceed our limit. So, we iterate on this several times. As long as we're
+  // able to detect the fine-granularity timers at least once, then the test
+  // has succeeded.
+
+  const int kTargetGranularityUs = 15000;  // 15ms
+
+  bool success = false;
+  int retries = 100;  // Arbitrary.
   TimeDelta delta;
-  // Loop until we can detect that the clock has changed. Non-HighRes timers
-  // will increment in chunks, e.g. 15ms. By spinning until we see a clock
-  // change, we detect the minimum time between measurements.
-  do {
-    delta = TimeTicks::HighResNow() - ticks_start;
-  } while (delta.InMilliseconds() == 0);
+  while (!success && retries--) {
+    TimeTicks ticks_start = TimeTicks::HighResNow();
+    // Loop until we can detect that the clock has changed. Non-HighRes timers
+    // will increment in chunks, e.g. 15ms. By spinning until we see a clock
+    // change, we detect the minimum time between measurements.
+    do {
+      delta = TimeTicks::HighResNow() - ticks_start;
+    } while (delta.InMilliseconds() == 0);
+
+    if (delta.InMicroseconds() <= kTargetGranularityUs)
+      success = true;
+  }

   // In high resolution mode, we expect to see the clock increment
   // in intervals less than 15ms.
-  EXPECT_LT(delta.InMicroseconds(), 15000);
+  EXPECT_TRUE(success);
 }

 TEST(TimeDelta, FromAndIn) {
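For readability, here is a sketch of the patched TEST(TimeTicks, HighResNow) body as it reads once the hunk above is applied, assembled from the '+' and context lines (the platform-specific early-return guard at the top of the test is elided):

  const int kTargetGranularityUs = 15000;  // 15ms

  bool success = false;
  int retries = 100;  // Arbitrary.
  TimeDelta delta;
  while (!success && retries--) {
    TimeTicks ticks_start = TimeTicks::HighResNow();
    // Spin until the clock changes; non-HighRes timers advance in ~15ms
    // chunks, so this captures the minimum observable interval.
    do {
      delta = TimeTicks::HighResNow() - ticks_start;
    } while (delta.InMilliseconds() == 0);

    // One interval below 15ms is enough: a context switch may inflate any
    // single iteration, but it should not mask a working high-resolution
    // clock across all 100 retries.
    if (delta.InMicroseconds() <= kTargetGranularityUs)
      success = true;
  }
  EXPECT_TRUE(success);

The key change is that a single unlucky context switch no longer fails the test outright; the old EXPECT_LT on one measurement is replaced by a retry loop that only needs to observe one sub-15ms interval.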