author    mcasas <mcasas@chromium.org>  2015-07-16 19:00:32 -0700
committer Commit bot <commit-bot@chromium.org>  2015-07-17 02:01:19 +0000
commit    26973ef3748fda32787ce05a021ed4c68565fe6a
tree      78de02cfe3e5bd0639c0ee688f142768f8390e2d /media/capture
parent    c36773843cb0d729166bb19074e960129e609dd5
Folder shuffle media/capture -> media/capture/content and media/video/capture -> media/capture/video
Folder reorg, no code changes whatsoever (except #include path renaming).

Rationale: media/capture includes ATM screen/tab capture; media/video has a bunch of unrelated files and a large capture/ folder. All that can be consolidated under media/capture/bla, where bla={content, video}. Suggestion: move audio capture code into capture/audio.

TBR=avi@chromium.org for content/browser/DEPS since it is a mechanical folder name change.

Review URL: https://codereview.chromium.org/1231863011

Cr-Commit-Position: refs/heads/master@{#339199}
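The mechanical nature of the rename is visible in every .cc/.h hunk below as a one-line #include repoint, for example:

-#include "media/capture/animated_content_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/video/video_capture_device.h"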
Diffstat (limited to 'media/capture')
-rw-r--r--media/capture/content/OWNERS (renamed from media/capture/OWNERS)0
-rw-r--r--media/capture/content/README5
-rw-r--r--media/capture/content/animated_content_sampler.cc (renamed from media/capture/animated_content_sampler.cc)32
-rw-r--r--media/capture/content/animated_content_sampler.h (renamed from media/capture/animated_content_sampler.h)0
-rw-r--r--media/capture/content/animated_content_sampler_unittest.cc (renamed from media/capture/animated_content_sampler_unittest.cc)245
-rw-r--r--media/capture/content/capture_resolution_chooser.cc (renamed from media/capture/capture_resolution_chooser.cc)21
-rw-r--r--media/capture/content/capture_resolution_chooser.h (renamed from media/capture/capture_resolution_chooser.h)9
-rw-r--r--media/capture/content/capture_resolution_chooser_unittest.cc (renamed from media/capture/capture_resolution_chooser_unittest.cc)93
-rw-r--r--media/capture/content/feedback_signal_accumulator.cc (renamed from media/capture/feedback_signal_accumulator.cc)2
-rw-r--r--media/capture/content/feedback_signal_accumulator.h (renamed from media/capture/feedback_signal_accumulator.h)6
-rw-r--r--media/capture/content/feedback_signal_accumulator_unittest.cc (renamed from media/capture/feedback_signal_accumulator_unittest.cc)2
-rw-r--r--media/capture/content/screen_capture_device_core.cc (renamed from media/capture/screen_capture_device_core.cc)28
-rw-r--r--media/capture/content/screen_capture_device_core.h (renamed from media/capture/screen_capture_device_core.h)14
-rw-r--r--media/capture/content/smooth_event_sampler.cc (renamed from media/capture/smooth_event_sampler.cc)20
-rw-r--r--media/capture/content/smooth_event_sampler.h (renamed from media/capture/smooth_event_sampler.h)0
-rw-r--r--media/capture/content/smooth_event_sampler_unittest.cc704
-rw-r--r--media/capture/content/thread_safe_capture_oracle.cc (renamed from media/capture/thread_safe_capture_oracle.cc)77
-rw-r--r--media/capture/content/thread_safe_capture_oracle.h (renamed from media/capture/thread_safe_capture_oracle.h)19
-rw-r--r--media/capture/content/video_capture_oracle.cc (renamed from media/capture/video_capture_oracle.cc)34
-rw-r--r--media/capture/content/video_capture_oracle.h (renamed from media/capture/video_capture_oracle.h)8
-rw-r--r--media/capture/content/video_capture_oracle_unittest.cc (renamed from media/capture/video_capture_oracle_unittest.cc)129
-rw-r--r--media/capture/smooth_event_sampler_unittest.cc488
-rw-r--r--media/capture/video/OWNERS3
-rw-r--r--media/capture/video/android/video_capture_device_android.cc204
-rw-r--r--media/capture/video/android/video_capture_device_android.h95
-rw-r--r--media/capture/video/android/video_capture_device_factory_android.cc139
-rw-r--r--media/capture/video/android/video_capture_device_factory_android.h42
-rw-r--r--media/capture/video/fake_video_capture_device.cc217
-rw-r--r--media/capture/video/fake_video_capture_device.h73
-rw-r--r--media/capture/video/fake_video_capture_device_factory.cc86
-rw-r--r--media/capture/video/fake_video_capture_device_factory.h44
-rw-r--r--media/capture/video/fake_video_capture_device_unittest.cc240
-rw-r--r--media/capture/video/file_video_capture_device.cc256
-rw-r--r--media/capture/video/file_video_capture_device.h78
-rw-r--r--media/capture/video/file_video_capture_device_factory.cc76
-rw-r--r--media/capture/video/file_video_capture_device_factory.h31
-rw-r--r--media/capture/video/linux/OWNERS1
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate.cc425
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate.h169
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc99
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate_multi_plane.h61
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate_single_plane.cc61
-rw-r--r--media/capture/video/linux/v4l2_capture_delegate_single_plane.h56
-rw-r--r--media/capture/video/linux/video_capture_device_chromeos.cc114
-rw-r--r--media/capture/video/linux/video_capture_device_chromeos.h36
-rw-r--r--media/capture/video/linux/video_capture_device_factory_linux.cc225
-rw-r--r--media/capture/video/linux/video_capture_device_factory_linux.h38
-rw-r--r--media/capture/video/linux/video_capture_device_linux.cc147
-rw-r--r--media/capture/video/linux/video_capture_device_linux.h60
-rw-r--r--media/capture/video/mac/DEPS3
-rw-r--r--media/capture/video/mac/platform_video_capturing_mac.h53
-rw-r--r--media/capture/video/mac/video_capture_device_avfoundation_mac.h122
-rw-r--r--media/capture/video/mac/video_capture_device_avfoundation_mac.mm354
-rw-r--r--media/capture/video/mac/video_capture_device_decklink_mac.h82
-rw-r--r--media/capture/video/mac/video_capture_device_decklink_mac.mm484
-rw-r--r--media/capture/video/mac/video_capture_device_factory_mac.h42
-rw-r--r--media/capture/video/mac/video_capture_device_factory_mac.mm219
-rw-r--r--media/capture/video/mac/video_capture_device_factory_mac_unittest.mm49
-rw-r--r--media/capture/video/mac/video_capture_device_mac.h115
-rw-r--r--media/capture/video/mac/video_capture_device_mac.mm569
-rw-r--r--media/capture/video/mac/video_capture_device_qtkit_mac.h76
-rw-r--r--media/capture/video/mac/video_capture_device_qtkit_mac.mm360
-rw-r--r--media/capture/video/video_capture_device.cc161
-rw-r--r--media/capture/video/video_capture_device.h293
-rw-r--r--media/capture/video/video_capture_device_factory.cc65
-rw-r--r--media/capture/video/video_capture_device_factory.h57
-rw-r--r--media/capture/video/video_capture_device_info.cc21
-rw-r--r--media/capture/video/video_capture_device_info.h28
-rw-r--r--media/capture/video/video_capture_device_unittest.cc485
-rw-r--r--media/capture/video/win/capability_list_win.cc54
-rw-r--r--media/capture/video/win/capability_list_win.h46
-rw-r--r--media/capture/video/win/filter_base_win.cc173
-rw-r--r--media/capture/video/win/filter_base_win.h74
-rw-r--r--media/capture/video/win/pin_base_win.cc285
-rw-r--r--media/capture/video/win/pin_base_win.h111
-rw-r--r--media/capture/video/win/sink_filter_observer_win.h25
-rw-r--r--media/capture/video/win/sink_filter_win.cc63
-rw-r--r--media/capture/video/win/sink_filter_win.h62
-rw-r--r--media/capture/video/win/sink_input_pin_win.cc192
-rw-r--r--media/capture/video/win/sink_input_pin_win.h53
-rw-r--r--media/capture/video/win/video_capture_device_factory_win.cc438
-rw-r--r--media/capture/video/win/video_capture_device_factory_win.h39
-rw-r--r--media/capture/video/win/video_capture_device_mf_win.cc327
-rw-r--r--media/capture/video/win/video_capture_device_mf_win.h72
-rw-r--r--media/capture/video/win/video_capture_device_win.cc581
-rw-r--r--media/capture/video/win/video_capture_device_win.h114
86 files changed, 10449 insertions, 910 deletions
diff --git a/media/capture/OWNERS b/media/capture/content/OWNERS
index 02bdb39..02bdb39 100644
--- a/media/capture/OWNERS
+++ b/media/capture/content/OWNERS
diff --git a/media/capture/content/README b/media/capture/content/README
new file mode 100644
index 0000000..20154f0
--- /dev/null
+++ b/media/capture/content/README
@@ -0,0 +1,5 @@
+This folder contains code referring to content capture, or the capture of
+"already-rendered stuff." The screen capture implementations (desktop, window,
+tab, etc.) in content/ use these tools. The code in this dir is all about
+deciding how to capture content w.r.t. timing, quality, and other performance
+concerns.
\ No newline at end of file
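As a hypothetical illustration (not part of this commit) of how the policy classes in this directory are driven, here is a minimal loop around SmoothEventSampler, assuming only the constructor and methods exercised by the unit tests later in this diff:

#include "base/time/time.h"
#include "media/capture/content/smooth_event_sampler.h"

// Sketch: a 60 Hz compositor feeding a sampler capped at 30 Hz. The capture
// step is elided; a real client would deliver a frame before charging the
// sampler for it via RecordSample().
void DriveSamplerSketch() {
  media::SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30,
                                    3 /* redundant_capture_goal */);
  base::TimeTicks now = base::TimeTicks() + base::TimeDelta::FromSeconds(1);
  for (int i = 0; i < 120; ++i) {  // Two seconds of presentation events.
    sampler.ConsiderPresentationEvent(now);
    if (sampler.ShouldSample())
      sampler.RecordSample();
    now += base::TimeDelta::FromSeconds(1) / 60;  // 60 Hz v-sync.
  }
}

In steady state this samples every other vsync, the behavior asserted by Sample60HertzAt30Hertz below.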
diff --git a/media/capture/animated_content_sampler.cc b/media/capture/content/animated_content_sampler.cc
index 0fe5148..ae90419 100644
--- a/media/capture/animated_content_sampler.cc
+++ b/media/capture/content/animated_content_sampler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/animated_content_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
#include <algorithm>
@@ -41,19 +41,20 @@ const int kDriftCorrectionMillis = 2000;
AnimatedContentSampler::AnimatedContentSampler(
base::TimeDelta min_capture_period)
- : min_capture_period_(min_capture_period),
- sampling_state_(NOT_SAMPLING) {
+ : min_capture_period_(min_capture_period), sampling_state_(NOT_SAMPLING) {
DCHECK_GT(min_capture_period_, base::TimeDelta());
}
-AnimatedContentSampler::~AnimatedContentSampler() {}
+AnimatedContentSampler::~AnimatedContentSampler() {
+}
void AnimatedContentSampler::SetTargetSamplingPeriod(base::TimeDelta period) {
target_sampling_period_ = period;
}
void AnimatedContentSampler::ConsiderPresentationEvent(
- const gfx::Rect& damage_rect, base::TimeTicks event_time) {
+ const gfx::Rect& damage_rect,
+ base::TimeTicks event_time) {
// Analyze the current event and recent history to determine whether animating
// content is detected.
AddObservation(damage_rect, event_time);
@@ -71,9 +72,8 @@ void AnimatedContentSampler::ConsiderPresentationEvent(
// At this point, animation is being detected. Update the sampling period
// since the client may call the accessor method even if the heuristics below
// decide not to sample the current event.
- sampling_period_ = ComputeSamplingPeriod(detected_period_,
- target_sampling_period_,
- min_capture_period_);
+ sampling_period_ = ComputeSamplingPeriod(
+ detected_period_, target_sampling_period_, min_capture_period_);
// If this is the first event causing animating content to be detected,
// transition to the START_SAMPLING state.
@@ -207,13 +207,13 @@ bool AnimatedContentSampler::AnalyzeObservations(
if (last_event_time.is_null()) {
last_event_time = i->event_time;
if ((event_time - last_event_time) >=
- base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
+ base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
return false; // Content animation has recently ended.
}
} else {
const base::TimeDelta frame_duration = first_event_time - i->event_time;
if (frame_duration >=
- base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
+ base::TimeDelta::FromMilliseconds(kNonAnimatingThresholdMillis)) {
break; // Content not animating before this point.
}
sum_frame_durations += frame_duration;
@@ -223,7 +223,7 @@ bool AnimatedContentSampler::AnalyzeObservations(
}
if ((last_event_time - first_event_time) <
- base::TimeDelta::FromMilliseconds(kMinObservationWindowMillis)) {
+ base::TimeDelta::FromMilliseconds(kMinObservationWindowMillis)) {
return false; // Content has not animated for long enough for accuracy.
}
if (num_pixels_damaged_in_chosen <= (num_pixels_damaged_in_all * 2 / 3))
@@ -250,7 +250,7 @@ base::TimeTicks AnimatedContentSampler::ComputeNextFrameTimestamp(
const base::TimeDelta drift = ideal_timestamp - event_time;
const int64 correct_over_num_frames =
base::TimeDelta::FromMilliseconds(kDriftCorrectionMillis) /
- sampling_period_;
+ sampling_period_;
DCHECK_GT(correct_over_num_frames, 0);
return ideal_timestamp - drift / correct_over_num_frames;
@@ -258,9 +258,9 @@ base::TimeTicks AnimatedContentSampler::ComputeNextFrameTimestamp(
// static
base::TimeDelta AnimatedContentSampler::ComputeSamplingPeriod(
- base::TimeDelta animation_period,
- base::TimeDelta target_sampling_period,
- base::TimeDelta min_capture_period) {
+ base::TimeDelta animation_period,
+ base::TimeDelta target_sampling_period,
+ base::TimeDelta min_capture_period) {
// If the animation rate is unknown, return the ideal sampling period.
if (animation_period == base::TimeDelta()) {
return std::max(target_sampling_period, min_capture_period);
@@ -279,7 +279,7 @@ base::TimeDelta AnimatedContentSampler::ComputeSamplingPeriod(
const double target_fps = 1.0 / target_sampling_period.InSecondsF();
const double animation_fps = 1.0 / animation_period.InSecondsF();
if (std::abs(animation_fps / ratio - target_fps) <
- std::abs(animation_fps / (ratio + 1) - target_fps)) {
+ std::abs(animation_fps / (ratio + 1) - target_fps)) {
sampling_period = ratio * animation_period;
} else {
sampling_period = (ratio + 1) * animation_period;
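For readers skimming the reflow above: ComputeSamplingPeriod snaps the sampling period to an integer multiple of the detected animation period, picking whichever multiple yields a frame rate closer to the target. Below is a standalone sketch of that rule using plain doubles (seconds) instead of base::TimeDelta; the initial ratio computation is an assumption, since this hunk shows only the comparison:

#include <algorithm>
#include <cmath>
#include <cstdio>

double ComputeSamplingPeriodSketch(double animation_period,
                                   double target_sampling_period,
                                   double min_capture_period) {
  if (animation_period == 0.0)  // Animation rate unknown: use the target.
    return std::max(target_sampling_period, min_capture_period);
  // Assumed: candidate multiple of the animation period (>= 1).
  const int ratio = std::max(
      1, static_cast<int>(target_sampling_period / animation_period));
  const double target_fps = 1.0 / target_sampling_period;
  const double animation_fps = 1.0 / animation_period;
  // Pick ratio or ratio + 1, whichever lands closer to the target rate.
  const double sampling_period =
      std::abs(animation_fps / ratio - target_fps) <
              std::abs(animation_fps / (ratio + 1) - target_fps)
          ? ratio * animation_period
          : (ratio + 1) * animation_period;
  return std::max(sampling_period, min_capture_period);
}

int main() {
  // 60 Hz content with a 30 Hz target: ratio 2 wins, so every second
  // animation frame is sampled (prints ~0.033333, i.e. 30 Hz).
  std::printf("%.6f\n",
              ComputeSamplingPeriodSketch(1.0 / 60, 1.0 / 30, 1.0 / 60));
  return 0;
}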
diff --git a/media/capture/animated_content_sampler.h b/media/capture/content/animated_content_sampler.h
index bcc2d635..bcc2d635 100644
--- a/media/capture/animated_content_sampler.h
+++ b/media/capture/content/animated_content_sampler.h
diff --git a/media/capture/animated_content_sampler_unittest.cc b/media/capture/content/animated_content_sampler_unittest.cc
index 2d96018..3d00912 100644
--- a/media/capture/animated_content_sampler_unittest.cc
+++ b/media/capture/content/animated_content_sampler_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/animated_content_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
#include <cmath>
#include <utility>
@@ -46,9 +46,7 @@ class AnimatedContentSamplerTest : public ::testing::Test {
return base::TimeDelta::FromSeconds(1) / 30;
}
- AnimatedContentSampler* sampler() const {
- return sampler_.get();
- }
+ AnimatedContentSampler* sampler() const { return sampler_.get(); }
int GetRandomInRange(int begin, int end) {
const int len = end - begin;
@@ -182,10 +180,8 @@ TEST_F(AnimatedContentSamplerTest, TargetsSamplingPeriod) {
for (int content_fps = 1; content_fps <= 60; ++content_fps) {
const base::TimeDelta content_period = FpsAsPeriod(content_fps);
- const base::TimeDelta sampling_period =
- ComputeSamplingPeriod(content_period,
- target_sampling_period,
- min_capture_period);
+ const base::TimeDelta sampling_period = ComputeSamplingPeriod(
+ content_period, target_sampling_period, min_capture_period);
if (content_period >= target_sampling_period) {
ASSERT_EQ(content_period, sampling_period);
} else {
@@ -217,14 +213,12 @@ namespace {
// A test scenario for AnimatedContentSamplerParameterizedTest.
struct Scenario {
- base::TimeDelta vsync_interval; // Reflects compositor's update rate.
+ base::TimeDelta vsync_interval; // Reflects compositor's update rate.
base::TimeDelta min_capture_period; // Reflects maximum capture rate.
- base::TimeDelta content_period; // Reflects content animation rate.
+ base::TimeDelta content_period; // Reflects content animation rate.
base::TimeDelta target_sampling_period;
- Scenario(int compositor_frequency,
- int max_frame_rate,
- int content_frame_rate)
+ Scenario(int compositor_frequency, int max_frame_rate, int content_frame_rate)
: vsync_interval(FpsAsPeriod(compositor_frequency)),
min_capture_period(FpsAsPeriod(max_frame_rate)),
content_period(FpsAsPeriod(content_frame_rate)) {
@@ -249,8 +243,7 @@ struct Scenario {
::std::ostream& operator<<(::std::ostream& os, const Scenario& s) {
return os << "{ vsync_interval=" << s.vsync_interval.InMicroseconds()
<< ", min_capture_period=" << s.min_capture_period.InMicroseconds()
- << ", content_period=" << s.content_period.InMicroseconds()
- << " }";
+ << ", content_period=" << s.content_period.InMicroseconds() << " }";
}
} // namespace
@@ -277,8 +270,7 @@ class AnimatedContentSamplerParameterizedTest
base::TimeDelta ComputeExpectedSamplingPeriod() const {
return AnimatedContentSamplerTest::ComputeSamplingPeriod(
- GetParam().content_period,
- GetParam().target_sampling_period,
+ GetParam().content_period, GetParam().target_sampling_period,
GetParam().min_capture_period);
}
@@ -414,14 +406,12 @@ class AnimatedContentSamplerParameterizedTest
}
const double expected_sampling_ratio =
GetParam().content_period.InSecondsF() /
- ComputeExpectedSamplingPeriod().InSecondsF();
+ ComputeExpectedSamplingPeriod().InSecondsF();
const int total_frames = count_dropped_frames_ + count_sampled_frames_;
- EXPECT_NEAR(total_frames * expected_sampling_ratio,
- count_sampled_frames_,
+ EXPECT_NEAR(total_frames * expected_sampling_ratio, count_sampled_frames_,
1.5);
EXPECT_NEAR(total_frames * (1.0 - expected_sampling_ratio),
- count_dropped_frames_,
- 1.5);
+ count_dropped_frames_, 1.5);
}
private:
@@ -440,29 +430,18 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Provide random events and expect no lock-in.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- false,
- true,
- &begin),
- false,
- false,
- false,
- "Provide random events and expect no lock-in.");
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ false, true, &begin),
+ false, false, false, "Provide random events and expect no lock-in.");
if (HasFailure())
return;
// Provide content frame events with some random events mixed-in, and expect
// the sampler to lock-in.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- true,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, true, &begin),
+ false, true, false,
"Provide content frame events with some random events mixed-in, and "
"expect the sampler to lock-in.");
if (HasFailure())
@@ -471,14 +450,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Continue providing content frame events without the random events mixed-in
// and expect the lock-in to hold.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- true,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ true, true, false,
"Continue providing content frame events without the random events "
"mixed-in and expect the lock-in to hold.");
if (HasFailure())
@@ -487,31 +461,21 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Continue providing just content frame events and expect the lock-in to
// hold. Also simulate the capture pipeline experiencing back pressure.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin),
- true,
- true,
- true,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(20),
+ true, false, &begin),
+ true, true, true,
"Continue providing just content frame events and expect the lock-in to "
"hold. Also simulate the capture pipeline experiencing back pressure.");
if (HasFailure())
return;
-
// Provide a half-second of random events only, and expect the lock-in to be
// broken.
RunEventSequence(
GenerateEventSequence(begin,
begin + base::TimeDelta::FromMilliseconds(500),
- false,
- true,
- &begin),
- true,
- false,
- false,
+ false, true, &begin),
+ true, false, false,
"Provide a half-second of random events only, and expect the lock-in to "
"be broken.");
if (HasFailure())
@@ -520,14 +484,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest, DetectsAnimatedContent) {
// Now, go back to providing content frame events, and expect the sampler to
// lock-in once again.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Now, go back to providing content frame events, and expect the sampler "
"to lock-in once again.");
}
@@ -547,14 +506,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// lock-in.
base::TimeTicks begin = InitialTestTimeTicks();
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Start the first animation and run for a bit, and expect the sampler to "
"lock-in.");
if (HasFailure())
@@ -564,12 +518,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// size and frame rate, but at a different position. This should cause
// the sampler to enter an "undetected" state since it's unclear which
// animation should be locked into.
- std::vector<Event> first_animation_events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin);
+ std::vector<Event> first_animation_events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, &begin);
gfx::Rect second_animation_rect(
gfx::Point(0, GetContentDamageRect().height()),
GetContentDamageRect().size());
@@ -591,14 +541,9 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// Now, run just the first animation, and expect the sampler to lock-in once
// again.
RunEventSequence(
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(5),
- true,
- false,
- &begin),
- false,
- true,
- false,
+ GenerateEventSequence(begin, begin + base::TimeDelta::FromSeconds(5),
+ true, false, &begin),
+ false, true, false,
"Now, run just the first animation, and expect the sampler to lock-in "
"once again.");
if (HasFailure())
@@ -608,12 +553,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// the first animation and damage Rects with twice the area. This should
// cause the sampler to enter an "undetected" state again. This tests that
// pixel-weighting is being accounted for in the sampler's logic.
- first_animation_events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- &begin);
+ first_animation_events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, &begin);
second_animation_rect.set_width(second_animation_rect.width() * 2);
both_animations_events.clear();
bool include_second_animation_frame = true;
@@ -641,11 +582,7 @@ TEST_P(AnimatedContentSamplerParameterizedTest, FrameTimestampsAreSmooth) {
// once lock-in is continuous.
const base::TimeTicks begin = InitialTestTimeTicks();
std::vector<Event> events = GenerateEventSequence(
- begin,
- begin + base::TimeDelta::FromSeconds(20),
- true,
- false,
- nullptr);
+ begin, begin + base::TimeDelta::FromSeconds(20), true, false, nullptr);
typedef std::vector<base::TimeTicks> Timestamps;
Timestamps frame_timestamps;
for (std::vector<Event>::const_iterator i = events.begin(); i != events.end();
@@ -667,8 +604,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest, FrameTimestampsAreSmooth) {
// of 30 Hz content on a 60 Hz v-sync interval should result in
// display_counts[2] == 10. Quit early if any one frame was obviously
// repeated too many times.
- const int64 max_expected_repeats_per_frame = 1 +
- ComputeExpectedSamplingPeriod() / GetParam().vsync_interval;
+ const int64 max_expected_repeats_per_frame =
+ 1 + ComputeExpectedSamplingPeriod() / GetParam().vsync_interval;
std::vector<size_t> display_counts(max_expected_repeats_per_frame + 1, 0);
base::TimeTicks last_present_time = frame_timestamps.front();
for (Timestamps::const_iterator i = frame_timestamps.begin() + 1;
@@ -728,12 +665,8 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// Generate a full minute of events.
const base::TimeTicks begin = InitialTestTimeTicks();
- std::vector<Event> events =
- GenerateEventSequence(begin,
- begin + base::TimeDelta::FromMinutes(1),
- true,
- false,
- nullptr);
+ std::vector<Event> events = GenerateEventSequence(
+ begin, begin + base::TimeDelta::FromMinutes(1), true, false, nullptr);
// Modify the event sequence so that 1-3 ms of additional drift is suddenly
// present every 100 events. This is meant to simulate that, external to
@@ -762,10 +695,10 @@ TEST_P(AnimatedContentSamplerParameterizedTest,
// the last event's timestamp.
const base::TimeDelta total_error =
events.back().second - last_frame_timestamp;
- const base::TimeDelta max_acceptable_error = GetParam().min_capture_period +
+ const base::TimeDelta max_acceptable_error =
+ GetParam().min_capture_period +
base::TimeDelta::FromMilliseconds(max_drift_increment_millis);
- EXPECT_NEAR(0.0,
- total_error.InMicroseconds(),
+ EXPECT_NEAR(0.0, total_error.InMicroseconds(),
max_acceptable_error.InMicroseconds());
}
@@ -773,47 +706,47 @@ INSTANTIATE_TEST_CASE_P(
,
AnimatedContentSamplerParameterizedTest,
::testing::Values(
- // Typical frame rate content: Compositor runs at 60 Hz, capture at 30
- // Hz, and content video animates at 30, 25, or 24 Hz.
- Scenario(60, 30, 30),
- Scenario(60, 30, 25),
- Scenario(60, 30, 24),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, but capture is still at 30 Hz.
- Scenario(60, 30, 60),
- Scenario(60, 30, 50),
- Scenario(60, 30, 48),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, and capture is also a buttery 60 Hz.
- Scenario(60, 60, 60),
- Scenario(60, 60, 50),
- Scenario(60, 60, 48),
-
- // High frame rate content that leverages the Compositor's
- // capabilities, but the client has disabled HFR sampling.
- Scenario(60, 60, 60, 30),
- Scenario(60, 60, 50, 30),
- Scenario(60, 60, 48, 30),
-
- // On some platforms, the Compositor runs at 50 Hz.
- Scenario(50, 30, 30),
- Scenario(50, 30, 25),
- Scenario(50, 30, 24),
- Scenario(50, 30, 50),
- Scenario(50, 30, 48),
-
- // Stable, but non-standard content frame rates.
- Scenario(60, 30, 16),
- Scenario(60, 30, 20),
- Scenario(60, 30, 23),
- Scenario(60, 30, 26),
- Scenario(60, 30, 27),
- Scenario(60, 30, 28),
- Scenario(60, 30, 29),
- Scenario(60, 30, 31),
- Scenario(60, 30, 32),
- Scenario(60, 30, 33)));
+ // Typical frame rate content: Compositor runs at 60 Hz, capture at 30
+ // Hz, and content video animates at 30, 25, or 24 Hz.
+ Scenario(60, 30, 30),
+ Scenario(60, 30, 25),
+ Scenario(60, 30, 24),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, but capture is still at 30 Hz.
+ Scenario(60, 30, 60),
+ Scenario(60, 30, 50),
+ Scenario(60, 30, 48),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, and capture is also a buttery 60 Hz.
+ Scenario(60, 60, 60),
+ Scenario(60, 60, 50),
+ Scenario(60, 60, 48),
+
+ // High frame rate content that leverages the Compositor's
+ // capabilities, but the client has disabled HFR sampling.
+ Scenario(60, 60, 60, 30),
+ Scenario(60, 60, 50, 30),
+ Scenario(60, 60, 48, 30),
+
+ // On some platforms, the Compositor runs at 50 Hz.
+ Scenario(50, 30, 30),
+ Scenario(50, 30, 25),
+ Scenario(50, 30, 24),
+ Scenario(50, 30, 50),
+ Scenario(50, 30, 48),
+
+ // Stable, but non-standard content frame rates.
+ Scenario(60, 30, 16),
+ Scenario(60, 30, 20),
+ Scenario(60, 30, 23),
+ Scenario(60, 30, 26),
+ Scenario(60, 30, 27),
+ Scenario(60, 30, 28),
+ Scenario(60, 30, 29),
+ Scenario(60, 30, 31),
+ Scenario(60, 30, 32),
+ Scenario(60, 30, 33)));
} // namespace media
diff --git a/media/capture/capture_resolution_chooser.cc b/media/capture/content/capture_resolution_chooser.cc
index d61fd76..3c7f3e5 100644
--- a/media/capture/capture_resolution_chooser.cc
+++ b/media/capture/content/capture_resolution_chooser.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/capture_resolution_chooser.h"
+#include "media/capture/content/capture_resolution_chooser.h"
#include <algorithm>
#include <limits>
@@ -86,8 +86,8 @@ CaptureResolutionChooser::CaptureResolutionChooser(
const gfx::Size& max_frame_size,
ResolutionChangePolicy resolution_change_policy)
: max_frame_size_(max_frame_size),
- min_frame_size_(ComputeMinimumCaptureSize(max_frame_size,
- resolution_change_policy)),
+ min_frame_size_(
+ ComputeMinimumCaptureSize(max_frame_size, resolution_change_policy)),
resolution_change_policy_(resolution_change_policy),
target_area_(std::numeric_limits<decltype(target_area_)>::max()) {
DCHECK_LT(0, max_frame_size_.width());
@@ -100,7 +100,8 @@ CaptureResolutionChooser::CaptureResolutionChooser(
RecomputeCaptureSize();
}
-CaptureResolutionChooser::~CaptureResolutionChooser() {}
+CaptureResolutionChooser::~CaptureResolutionChooser() {
+}
void CaptureResolutionChooser::SetSourceSize(const gfx::Size& source_size) {
if (source_size.IsEmpty())
@@ -114,8 +115,7 @@ void CaptureResolutionChooser::SetSourceSize(const gfx::Size& source_size) {
case RESOLUTION_POLICY_FIXED_ASPECT_RATIO:
UpdateSnappedFrameSizes(ComputeBoundedCaptureSize(
- PadToMatchAspectRatio(source_size, max_frame_size_),
- min_frame_size_,
+ PadToMatchAspectRatio(source_size, max_frame_size_), min_frame_size_,
max_frame_size_));
RecomputeCaptureSize();
break;
@@ -158,7 +158,8 @@ gfx::Size CaptureResolutionChooser::FindNearestFrameSize(int area) const {
}
gfx::Size CaptureResolutionChooser::FindLargerFrameSize(
- int area, int num_steps_up) const {
+ int area,
+ int num_steps_up) const {
DCHECK_GT(num_steps_up, 0);
const auto begin = snapped_sizes_.begin();
const auto end = snapped_sizes_.end();
@@ -174,7 +175,8 @@ gfx::Size CaptureResolutionChooser::FindLargerFrameSize(
}
gfx::Size CaptureResolutionChooser::FindSmallerFrameSize(
- int area, int num_steps_down) const {
+ int area,
+ int num_steps_down) const {
DCHECK_GT(num_steps_down, 0);
const auto begin = snapped_sizes_.begin();
const auto end = snapped_sizes_.end();
@@ -209,8 +211,7 @@ void CaptureResolutionChooser::UpdateSnappedFrameSizes(
// the prior size.
int last_area = constrained_size.GetArea();
for (int height = constrained_size.height() - kSnappedHeightStep;
- height >= min_frame_size_.height();
- height -= kSnappedHeightStep) {
+ height >= min_frame_size_.height(); height -= kSnappedHeightStep) {
const int width =
height * constrained_size.width() / constrained_size.height();
if (width < min_frame_size_.width())
diff --git a/media/capture/capture_resolution_chooser.h b/media/capture/content/capture_resolution_chooser.h
index 144cfaa..f223fdf 100644
--- a/media/capture/capture_resolution_chooser.h
+++ b/media/capture/content/capture_resolution_chooser.h
@@ -36,15 +36,12 @@ class MEDIA_EXPORT CaptureResolutionChooser {
// media::ResolutionChangePolicy determines whether the variable frame
// resolutions being computed must adhere to a fixed aspect ratio or not, or
// that there must only be a single fixed resolution.
- CaptureResolutionChooser(
- const gfx::Size& max_frame_size,
- ResolutionChangePolicy resolution_change_policy);
+ CaptureResolutionChooser(const gfx::Size& max_frame_size,
+ ResolutionChangePolicy resolution_change_policy);
~CaptureResolutionChooser();
// Returns the current capture frame resolution to use.
- gfx::Size capture_size() const {
- return capture_size_;
- }
+ gfx::Size capture_size() const { return capture_size_; }
// Updates the capture size based on a change in the resolution of the source
// content.
diff --git a/media/capture/capture_resolution_chooser_unittest.cc b/media/capture/content/capture_resolution_chooser_unittest.cc
index 8eee2a8..4a41f0d 100644
--- a/media/capture/capture_resolution_chooser_unittest.cc
+++ b/media/capture/content/capture_resolution_chooser_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/capture_resolution_chooser.h"
+#include "media/capture/content/capture_resolution_chooser.h"
#include "base/location.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -31,8 +31,7 @@ void ExpectIsWithinBoundsAndSameAspectRatio(const Location& location,
EXPECT_GE(max_size.width(), size.width());
EXPECT_GE(max_size.height(), size.height());
EXPECT_NEAR(static_cast<double>(max_size.width()) / max_size.height(),
- static_cast<double>(size.width()) / size.height(),
- 0.01);
+ static_cast<double>(size.width()) / size.height(), 0.01);
}
// Test that the correct snapped frame sizes are computed for a |chooser|
@@ -41,11 +40,23 @@ void ExpectIsWithinBoundsAndSameAspectRatio(const Location& location,
void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
const gfx::Size& smallest_size) {
const int kSizes[17][2] = {
- { kMaxFrameWidth, kMaxFrameHeight },
- { 3520, 1980 }, { 3200, 1800 }, { 2880, 1620}, { 2560, 1440 },
- { 2240, 1260 }, { 1920, 1080 }, { 1760, 990 }, { 1600, 900 },
- { 1440, 810 }, { 1280, 720 }, { 1120, 630 }, { 960, 540 },
- { 800, 450 }, { 640, 360 }, { 480, 270 }, { 320, 180 },
+ {kMaxFrameWidth, kMaxFrameHeight},
+ {3520, 1980},
+ {3200, 1800},
+ {2880, 1620},
+ {2560, 1440},
+ {2240, 1260},
+ {1920, 1080},
+ {1760, 990},
+ {1600, 900},
+ {1440, 810},
+ {1280, 720},
+ {1120, 630},
+ {960, 540},
+ {800, 450},
+ {640, 360},
+ {480, 270},
+ {320, 180},
};
const gfx::Size largest_size(kMaxFrameWidth, kMaxFrameHeight);
@@ -70,11 +81,11 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
// Test the "find Nth lower size" logic.
for (size_t skips = 1; skips < 4; ++skips) {
for (size_t i = skips; i < arraysize(kSizes); ++i) {
- EXPECT_EQ(gfx::Size(kSizes[i][0], kSizes[i][1]),
- chooser->FindSmallerFrameSize(
- gfx::Size(kSizes[i - skips][0],
- kSizes[i - skips][1]).GetArea(),
- skips));
+ EXPECT_EQ(
+ gfx::Size(kSizes[i][0], kSizes[i][1]),
+ chooser->FindSmallerFrameSize(
+ gfx::Size(kSizes[i - skips][0], kSizes[i - skips][1]).GetArea(),
+ skips));
}
}
@@ -83,8 +94,7 @@ void TestSnappedFrameSizes(CaptureResolutionChooser* chooser,
for (size_t i = skips; i < arraysize(kSizes); ++i) {
EXPECT_EQ(gfx::Size(kSizes[i - skips][0], kSizes[i - skips][1]),
chooser->FindLargerFrameSize(
- gfx::Size(kSizes[i][0], kSizes[i][1]).GetArea(),
- skips));
+ gfx::Size(kSizes[i][0], kSizes[i][1]).GetArea(), skips));
}
}
@@ -215,33 +225,32 @@ TEST(CaptureResolutionChooserTest,
TEST(CaptureResolutionChooserTest,
FixedAspectRatioPolicy_CaptureSizeHasSameAspectRatio) {
- CaptureResolutionChooser chooser(
- gfx::Size(kMaxFrameWidth, kMaxFrameHeight),
- RESOLUTION_POLICY_FIXED_ASPECT_RATIO);
+ CaptureResolutionChooser chooser(gfx::Size(kMaxFrameWidth, kMaxFrameHeight),
+ RESOLUTION_POLICY_FIXED_ASPECT_RATIO);
// Starting condition.
const gfx::Size min_size(kMinFrameWidth, kMinFrameHeight);
const gfx::Size max_size(kMaxFrameWidth, kMaxFrameHeight);
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Max size in --> max size out.
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Various source sizes within bounds.
chooser.SetSourceSize(gfx::Size(640, 480));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(480, 640));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(640, 640));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Bad source size results in no update.
const gfx::Size unchanged_size = chooser.capture_size();
@@ -251,30 +260,30 @@ TEST(CaptureResolutionChooserTest,
// Downscaling size (preserving aspect ratio) when source size exceeds the
// upper bounds.
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth * 2, kMaxFrameHeight * 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth * 2, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMaxFrameWidth, kMaxFrameHeight * 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// Upscaling size (preserving aspect ratio) when source size is under the
// lower bounds.
chooser.SetSourceSize(gfx::Size(kMinFrameWidth / 2, kMinFrameHeight / 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMinFrameWidth / 2, kMaxFrameHeight));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
chooser.SetSourceSize(gfx::Size(kMinFrameWidth, kMinFrameHeight / 2));
- ExpectIsWithinBoundsAndSameAspectRatio(
- FROM_HERE, min_size, max_size, chooser.capture_size());
+ ExpectIsWithinBoundsAndSameAspectRatio(FROM_HERE, min_size, max_size,
+ chooser.capture_size());
// For a chooser configured with the "fixed aspect ratio" policy, the smallest
// possible computed size is the one with 180 lines of resolution and the same
@@ -288,8 +297,8 @@ TEST(CaptureResolutionChooserTest,
TEST(CaptureResolutionChooserTest,
AnyWithinLimitPolicy_CaptureSizeIsAnythingWithinLimits) {
const gfx::Size max_size(kMaxFrameWidth, kMaxFrameHeight);
- CaptureResolutionChooser chooser(
- max_size, RESOLUTION_POLICY_ANY_WITHIN_LIMIT);
+ CaptureResolutionChooser chooser(max_size,
+ RESOLUTION_POLICY_ANY_WITHIN_LIMIT);
// Starting condition.
EXPECT_EQ(max_size, chooser.capture_size());
diff --git a/media/capture/feedback_signal_accumulator.cc b/media/capture/content/feedback_signal_accumulator.cc
index 0b073f8..896c23a 100644
--- a/media/capture/feedback_signal_accumulator.cc
+++ b/media/capture/content/feedback_signal_accumulator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/feedback_signal_accumulator.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
#include <algorithm>
#include <cmath>
diff --git a/media/capture/feedback_signal_accumulator.h b/media/capture/content/feedback_signal_accumulator.h
index 4139f57..c6046de 100644
--- a/media/capture/feedback_signal_accumulator.h
+++ b/media/capture/content/feedback_signal_accumulator.h
@@ -54,9 +54,9 @@ class MEDIA_EXPORT FeedbackSignalAccumulator {
// accumulated average.
const base::TimeDelta half_life_;
- base::TimeTicks reset_time_; // |timestamp| passed in last call to Reset().
- double average_; // Current accumulated average.
- double update_value_; // Latest |value| accepted by Update().
+ base::TimeTicks reset_time_; // |timestamp| passed in last call to Reset().
+ double average_; // Current accumulated average.
+ double update_value_; // Latest |value| accepted by Update().
base::TimeTicks update_time_; // Latest |timestamp| accepted by Update().
double prior_average_; // Accumulated average before last call to Update().
base::TimeTicks prior_update_time_; // |timestamp| in prior call to Update().
diff --git a/media/capture/feedback_signal_accumulator_unittest.cc b/media/capture/content/feedback_signal_accumulator_unittest.cc
index 8696056..9d0f925 100644
--- a/media/capture/feedback_signal_accumulator_unittest.cc
+++ b/media/capture/content/feedback_signal_accumulator_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/feedback_signal_accumulator.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/media/capture/screen_capture_device_core.cc b/media/capture/content/screen_capture_device_core.cc
index e17cbf8..53dd3818 100644
--- a/media/capture/screen_capture_device_core.cc
+++ b/media/capture/content/screen_capture_device_core.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/screen_capture_device_core.h"
+#include "media/capture/content/screen_capture_device_core.h"
#include "base/bind.h"
#include "base/logging.h"
@@ -16,16 +16,17 @@ namespace media {
namespace {
-void DeleteCaptureMachine(
- scoped_ptr<VideoCaptureMachine> capture_machine) {
+void DeleteCaptureMachine(scoped_ptr<VideoCaptureMachine> capture_machine) {
capture_machine.reset();
}
} // namespace
-VideoCaptureMachine::VideoCaptureMachine() {}
+VideoCaptureMachine::VideoCaptureMachine() {
+}
-VideoCaptureMachine::~VideoCaptureMachine() {}
+VideoCaptureMachine::~VideoCaptureMachine() {
+}
bool VideoCaptureMachine::IsAutoThrottlingEnabled() const {
return false;
@@ -59,8 +60,7 @@ void ScreenCaptureDeviceCore::AllocateAndStart(
client.Pass(), params, capture_machine_->IsAutoThrottlingEnabled());
capture_machine_->Start(
- oracle_proxy_,
- params,
+ oracle_proxy_, params,
base::Bind(&ScreenCaptureDeviceCore::CaptureStarted, AsWeakPtr()));
TransitionStateTo(kCapturing);
@@ -91,8 +91,7 @@ void ScreenCaptureDeviceCore::CaptureStarted(bool success) {
ScreenCaptureDeviceCore::ScreenCaptureDeviceCore(
scoped_ptr<VideoCaptureMachine> capture_machine)
- : state_(kIdle),
- capture_machine_(capture_machine.Pass()) {
+ : state_(kIdle), capture_machine_(capture_machine.Pass()) {
DCHECK(capture_machine_.get());
}
@@ -100,8 +99,8 @@ ScreenCaptureDeviceCore::~ScreenCaptureDeviceCore() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_NE(state_, kCapturing);
if (capture_machine_) {
- capture_machine_->Stop(base::Bind(&DeleteCaptureMachine,
- base::Passed(&capture_machine_)));
+ capture_machine_->Stop(
+ base::Bind(&DeleteCaptureMachine, base::Passed(&capture_machine_)));
}
DVLOG(1) << "ScreenCaptureDeviceCore@" << this << " destroying.";
}
@@ -111,10 +110,9 @@ void ScreenCaptureDeviceCore::TransitionStateTo(State next_state) {
#ifndef NDEBUG
static const char* kStateNames[] = {
- "Idle", "Allocated", "Capturing", "Error"
- };
- DVLOG(1) << "State change: " << kStateNames[state_]
- << " --> " << kStateNames[next_state];
+ "Idle", "Allocated", "Capturing", "Error"};
+ DVLOG(1) << "State change: " << kStateNames[state_] << " --> "
+ << kStateNames[next_state];
#endif
state_ = next_state;
diff --git a/media/capture/screen_capture_device_core.h b/media/capture/content/screen_capture_device_core.h
index 03d6625..0720094 100644
--- a/media/capture/screen_capture_device_core.h
+++ b/media/capture/content/screen_capture_device_core.h
@@ -11,8 +11,8 @@
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
#include "media/base/media_export.h"
-#include "media/capture/thread_safe_capture_oracle.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/content/thread_safe_capture_oracle.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -58,8 +58,7 @@ class MEDIA_EXPORT VideoCaptureMachine {
class MEDIA_EXPORT ScreenCaptureDeviceCore
: public base::SupportsWeakPtr<ScreenCaptureDeviceCore> {
public:
- ScreenCaptureDeviceCore(
- scoped_ptr<VideoCaptureMachine> capture_machine);
+ ScreenCaptureDeviceCore(scoped_ptr<VideoCaptureMachine> capture_machine);
virtual ~ScreenCaptureDeviceCore();
// Asynchronous requests to change ScreenCaptureDeviceCore state.
@@ -69,11 +68,7 @@ class MEDIA_EXPORT ScreenCaptureDeviceCore
private:
// Flag indicating current state.
- enum State {
- kIdle,
- kCapturing,
- kError
- };
+ enum State { kIdle, kCapturing, kError };
void TransitionStateTo(State next_state);
@@ -103,7 +98,6 @@ class MEDIA_EXPORT ScreenCaptureDeviceCore
DISALLOW_COPY_AND_ASSIGN(ScreenCaptureDeviceCore);
};
-
} // namespace media
#endif // MEDIA_CAPTURE_SCREEN_CAPTURE_DEVICE_CORE_H_
diff --git a/media/capture/smooth_event_sampler.cc b/media/capture/content/smooth_event_sampler.cc
index 7064f86..79031eb 100644
--- a/media/capture/smooth_event_sampler.cc
+++ b/media/capture/content/smooth_event_sampler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/smooth_event_sampler.h"
+#include "media/capture/content/smooth_event_sampler.h"
#include <algorithm>
@@ -21,9 +21,9 @@ const int kOverdueDirtyThresholdMillis = 250; // 4 FPS
SmoothEventSampler::SmoothEventSampler(base::TimeDelta min_capture_period,
int redundant_capture_goal)
- : redundant_capture_goal_(redundant_capture_goal),
- overdue_sample_count_(0),
- token_bucket_(base::TimeDelta::Max()) {
+ : redundant_capture_goal_(redundant_capture_goal),
+ overdue_sample_count_(0),
+ token_bucket_(base::TimeDelta::Max()) {
SetMinCapturePeriod(min_capture_period);
}
@@ -49,8 +49,7 @@ void SmoothEventSampler::ConsiderPresentationEvent(base::TimeTicks event_time) {
if (token_bucket_ > token_bucket_capacity_)
token_bucket_ = token_bucket_capacity_;
}
- TRACE_COUNTER1("gpu.capture",
- "MirroringTokenBucketUsec",
+ TRACE_COUNTER1("gpu.capture", "MirroringTokenBucketUsec",
std::max<int64>(0, token_bucket_.InMicroseconds()));
}
current_event_ = event_time;
@@ -64,8 +63,7 @@ void SmoothEventSampler::RecordSample() {
token_bucket_ -= min_capture_period_;
if (token_bucket_ < base::TimeDelta())
token_bucket_ = base::TimeDelta();
- TRACE_COUNTER1("gpu.capture",
- "MirroringTokenBucketUsec",
+ TRACE_COUNTER1("gpu.capture", "MirroringTokenBucketUsec",
std::max<int64>(0, token_bucket_.InMicroseconds()));
if (HasUnrecordedEvent()) {
@@ -76,8 +74,8 @@ void SmoothEventSampler::RecordSample() {
}
}
-bool SmoothEventSampler::IsOverdueForSamplingAt(base::TimeTicks event_time)
- const {
+bool SmoothEventSampler::IsOverdueForSamplingAt(
+ base::TimeTicks event_time) const {
DCHECK(!event_time.is_null());
if (!HasUnrecordedEvent() && overdue_sample_count_ >= redundant_capture_goal_)
@@ -90,7 +88,7 @@ bool SmoothEventSampler::IsOverdueForSamplingAt(base::TimeTicks event_time)
// won't request a sample just yet.
base::TimeDelta dirty_interval = event_time - last_sample_;
return dirty_interval >=
- base::TimeDelta::FromMilliseconds(kOverdueDirtyThresholdMillis);
+ base::TimeDelta::FromMilliseconds(kOverdueDirtyThresholdMillis);
}
bool SmoothEventSampler::HasUnrecordedEvent() const {
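A worked example of the token-bucket arithmetic in the hunks above: each ConsiderPresentationEvent() deposits the time elapsed since the previous event (capped at the bucket capacity), and each RecordSample() withdraws one min capture period, clamped at zero. The gating condition below is a simplification, since ShouldSample() is not shown in this diff:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t vsync_us = 1000000 / 60;           // 60 Hz presentation events.
  const int64_t capture_period_us = 2 * vsync_us;  // 30 Hz max capture rate.
  int64_t token_bucket_us = 0;
  for (int event = 1; event <= 4; ++event) {
    token_bucket_us += vsync_us;                   // ConsiderPresentationEvent().
    if (token_bucket_us >= capture_period_us) {    // Simplified ShouldSample().
      token_bucket_us -= capture_period_us;        // RecordSample().
      std::printf("sampled on event %d\n", event);
    }
  }
  return 0;  // Fires on events 2 and 4: 30 Hz out of a 60 Hz stream.
}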
diff --git a/media/capture/smooth_event_sampler.h b/media/capture/content/smooth_event_sampler.h
index c250eb4..c250eb4 100644
--- a/media/capture/smooth_event_sampler.h
+++ b/media/capture/content/smooth_event_sampler.h
diff --git a/media/capture/content/smooth_event_sampler_unittest.cc b/media/capture/content/smooth_event_sampler_unittest.cc
new file mode 100644
index 0000000..b3234aa
--- /dev/null
+++ b/media/capture/content/smooth_event_sampler_unittest.cc
@@ -0,0 +1,704 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/content/smooth_event_sampler.h"
+
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+namespace {
+
+bool AddEventAndConsiderSampling(SmoothEventSampler* sampler,
+ base::TimeTicks event_time) {
+ sampler->ConsiderPresentationEvent(event_time);
+ return sampler->ShouldSample();
+}
+
+void SteadyStateSampleAndAdvance(base::TimeDelta vsync,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ sampler->RecordSample();
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+ *t += vsync;
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+}
+
+void SteadyStateNoSampleAndAdvance(base::TimeDelta vsync,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ ASSERT_FALSE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+ *t += vsync;
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
+}
+
+base::TimeTicks InitialTestTimeTicks() {
+ return base::TimeTicks() + base::TimeDelta::FromSeconds(1);
+}
+
+void TestRedundantCaptureStrategy(base::TimeDelta capture_period,
+ int redundant_capture_goal,
+ SmoothEventSampler* sampler,
+ base::TimeTicks* t) {
+ // Before any events have been considered, we're overdue for sampling.
+ ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t));
+
+ // Consider the first event. We want to sample that.
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
+ ASSERT_TRUE(sampler->HasUnrecordedEvent());
+ sampler->RecordSample();
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+
+ // After more than 250 ms has passed without considering an event, we should
+ // repeatedly be overdue for sampling. However, once the redundant capture
+ // goal is achieved, we should no longer be overdue for sampling.
+ *t += base::TimeDelta::FromMilliseconds(250);
+ for (int i = 0; i < redundant_capture_goal; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_FALSE(sampler->HasUnrecordedEvent());
+ ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t))
+ << "Should sample until redundant capture goal is hit";
+ sampler->RecordSample();
+ *t += capture_period; // Timer fires once every capture period.
+ }
+ ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t))
+ << "Should not be overdue once redundant capture goal achieved.";
+}
+
+} // namespace
+
+// 60Hz sampled at 30Hz should produce 30Hz. In addition, this test contains
+// much more comprehensive before/after/edge-case scenarios than the others.
+TEST(SmoothEventSamplerTest, Sample60HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 200;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every other vsync, indefinitely.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 14, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ ASSERT_TRUE(sampler.HasUnrecordedEvent());
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state,
+ // but at a different phase.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 50Hz sampled at 30Hz should produce a sequence where some frames are skipped.
+TEST(SmoothEventSamplerTest, Sample50HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 2;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 50;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture 1st, 2nd and 4th frames out of every five
+ // frames, indefinitely.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 11, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state
+ // again.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 75Hz sampled at 30Hz should produce a sequence where some frames are skipped.
+TEST(SmoothEventSamplerTest, Sample75HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 32;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 75;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture 1st and 3rd frames out of every five
+ // frames, indefinitely.
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now pretend we're limited by backpressure in the pipeline. In this scenario
+ // we are adding events but not sampling them.
+ for (int i = 0; i < 20; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 16, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We capture the next frame, and not the one
+ // after that, and then we're back in the steady state again.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 30Hz sampled at 30Hz should produce 30Hz.
+TEST(SmoothEventSamplerTest, Sample30HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 1;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 30;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every vsync, indefinitely.
+ for (int i = 0; i < 200; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+  // Now pretend we're limited by backpressure in the pipeline. In this
+  // scenario we are adding events but not sampling them.
+ for (int i = 0; i < 10; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 7, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// 24Hz sampled at 30Hz should produce 24Hz.
+TEST(SmoothEventSamplerTest, Sample24HertzAt30Hertz) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const int redundant_capture_goal = 333;
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 24;
+
+ SmoothEventSampler sampler(capture_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(capture_period, redundant_capture_goal, &sampler,
+ &t);
+
+ // Steady state, we should capture every vsync, indefinitely.
+ for (int i = 0; i < 200; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+  // Now pretend we're limited by backpressure in the pipeline. In this
+  // scenario we are adding events but not sampling them.
+ for (int i = 0; i < 10; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ ASSERT_EQ(i >= 6, sampler.IsOverdueForSamplingAt(t));
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ t += vsync;
+ }
+
+ // Now suppose we can sample again. We should be back in the steady state.
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
+// Tests that changing the minimum capture period during usage results in the
+// desired behavior.
+TEST(SmoothEventSamplerTest, Sample60HertzWithVariedCapturePeriods) {
+ const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
+ const base::TimeDelta one_to_one_period = vsync;
+ const base::TimeDelta two_to_one_period = vsync * 2;
+ const base::TimeDelta two_and_three_to_one_period =
+ base::TimeDelta::FromSeconds(1) / 24;
+ const int redundant_capture_goal = 1;
+
+ SmoothEventSampler sampler(one_to_one_period, redundant_capture_goal);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ TestRedundantCaptureStrategy(one_to_one_period, redundant_capture_goal,
+ &sampler, &t);
+
+ // With the capture rate at 60 Hz, we should capture every vsync.
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+  // Now change the capture rate to 30 Hz, and we should capture every other
+ // vsync.
+ sampler.SetMinCapturePeriod(two_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now change the capture rate back to 60 Hz, and we should capture every
+ // vsync again.
+ sampler.SetMinCapturePeriod(one_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+
+ // Now change the capture rate to 24 Hz, and we should capture with a 2-3-2-3
+ // cadence.
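+  // (60 Hz drawn at a 24 Hz capture limit means one capture per 2.5 vsyncs on
+  // average, which the sampler realizes as alternating gaps of two and three
+  // vsyncs between captures.)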
+ sampler.SetMinCapturePeriod(two_and_three_to_one_period);
+ for (int i = 0; i < 100; i++) {
+ SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
+ SteadyStateSampleAndAdvance(vsync, &sampler, &t);
+ }
+}
+
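+// Tests that a second event arriving with the same timestamp as an
+// already-sampled one is not sampled itself, but still marks the sampler as
+// dirty, making it overdue for sampling.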
+TEST(SmoothEventSamplerTest, DoubleDrawAtOneTimeStillDirties) {
+ const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
+ const base::TimeDelta overdue_period = base::TimeDelta::FromSeconds(1);
+
+ SmoothEventSampler sampler(capture_period, 1);
+ base::TimeTicks t = InitialTestTimeTicks();
+
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ sampler.RecordSample();
+ ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t))
+ << "Sampled last event; should not be dirty.";
+ t += overdue_period;
+
+ // Now simulate 2 events with the same clock value.
+ ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
+ sampler.RecordSample();
+ ASSERT_FALSE(AddEventAndConsiderSampling(&sampler, t))
+ << "Two events at same time -- expected second not to be sampled.";
+ ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t + overdue_period))
+ << "Second event should dirty the capture state.";
+ sampler.RecordSample();
+ ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t + overdue_period));
+}
+
+namespace {
+
+struct DataPoint {
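+  // Whether the sampler is expected to decide to sample this event.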
+ bool should_capture;
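+  // Time elapsed since the previous event, in milliseconds.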
+ double increment_ms;
+};
+
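+// Replays a recorded sequence of drawing events through |sampler|, asserting
+// that its sampling decisions match the recorded expectations.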
+void ReplayCheckingSamplerDecisions(const DataPoint* data_points,
+ size_t num_data_points,
+ SmoothEventSampler* sampler) {
+ base::TimeTicks t = InitialTestTimeTicks();
+ for (size_t i = 0; i < num_data_points; ++i) {
+ t += base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(data_points[i].increment_ms * 1000));
+ ASSERT_EQ(data_points[i].should_capture,
+ AddEventAndConsiderSampling(sampler, t))
+ << "at data_points[" << i << ']';
+ if (data_points[i].should_capture)
+ sampler->RecordSample();
+ }
+}
+
+} // namespace
+
+TEST(SmoothEventSamplerTest, DrawingAt24FpsWith60HzVsyncSampledAt30Hertz) {
+  // Timing data captured from an actual run: initial instability as a 24 FPS
+  // video was started from a still screen, followed by a clear steady-state.
+ static const DataPoint data_points[] = {{true, 1437.93},
+ {true, 150.484},
+ {true, 217.362},
+ {true, 50.161},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 66.88},
+ {true, 50.161},
+ {false, 0},
+ {false, 0},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 117.041},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 0},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 16.721},
+ {true, 66.881},
+ {false, 0},
+ {true, 33.441},
+ {true, 16.72},
+ {true, 50.16},
+ {true, 16.72},
+ {false, 16.721},
+ {true, 50.161},
+ {true, 50.16},
+ {false, 0},
+ {true, 33.441},
+ {true, 50.337},
+ {true, 50.183},
+ {true, 16.722},
+ {true, 50.161},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 83.601},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
+  // Timing data captured from an actual run: initial instability as a 30 FPS
+  // video was started from a still screen, followed by steady-state. The
+  // drawing framerate from the video rendering was a bit volatile, but
+  // averaged 30 FPS.
+ static const DataPoint data_points[] = {{true, 2407.69},
+ {true, 16.733},
+ {true, 217.362},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 16.721},
+ {true, 33.44},
+ {false, 0},
+ {true, 50.161},
+ {true, 50.16},
+ {false, 0},
+ {true, 50.161},
+ {true, 33.44},
+ {true, 16.72},
+ {false, 0},
+ {false, 16.72},
+ {true, 66.881},
+ {false, 0},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.538},
+ {true, 33.526},
+ {true, 33.447},
+ {true, 33.445},
+ {true, 33.441},
+ {true, 16.721},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {false, 0},
+ {false, 16.72},
+ {true, 66.881},
+ {true, 16.72},
+ {false, 16.72},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 66.88},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 50.16},
+ {false, 0.001},
+ {true, 16.721},
+ {true, 66.88},
+ {true, 33.44},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 50.161},
+ {true, 16.72},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 66.881},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 66.88},
+ {true, 16.721},
+ {true, 50.16},
+ {true, 33.44},
+ {true, 16.72},
+ {true, 33.441},
+ {true, 33.44},
+ {true, 33.44}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
+  // Timing data captured from an actual run: the WebGL Aquarium demo
+  // (http://webglsamples.googlecode.com/hg/aquarium/aquarium.html), which ran
+  // at 55-60 FPS in the steady-state.
+ static const DataPoint data_points[] = {{true, 16.72},
+ {true, 16.72},
+ {true, 4163.29},
+ {true, 50.193},
+ {true, 117.041},
+ {true, 50.161},
+ {true, 50.16},
+ {true, 33.441},
+ {true, 50.16},
+ {true, 33.44},
+ {false, 0},
+ {false, 0},
+ {true, 50.161},
+ {true, 83.601},
+ {true, 50.16},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 50.16},
+ {true, 16.72},
+ {false, 0.001},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.721},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.721},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.721},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.441},
+ {false, 0},
+ {true, 33.44},
+ {false, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 16.721},
+ {true, 50.161},
+ {false, 0},
+ {true, 16.72},
+ {true, 33.44},
+ {false, 0},
+ {true, 33.441},
+ {false, 16.72},
+ {true, 16.72},
+ {true, 50.16}};
+
+ SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
+ ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
+}
+
+} // namespace media
diff --git a/media/capture/thread_safe_capture_oracle.cc b/media/capture/content/thread_safe_capture_oracle.cc
index 040c560..cbc4948 100644
--- a/media/capture/thread_safe_capture_oracle.cc
+++ b/media/capture/content/thread_safe_capture_oracle.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/thread_safe_capture_oracle.h"
+#include "media/capture/content/thread_safe_capture_oracle.h"
#include "base/basictypes.h"
#include "base/bind.h"
@@ -34,15 +34,17 @@ ThreadSafeCaptureOracle::ThreadSafeCaptureOracle(
const VideoCaptureParams& params,
bool enable_auto_throttling)
: client_(client.Pass()),
- oracle_(base::TimeDelta::FromMicroseconds(
- static_cast<int64>(1000000.0 / params.requested_format.frame_rate +
- 0.5 /* to round to nearest int */)),
- params.requested_format.frame_size,
- params.resolution_change_policy,
- enable_auto_throttling),
- params_(params) {}
+ oracle_(base::TimeDelta::FromMicroseconds(static_cast<int64>(
+ 1000000.0 / params.requested_format.frame_rate +
+ 0.5 /* to round to nearest int */)),
+ params.requested_format.frame_size,
+ params.resolution_change_policy,
+ enable_auto_throttling),
+ params_(params) {
+}
-ThreadSafeCaptureOracle::~ThreadSafeCaptureOracle() {}
+ThreadSafeCaptureOracle::~ThreadSafeCaptureOracle() {
+}
bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
VideoCaptureOracle::Event event,
@@ -76,21 +78,20 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
// Get the current buffer pool utilization and attenuate it: The utilization
// reported to the oracle is in terms of a maximum sustainable amount (not the
// absolute maximum).
- const double attenuated_utilization = client_->GetBufferPoolUtilization() *
+ const double attenuated_utilization =
+ client_->GetBufferPoolUtilization() *
(100.0 / kTargetMaxPoolUtilizationPercent);
const char* event_name =
- (event == VideoCaptureOracle::kTimerPoll ? "poll" :
- (event == VideoCaptureOracle::kCompositorUpdate ? "gpu" :
- "unknown"));
+ (event == VideoCaptureOracle::kTimerPoll
+ ? "poll"
+ : (event == VideoCaptureOracle::kCompositorUpdate ? "gpu"
+ : "unknown"));
// Consider the various reasons not to initiate a capture.
if (should_capture && !output_buffer.get()) {
- TRACE_EVENT_INSTANT1("gpu.capture",
- "PipelineLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger",
- event_name);
+ TRACE_EVENT_INSTANT1("gpu.capture", "PipelineLimited",
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
oracle_.RecordWillNotCapture(attenuated_utilization);
return false;
} else if (!should_capture && output_buffer.get()) {
@@ -99,22 +100,19 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
// capture rate limit: for example, the content is animating at 60fps but
// we're capturing at 30fps.
TRACE_EVENT_INSTANT1("gpu.capture", "FpsRateLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger", event_name);
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
}
return false;
} else if (!should_capture && !output_buffer.get()) {
// We decided not to capture, but we wouldn't have been able to if we wanted
// to because no output buffer was available.
TRACE_EVENT_INSTANT1("gpu.capture", "NearlyPipelineLimited",
- TRACE_EVENT_SCOPE_THREAD,
- "trigger", event_name);
+ TRACE_EVENT_SCOPE_THREAD, "trigger", event_name);
return false;
}
const int frame_number = oracle_.RecordCapture(attenuated_utilization);
TRACE_EVENT_ASYNC_BEGIN2("gpu.capture", "Capture", output_buffer.get(),
- "frame_number", frame_number,
- "trigger", event_name);
+ "frame_number", frame_number, "trigger", event_name);
// Texture frames wrap a texture mailbox, which we don't have at the moment.
// We do not construct those frames.
if (params_.requested_format.pixel_storage != media::PIXEL_STORAGE_TEXTURE) {
@@ -124,12 +122,10 @@ bool ThreadSafeCaptureOracle::ObserveEventAndDecideCapture(
output_buffer->size(), base::TimeDelta());
DCHECK(*storage);
}
- *callback = base::Bind(&ThreadSafeCaptureOracle::DidCaptureFrame,
- this,
- frame_number,
- base::Passed(&output_buffer),
- capture_begin_time,
- oracle_.estimated_frame_duration());
+ *callback =
+ base::Bind(&ThreadSafeCaptureOracle::DidCaptureFrame, this, frame_number,
+ base::Passed(&output_buffer), capture_begin_time,
+ oracle_.estimated_frame_duration());
return true;
}
@@ -164,9 +160,8 @@ void ThreadSafeCaptureOracle::DidCaptureFrame(
base::TimeTicks timestamp,
bool success) {
base::AutoLock guard(lock_);
- TRACE_EVENT_ASYNC_END2("gpu.capture", "Capture", buffer.get(),
- "success", success,
- "timestamp", timestamp.ToInternalValue());
+ TRACE_EVENT_ASYNC_END2("gpu.capture", "Capture", buffer.get(), "success",
+ success, "timestamp", timestamp.ToInternalValue());
if (oracle_.CompleteCapture(frame_number, success, &timestamp)) {
TRACE_EVENT_INSTANT0("gpu.capture", "CaptureSucceeded",
@@ -177,18 +172,16 @@ void ThreadSafeCaptureOracle::DidCaptureFrame(
frame->metadata()->SetDouble(VideoFrameMetadata::FRAME_RATE,
params_.requested_format.frame_rate);
- frame->metadata()->SetTimeTicks(
- VideoFrameMetadata::CAPTURE_BEGIN_TIME, capture_begin_time);
- frame->metadata()->SetTimeTicks(
- VideoFrameMetadata::CAPTURE_END_TIME, base::TimeTicks::Now());
+ frame->metadata()->SetTimeTicks(VideoFrameMetadata::CAPTURE_BEGIN_TIME,
+ capture_begin_time);
+ frame->metadata()->SetTimeTicks(VideoFrameMetadata::CAPTURE_END_TIME,
+ base::TimeTicks::Now());
frame->metadata()->SetTimeDelta(VideoFrameMetadata::FRAME_DURATION,
estimated_frame_duration);
- frame->AddDestructionObserver(base::Bind(
- &ThreadSafeCaptureOracle::DidConsumeFrame,
- this,
- frame_number,
- frame->metadata()));
+ frame->AddDestructionObserver(
+ base::Bind(&ThreadSafeCaptureOracle::DidConsumeFrame, this,
+ frame_number, frame->metadata()));
client_->OnIncomingCapturedVideoFrame(buffer.Pass(), frame, timestamp);
}
diff --git a/media/capture/thread_safe_capture_oracle.h b/media/capture/content/thread_safe_capture_oracle.h
index c5d83df..1aa905e 100644
--- a/media/capture/thread_safe_capture_oracle.h
+++ b/media/capture/content/thread_safe_capture_oracle.h
@@ -11,8 +11,8 @@
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/video_frame.h"
-#include "media/capture/video_capture_oracle.h"
-#include "media/video/capture/video_capture_device.h"
+#include "media/capture/content/video_capture_oracle.h"
+#include "media/capture/video/video_capture_device.h"
namespace media {
@@ -70,14 +70,13 @@ class MEDIA_EXPORT ThreadSafeCaptureOracle
virtual ~ThreadSafeCaptureOracle();
// Callback invoked on completion of all captures.
- void DidCaptureFrame(
- int frame_number,
- scoped_ptr<VideoCaptureDevice::Client::Buffer> buffer,
- base::TimeTicks capture_begin_time,
- base::TimeDelta estimated_frame_duration,
- const scoped_refptr<VideoFrame>& frame,
- base::TimeTicks timestamp,
- bool success);
+ void DidCaptureFrame(int frame_number,
+ scoped_ptr<VideoCaptureDevice::Client::Buffer> buffer,
+ base::TimeTicks capture_begin_time,
+ base::TimeDelta estimated_frame_duration,
+ const scoped_refptr<VideoFrame>& frame,
+ base::TimeTicks timestamp,
+ bool success);
// Callback invoked once all consumers have finished with a delivered video
// frame. Consumer feedback signals are scanned from the frame's |metadata|.
diff --git a/media/capture/video_capture_oracle.cc b/media/capture/content/video_capture_oracle.cc
index a536aab..7f4eaa9 100644
--- a/media/capture/video_capture_oracle.cc
+++ b/media/capture/content/video_capture_oracle.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video_capture_oracle.h"
+#include "media/capture/content/video_capture_oracle.h"
#include <algorithm>
@@ -65,7 +65,7 @@ double FractionFromExpectedFrameRate(base::TimeDelta delta, int frame_rate) {
const base::TimeDelta expected_delta =
base::TimeDelta::FromSeconds(1) / frame_rate;
return (delta - expected_delta).InMillisecondsF() /
- expected_delta.InMillisecondsF();
+ expected_delta.InMillisecondsF();
}
// Returns the next-higher TimeTicks value.
@@ -83,8 +83,8 @@ bool HasSufficientRecentFeedback(const FeedbackSignalAccumulator& accumulator,
const base::TimeDelta amount_of_history =
accumulator.update_time() - accumulator.reset_time();
return (amount_of_history.InMicroseconds() >= kMinSizeChangePeriodMicros) &&
- ((now - accumulator.update_time()).InMicroseconds() <=
- kMaxTimeSinceLastFeedbackUpdateMicros);
+ ((now - accumulator.update_time()).InMicroseconds() <=
+ kMaxTimeSinceLastFeedbackUpdateMicros);
}
} // anonymous namespace
@@ -110,7 +110,8 @@ VideoCaptureOracle::VideoCaptureOracle(
<< (auto_throttling_enabled_ ? "enabled." : "disabled.");
}
-VideoCaptureOracle::~VideoCaptureOracle() {}
+VideoCaptureOracle::~VideoCaptureOracle() {
+}
void VideoCaptureOracle::SetSourceSize(const gfx::Size& source_size) {
resolution_chooser_.SetSourceSize(source_size);
@@ -262,19 +263,19 @@ bool VideoCaptureOracle::CompleteCapture(int frame_number,
const int rounded_frame_rate =
static_cast<int>(estimated_frame_rate + 0.5);
VLOG_STREAM(3) << base::StringPrintf(
- "Captured #%d: delta=%" PRId64 " usec"
+ "Captured #%d: delta=%" PRId64
+ " usec"
", now locked into {%s}, %+0.1f%% slower than %d FPS",
- frame_number,
- delta.InMicroseconds(),
+ frame_number, delta.InMicroseconds(),
content_sampler_.detected_region().ToString().c_str(),
100.0 * FractionFromExpectedFrameRate(delta, rounded_frame_rate),
rounded_frame_rate);
} else {
VLOG_STREAM(3) << base::StringPrintf(
- "Captured #%d: delta=%" PRId64 " usec"
+ "Captured #%d: delta=%" PRId64
+ " usec"
", d/30fps=%+0.1f%%, d/25fps=%+0.1f%%, d/24fps=%+0.1f%%",
- frame_number,
- delta.InMicroseconds(),
+ frame_number, delta.InMicroseconds(),
100.0 * FractionFromExpectedFrameRate(delta, 30),
100.0 * FractionFromExpectedFrameRate(delta, 25),
100.0 * FractionFromExpectedFrameRate(delta, 24));
@@ -326,8 +327,7 @@ void VideoCaptureOracle::SetFrameTimestamp(int frame_number,
bool VideoCaptureOracle::IsFrameInRecentHistory(int frame_number) const {
return ((next_frame_number_ - frame_number) < kMaxFrameTimestamps &&
- frame_number <= next_frame_number_ &&
- frame_number >= 0);
+ frame_number <= next_frame_number_ && frame_number >= 0);
}
void VideoCaptureOracle::CommitCaptureSizeAndReset(
@@ -381,8 +381,8 @@ int VideoCaptureOracle::AnalyzeForDecreasedArea(base::TimeTicks analyze_time) {
buffer_pool_utilization_.current() > 1.0) {
// This calculation is hand-wavy, but seems to work well in a variety of
// situations.
- buffer_capable_area = static_cast<int>(
- current_area / buffer_pool_utilization_.current());
+ buffer_capable_area =
+ static_cast<int>(current_area / buffer_pool_utilization_.current());
} else {
buffer_capable_area = current_area;
}
@@ -470,9 +470,9 @@ int VideoCaptureOracle::AnalyzeForIncreasedArea(base::TimeTicks analyze_time) {
// to improve the quality of non-animating content (where frame drops are not
// much of a concern).
if ((analyze_time - last_time_animation_was_detected_).InMicroseconds() <
- kDebouncingPeriodForAnimatedContentMicros) {
+ kDebouncingPeriodForAnimatedContentMicros) {
if ((analyze_time - start_time_of_underutilization_).InMicroseconds() <
- kProvingPeriodForAnimatedContentMicros) {
+ kProvingPeriodForAnimatedContentMicros) {
// Content is animating and the system has not been contiguously
      // under-utilized for long enough.
return -1;
diff --git a/media/capture/video_capture_oracle.h b/media/capture/content/video_capture_oracle.h
index 94bdc00..8ec70e6 100644
--- a/media/capture/video_capture_oracle.h
+++ b/media/capture/content/video_capture_oracle.h
@@ -9,10 +9,10 @@
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
-#include "media/capture/animated_content_sampler.h"
-#include "media/capture/capture_resolution_chooser.h"
-#include "media/capture/feedback_signal_accumulator.h"
-#include "media/capture/smooth_event_sampler.h"
+#include "media/capture/content/animated_content_sampler.h"
+#include "media/capture/content/capture_resolution_chooser.h"
+#include "media/capture/content/feedback_signal_accumulator.h"
+#include "media/capture/content/smooth_event_sampler.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
diff --git a/media/capture/video_capture_oracle_unittest.cc b/media/capture/content/video_capture_oracle_unittest.cc
index ab43871..b614d8d 100644
--- a/media/capture/video_capture_oracle_unittest.cc
+++ b/media/capture/content/video_capture_oracle_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/capture/video_capture_oracle.h"
+#include "media/capture/content/video_capture_oracle.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -31,33 +31,28 @@ TEST(VideoCaptureOracleTest, EnforcesEventTimeMonotonicity) {
const gfx::Rect damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
base::TimeTicks t = InitialTestTimeTicks();
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
base::TimeTicks furthest_event_time = t;
for (int i = 0; i < 10; ++i) {
t -= event_increment;
ASSERT_FALSE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
t = furthest_event_time;
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
}
}
@@ -68,10 +63,8 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
const gfx::Rect damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Most basic scenario: Frames delivered one at a time, with no additional
// captures in-between deliveries.
@@ -81,8 +74,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int i = 0; i < 10; ++i) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
ASSERT_TRUE(oracle.CompleteCapture(last_frame_number, true, &ignored));
}
@@ -93,8 +85,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
for (int j = num_in_flight - 1; j >= 0; --j) {
@@ -110,8 +101,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
ASSERT_TRUE(oracle.CompleteCapture(last_frame_number, true, &ignored));
@@ -128,8 +118,7 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
for (int j = 0; j < num_in_flight; ++j) {
t += event_increment;
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- damage_rect, t));
+ VideoCaptureOracle::kCompositorUpdate, damage_rect, t));
last_frame_number = oracle.RecordCapture(0.0);
}
// Report the last frame as an out of order failure.
@@ -138,7 +127,6 @@ TEST(VideoCaptureOracleTest, EnforcesFramesDeliveredInOrder) {
ASSERT_TRUE(
oracle.CompleteCapture(last_frame_number - j, true, &ignored));
}
-
}
}
@@ -148,10 +136,8 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
const gfx::Rect animation_damage_rect(Get720pSize());
const base::TimeDelta event_increment = Get30HzPeriod() * 2;
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Run sequences of animation events and non-animation events through the
// oracle. As the oracle transitions between each sampler, make sure the
@@ -196,8 +182,8 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
// a few frames dropped, so allow a gap in the timestamps. Otherwise, the
// delta between frame timestamps should never be more than 2X the
// |event_increment|.
- const base::TimeDelta max_acceptable_delta = (i % 100) == 78 ?
- event_increment * 5 : event_increment * 2;
+ const base::TimeDelta max_acceptable_delta =
+ (i % 100) == 78 ? event_increment * 5 : event_increment * 2;
EXPECT_GE(max_acceptable_delta.InMicroseconds(), delta.InMicroseconds());
}
last_frame_timestamp = frame_timestamp;
@@ -207,15 +193,12 @@ TEST(VideoCaptureOracleTest, TransitionsSmoothlyBetweenSamplers) {
// Tests that VideoCaptureOracle prevents timer polling from initiating
// simultaneous captures.
TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
- const base::TimeDelta vsync_interval =
- base::TimeDelta::FromSeconds(1) / 60;
+ const base::TimeDelta vsync_interval = base::TimeDelta::FromSeconds(1) / 60;
const base::TimeDelta timer_interval = base::TimeDelta::FromMilliseconds(
VideoCaptureOracle::kMinTimerPollPeriodMillis);
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- false);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, false);
// Have the oracle observe some compositor events. Simulate that each capture
// completes successfully.
@@ -261,8 +244,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
did_complete_a_capture = false;
for (int i = 0; i < 10; ++i) {
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
ASSERT_TRUE(
oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
did_complete_a_capture = true;
@@ -274,8 +257,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
for (int i = 0; i <= 10; ++i) {
ASSERT_GT(10, i) << "BUG: Seems like it'll never happen!";
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
break;
}
}
@@ -292,8 +275,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
for (int i = 0; i <= 10; ++i) {
ASSERT_GT(10, i) << "BUG: Seems like it'll never happen!";
t += timer_interval;
- if (oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kTimerPoll, gfx::Rect(), t)) {
+ if (oracle.ObserveEventAndDecideCapture(VideoCaptureOracle::kTimerPoll,
+ gfx::Rect(), t)) {
break;
}
}
@@ -303,10 +286,8 @@ TEST(VideoCaptureOracleTest, SamplesOnlyOneOverdueFrameAtATime) {
// to allow both the source content and the rest of the end-to-end system to
// stabilize.
TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT, true);
// Run 30 seconds of frame captures without any source size changes.
base::TimeTicks t = InitialTestTimeTicks();
@@ -317,8 +298,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
VideoCaptureOracle::kCompositorUpdate, gfx::Rect(), t));
ASSERT_EQ(Get720pSize(), oracle.capture_size());
base::TimeTicks ignored;
- ASSERT_TRUE(oracle.CompleteCapture(
- oracle.RecordCapture(0.0), true, &ignored));
+ ASSERT_TRUE(
+ oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
}
// Now run 30 seconds of frame captures with lots of random source size
@@ -330,9 +311,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
for (; t < end_t; t += event_increment) {
// Change the source size every frame to a random non-empty size.
const gfx::Size last_source_size = source_size;
- source_size.SetSize(
- ((last_source_size.width() * 11 + 12345) % 1280) + 1,
- ((last_source_size.height() * 11 + 12345) % 720) + 1);
+ source_size.SetSize(((last_source_size.width() * 11 + 12345) % 1280) + 1,
+ ((last_source_size.height() * 11 + 12345) % 720) + 1);
ASSERT_NE(last_source_size, source_size);
oracle.SetSourceSize(source_size);
@@ -346,8 +326,8 @@ TEST(VideoCaptureOracleTest, DoesNotRapidlyChangeCaptureSize) {
}
base::TimeTicks ignored;
- ASSERT_TRUE(oracle.CompleteCapture(
- oracle.RecordCapture(0.0), true, &ignored));
+ ASSERT_TRUE(
+ oracle.CompleteCapture(oracle.RecordCapture(0.0), true, &ignored));
}
}
@@ -362,14 +342,13 @@ namespace {
// feedback varies.
void RunAutoThrottleTest(bool is_content_animating,
bool with_consumer_feedback) {
- SCOPED_TRACE(::testing::Message() << "RunAutoThrottleTest("
+ SCOPED_TRACE(::testing::Message()
+ << "RunAutoThrottleTest("
<< "(is_content_animating=" << is_content_animating
<< ", with_consumer_feedback=" << with_consumer_feedback << ")");
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_ANY_WITHIN_LIMIT, true);
  // Run 10 seconds of frame captures with 90% utilization and expect no
  // capture size changes.
@@ -380,8 +359,7 @@ void RunAutoThrottleTest(bool is_content_animating,
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
ASSERT_EQ(Get720pSize(), oracle.capture_size());
const double utilization = 0.9;
const int frame_number =
@@ -397,17 +375,16 @@ void RunAutoThrottleTest(bool is_content_animating,
// expect the resolution to remain constant. Repeat.
for (int i = 0; i < 2; ++i) {
const gfx::Size starting_size = oracle.capture_size();
- SCOPED_TRACE(::testing::Message()
- << "Stepping down from " << starting_size.ToString()
- << ", i=" << i);
+ SCOPED_TRACE(::testing::Message() << "Stepping down from "
+ << starting_size.ToString()
+ << ", i=" << i);
gfx::Size stepped_down_size;
end_t = t + base::TimeDelta::FromSeconds(10);
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
- VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ VideoCaptureOracle::kCompositorUpdate,
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
if (stepped_down_size.IsEmpty()) {
if (oracle.capture_size() != starting_size) {
@@ -435,25 +412,23 @@ void RunAutoThrottleTest(bool is_content_animating,
// utilization and expect the resolution to remain constant. Repeat.
for (int i = 0; i < 2; ++i) {
const gfx::Size starting_size = oracle.capture_size();
- SCOPED_TRACE(::testing::Message()
- << "Stepping up from " << starting_size.ToString()
- << ", i=" << i);
+ SCOPED_TRACE(::testing::Message() << "Stepping up from "
+ << starting_size.ToString()
+ << ", i=" << i);
gfx::Size stepped_up_size;
end_t = t + base::TimeDelta::FromSeconds(is_content_animating ? 90 : 10);
for (; t < end_t; t += event_increment) {
ASSERT_TRUE(oracle.ObserveEventAndDecideCapture(
VideoCaptureOracle::kCompositorUpdate,
- is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(),
- t));
+ is_content_animating ? gfx::Rect(Get720pSize()) : gfx::Rect(), t));
if (stepped_up_size.IsEmpty()) {
if (oracle.capture_size() != starting_size) {
// When content is animating, a much longer amount of time must pass
// before the capture size will step up.
- ASSERT_LT(
- base::TimeDelta::FromSeconds(is_content_animating ? 15 : 1),
- t - time_of_last_size_change);
+ ASSERT_LT(base::TimeDelta::FromSeconds(is_content_animating ? 15 : 1),
+ t - time_of_last_size_change);
time_of_last_size_change = t;
stepped_up_size = oracle.capture_size();
ASSERT_LT(starting_size.width(), stepped_up_size.width());
@@ -490,10 +465,8 @@ TEST(VideoCaptureOracleTest, AutoThrottlesBasedOnUtilizationFeedback) {
// Tests that VideoCaptureOracle does not change the capture size if
// auto-throttling is enabled when using a fixed resolution policy.
TEST(VideoCaptureOracleTest, DoesNotAutoThrottleWhenResolutionIsFixed) {
- VideoCaptureOracle oracle(Get30HzPeriod(),
- Get720pSize(),
- media::RESOLUTION_POLICY_FIXED_RESOLUTION,
- true);
+ VideoCaptureOracle oracle(Get30HzPeriod(), Get720pSize(),
+ media::RESOLUTION_POLICY_FIXED_RESOLUTION, true);
  // Run 10 seconds of frame captures with 90% utilization and expect no
  // capture size changes.
diff --git a/media/capture/smooth_event_sampler_unittest.cc b/media/capture/smooth_event_sampler_unittest.cc
deleted file mode 100644
index 14a6823..0000000
--- a/media/capture/smooth_event_sampler_unittest.cc
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/capture/smooth_event_sampler.h"
-
-#include "base/strings/stringprintf.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace media {
-
-namespace {
-
-bool AddEventAndConsiderSampling(SmoothEventSampler* sampler,
- base::TimeTicks event_time) {
- sampler->ConsiderPresentationEvent(event_time);
- return sampler->ShouldSample();
-}
-
-void SteadyStateSampleAndAdvance(base::TimeDelta vsync,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- sampler->RecordSample();
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
- *t += vsync;
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
-}
-
-void SteadyStateNoSampleAndAdvance(base::TimeDelta vsync,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- ASSERT_FALSE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
- *t += vsync;
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t));
-}
-
-base::TimeTicks InitialTestTimeTicks() {
- return base::TimeTicks() + base::TimeDelta::FromSeconds(1);
-}
-
-void TestRedundantCaptureStrategy(base::TimeDelta capture_period,
- int redundant_capture_goal,
- SmoothEventSampler* sampler,
- base::TimeTicks* t) {
- // Before any events have been considered, we're overdue for sampling.
- ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t));
-
- // Consider the first event. We want to sample that.
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_TRUE(AddEventAndConsiderSampling(sampler, *t));
- ASSERT_TRUE(sampler->HasUnrecordedEvent());
- sampler->RecordSample();
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
-
- // After more than 250 ms has passed without considering an event, we should
- // repeatedly be overdue for sampling. However, once the redundant capture
- // goal is achieved, we should no longer be overdue for sampling.
- *t += base::TimeDelta::FromMilliseconds(250);
- for (int i = 0; i < redundant_capture_goal; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_FALSE(sampler->HasUnrecordedEvent());
- ASSERT_TRUE(sampler->IsOverdueForSamplingAt(*t))
- << "Should sample until redundant capture goal is hit";
- sampler->RecordSample();
- *t += capture_period; // Timer fires once every capture period.
- }
- ASSERT_FALSE(sampler->IsOverdueForSamplingAt(*t))
- << "Should not be overdue once redundant capture goal achieved.";
-}
-
-} // namespace
-
-// 60Hz sampled at 30Hz should produce 30Hz. In addition, this test contains
-// much more comprehensive before/after/edge-case scenarios than the others.
-TEST(SmoothEventSamplerTest, Sample60HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 200;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every other vsync, indefinitely.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now pretend we're limited by backpressure in the pipeline. In this
-  // scenario we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 14, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- ASSERT_TRUE(sampler.HasUnrecordedEvent());
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state,
- // but at a different phase.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 50Hz sampled at 30Hz should produce a sequence where some frames are skipped.
-TEST(SmoothEventSamplerTest, Sample50HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 2;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 50;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture 1st, 2nd and 4th frames out of every five
- // frames, indefinitely.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now pretend we're limited by backpressure in the pipeline. In this
-  // scenario we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 11, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state
- // again.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 75Hz sampled at 30Hz should produce a sequence where some frames are skipped.
-TEST(SmoothEventSamplerTest, Sample75HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 32;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 75;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture 1st and 3rd frames out of every five
- // frames, indefinitely.
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now pretend we're limited by backpressure in the pipeline. In this
-  // scenario we are adding events but not sampling them.
- for (int i = 0; i < 20; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 16, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We capture the next frame, and not the one
- // after that, and then we're back in the steady state again.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 30Hz sampled at 30Hz should produce 30Hz.
-TEST(SmoothEventSamplerTest, Sample30HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 1;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 30;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every vsync, indefinitely.
- for (int i = 0; i < 200; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now pretend we're limited by backpressure in the pipeline. In this
-  // scenario we are adding events but not sampling them.
- for (int i = 0; i < 10; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 7, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// 24Hz sampled at 30Hz should produce 24Hz.
-TEST(SmoothEventSamplerTest, Sample24HertzAt30Hertz) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const int redundant_capture_goal = 333;
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 24;
-
- SmoothEventSampler sampler(capture_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(capture_period, redundant_capture_goal,
- &sampler, &t);
-
- // Steady state, we should capture every vsync, indefinitely.
- for (int i = 0; i < 200; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now pretend we're limited by backpressure in the pipeline. In this
-  // scenario we are adding events but not sampling them.
- for (int i = 0; i < 10; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- ASSERT_EQ(i >= 6, sampler.IsOverdueForSamplingAt(t));
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- t += vsync;
- }
-
- // Now suppose we can sample again. We should be back in the steady state.
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t));
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-// Tests that changing the minimum capture period during usage results in the
-// desired behavior.
-TEST(SmoothEventSamplerTest, Sample60HertzWithVariedCapturePeriods) {
- const base::TimeDelta vsync = base::TimeDelta::FromSeconds(1) / 60;
- const base::TimeDelta one_to_one_period = vsync;
- const base::TimeDelta two_to_one_period = vsync * 2;
- const base::TimeDelta two_and_three_to_one_period =
- base::TimeDelta::FromSeconds(1) / 24;
- const int redundant_capture_goal = 1;
-
- SmoothEventSampler sampler(one_to_one_period, redundant_capture_goal);
- base::TimeTicks t = InitialTestTimeTicks();
-
- TestRedundantCaptureStrategy(one_to_one_period, redundant_capture_goal,
- &sampler, &t);
-
- // With the capture rate at 60 Hz, we should capture every vsync.
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
-  // Now change the capture rate to 30 Hz, and we should capture every other
- // vsync.
- sampler.SetMinCapturePeriod(two_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now change the capture rate back to 60 Hz, and we should capture every
- // vsync again.
- sampler.SetMinCapturePeriod(one_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-
- // Now change the capture rate to 24 Hz, and we should capture with a 2-3-2-3
- // cadence.
- sampler.SetMinCapturePeriod(two_and_three_to_one_period);
- for (int i = 0; i < 100; i++) {
- SCOPED_TRACE(base::StringPrintf("Iteration %d", i));
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateNoSampleAndAdvance(vsync, &sampler, &t);
- SteadyStateSampleAndAdvance(vsync, &sampler, &t);
- }
-}
-
-TEST(SmoothEventSamplerTest, DoubleDrawAtOneTimeStillDirties) {
- const base::TimeDelta capture_period = base::TimeDelta::FromSeconds(1) / 30;
- const base::TimeDelta overdue_period = base::TimeDelta::FromSeconds(1);
-
- SmoothEventSampler sampler(capture_period, 1);
- base::TimeTicks t = InitialTestTimeTicks();
-
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- sampler.RecordSample();
- ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t))
- << "Sampled last event; should not be dirty.";
- t += overdue_period;
-
- // Now simulate 2 events with the same clock value.
- ASSERT_TRUE(AddEventAndConsiderSampling(&sampler, t));
- sampler.RecordSample();
- ASSERT_FALSE(AddEventAndConsiderSampling(&sampler, t))
- << "Two events at same time -- expected second not to be sampled.";
- ASSERT_TRUE(sampler.IsOverdueForSamplingAt(t + overdue_period))
- << "Second event should dirty the capture state.";
- sampler.RecordSample();
- ASSERT_FALSE(sampler.IsOverdueForSamplingAt(t + overdue_period));
-}
-
-namespace {
-
-struct DataPoint {
- bool should_capture;
- double increment_ms;
-};
-
-void ReplayCheckingSamplerDecisions(const DataPoint* data_points,
- size_t num_data_points,
- SmoothEventSampler* sampler) {
- base::TimeTicks t = InitialTestTimeTicks();
- for (size_t i = 0; i < num_data_points; ++i) {
- t += base::TimeDelta::FromMicroseconds(
- static_cast<int64>(data_points[i].increment_ms * 1000));
- ASSERT_EQ(data_points[i].should_capture,
- AddEventAndConsiderSampling(sampler, t))
- << "at data_points[" << i << ']';
- if (data_points[i].should_capture)
- sampler->RecordSample();
- }
-}
-
-} // namespace
-
-TEST(SmoothEventSamplerTest, DrawingAt24FpsWith60HzVsyncSampledAt30Hertz) {
-  // Timing data captured from an actual run: initial instability as a 24 FPS
-  // video was started from a still screen, followed by a clear steady-state.
- static const DataPoint data_points[] = {
- { true, 1437.93 }, { true, 150.484 }, { true, 217.362 }, { true, 50.161 },
- { true, 33.44 }, { false, 0 }, { true, 16.721 }, { true, 66.88 },
- { true, 50.161 }, { false, 0 }, { false, 0 }, { true, 50.16 },
- { true, 33.441 }, { true, 16.72 }, { false, 16.72 }, { true, 117.041 },
- { true, 16.72 }, { false, 16.72 }, { true, 50.161 }, { true, 50.16 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { true, 16.72 },
- { false, 0 }, { true, 50.161 }, { false, 0 }, { true, 33.44 },
- { true, 16.72 }, { false, 16.721 }, { true, 66.881 }, { false, 0 },
- { true, 33.441 }, { true, 16.72 }, { true, 50.16 }, { true, 16.72 },
- { false, 16.721 }, { true, 50.161 }, { true, 50.16 }, { false, 0 },
- { true, 33.441 }, { true, 50.337 }, { true, 50.183 }, { true, 16.722 },
- { true, 50.161 }, { true, 33.441 }, { true, 50.16 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.441 }, { true, 50.16 }, { true, 33.44 },
- { true, 50.161 }, { true, 50.16 }, { true, 33.44 }, { true, 33.441 },
- { true, 50.16 }, { true, 50.161 }, { true, 33.44 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.44 }, { true, 50.161 }, { true, 33.44 },
- { true, 50.161 }, { true, 33.44 }, { true, 50.161 }, { true, 33.44 },
- { true, 83.601 }, { true, 16.72 }, { true, 33.44 }, { false, 0 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-TEST(SmoothEventSamplerTest, DrawingAt30FpsWith60HzVsyncSampledAt30Hertz) {
-  // Timing data captured from an actual run: initial instability as a 30 FPS
-  // video was started from a still screen, followed by steady-state. The
-  // drawing framerate from the video rendering was a bit volatile, but
-  // averaged 30 FPS.
- static const DataPoint data_points[] = {
- { true, 2407.69 }, { true, 16.733 }, { true, 217.362 }, { true, 33.441 },
- { true, 33.44 }, { true, 33.44 }, { true, 33.441 }, { true, 33.44 },
- { true, 33.44 }, { true, 33.441 }, { true, 33.44 }, { true, 33.44 },
- { true, 16.721 }, { true, 33.44 }, { false, 0 }, { true, 50.161 },
- { true, 50.16 }, { false, 0 }, { true, 50.161 }, { true, 33.44 },
- { true, 16.72 }, { false, 0 }, { false, 16.72 }, { true, 66.881 },
- { false, 0 }, { true, 33.44 }, { true, 16.72 }, { true, 50.161 },
- { false, 0 }, { true, 33.538 }, { true, 33.526 }, { true, 33.447 },
- { true, 33.445 }, { true, 33.441 }, { true, 16.721 }, { true, 33.44 },
- { true, 33.44 }, { true, 50.161 }, { true, 16.72 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { false, 0 }, { false, 16.72 },
- { true, 66.881 }, { true, 16.72 }, { false, 16.72 }, { true, 50.16 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { true, 50.161 }, { false, 0 },
- { true, 33.44 }, { true, 33.44 }, { true, 50.161 }, { true, 16.72 },
- { true, 33.44 }, { true, 33.441 }, { false, 0 }, { true, 66.88 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { false, 0 },
- { true, 33.441 }, { true, 33.44 }, { true, 33.44 }, { false, 0 },
- { true, 16.72 }, { true, 50.161 }, { false, 0 }, { true, 50.16 },
- { false, 0.001 }, { true, 16.721 }, { true, 66.88 }, { true, 33.44 },
- { true, 33.441 }, { true, 33.44 }, { true, 50.161 }, { true, 16.72 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 66.881 },
- { true, 33.44 }, { true, 16.72 }, { true, 33.441 }, { false, 16.72 },
- { true, 66.88 }, { true, 16.721 }, { true, 50.16 }, { true, 33.44 },
- { true, 16.72 }, { true, 33.441 }, { true, 33.44 }, { true, 33.44 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-TEST(SmoothEventSamplerTest, DrawingAt60FpsWith60HzVsyncSampledAt30Hertz) {
- // Actual capturing of timing data: WebGL Aquarium demo
- // (http://webglsamples.googlecode.com/hg/aquarium/aquarium.html) which ran
- // at 55-60 FPS in the steady state.
- static const DataPoint data_points[] = {
- { true, 16.72 }, { true, 16.72 }, { true, 4163.29 }, { true, 50.193 },
- { true, 117.041 }, { true, 50.161 }, { true, 50.16 }, { true, 33.441 },
- { true, 50.16 }, { true, 33.44 }, { false, 0 }, { false, 0 },
- { true, 50.161 }, { true, 83.601 }, { true, 50.16 }, { true, 16.72 },
- { true, 33.441 }, { false, 16.72 }, { true, 50.16 }, { true, 16.72 },
- { false, 0.001 }, { true, 33.441 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 33.44 }, { false, 16.721 }, { true, 16.721 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 16.721 }, { true, 50.161 }, { false, 0 },
- { true, 33.44 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.721 }, { true, 33.44 },
- { false, 0 }, { true, 33.44 }, { false, 16.721 }, { true, 16.721 },
- { true, 50.161 }, { false, 0 }, { true, 16.72 }, { true, 33.44 },
- { false, 0 }, { true, 33.441 }, { false, 16.72 }, { true, 16.72 },
- { true, 50.16 }, { false, 0 }, { true, 16.72 }, { true, 33.441 },
- { true, 33.44 }, { false, 0 }, { true, 33.44 }, { true, 33.441 },
- { false, 0 }, { true, 33.44 }, { true, 33.441 }, { false, 0 },
- { true, 33.44 }, { false, 0 }, { true, 33.44 }, { false, 16.72 },
- { true, 16.721 }, { true, 50.161 }, { false, 0 }, { true, 16.72 },
- { true, 33.44 }, { true, 33.441 }, { false, 0 }, { true, 33.44 },
- { true, 33.44 }, { false, 0 }, { true, 33.441 }, { false, 16.72 },
- { true, 16.72 }, { true, 50.16 }, { false, 0 }, { true, 16.72 },
- { true, 33.441 }, { false, 0 }, { true, 33.44 }, { false, 16.72 },
- { true, 33.44 }, { false, 0 }, { true, 16.721 }, { true, 50.161 },
- { false, 0 }, { true, 16.72 }, { true, 33.44 }, { false, 0 },
- { true, 33.441 }, { false, 16.72 }, { true, 16.72 }, { true, 50.16 }
- };
-
- SmoothEventSampler sampler(base::TimeDelta::FromSeconds(1) / 30, 3);
- ReplayCheckingSamplerDecisions(data_points, arraysize(data_points), &sampler);
-}
-
-} // namespace media
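The replay tests above feed recorded traces of (expected decision, elapsed milliseconds) pairs back through a SmoothEventSampler. The DataPoint struct and the ReplayCheckingSamplerDecisions() helper are defined earlier in this deleted file; the following is only a sketch of what such a harness can look like, assuming the sampler exposes the ConsiderPresentationEvent() / ShouldSample() / RecordSample() interface used by these tests:

  struct DataPoint {
    bool should_capture;   // The decision the sampler is expected to make.
    double increment_ms;   // Milliseconds elapsed since the previous event.
  };

  void ReplayCheckingSamplerDecisions(const DataPoint* data_points,
                                      size_t num_data_points,
                                      SmoothEventSampler* sampler) {
    base::TimeTicks t = base::TimeTicks() + base::TimeDelta::FromSeconds(1);
    for (size_t i = 0; i < num_data_points; ++i) {
      t += base::TimeDelta::FromMicroseconds(
          static_cast<int64>(data_points[i].increment_ms * 1000));
      sampler->ConsiderPresentationEvent(t);
      ASSERT_EQ(data_points[i].should_capture, sampler->ShouldSample())
          << "at data_points[" << i << "]";
      if (data_points[i].should_capture)
        sampler->RecordSample();
    }
  }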
diff --git a/media/capture/video/OWNERS b/media/capture/video/OWNERS
new file mode 100644
index 0000000..3165f0b
--- /dev/null
+++ b/media/capture/video/OWNERS
@@ -0,0 +1,3 @@
+mcasas@chromium.org
+perkj@chromium.org
+tommi@chromium.org
diff --git a/media/capture/video/android/video_capture_device_android.cc b/media/capture/video/android/video_capture_device_android.cc
new file mode 100644
index 0000000..c4ec171
--- /dev/null
+++ b/media/capture/video/android/video_capture_device_android.cc
@@ -0,0 +1,204 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/android/video_capture_device_android.h"
+
+#include "base/android/jni_android.h"
+#include "base/strings/string_number_conversions.h"
+#include "jni/VideoCapture_jni.h"
+#include "media/capture/video/android/video_capture_device_factory_android.h"
+
+using base::android::AttachCurrentThread;
+using base::android::CheckException;
+using base::android::GetClass;
+using base::android::MethodID;
+using base::android::JavaRef;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+// static
+bool VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+const std::string VideoCaptureDevice::Name::GetModel() const {
+ // Android cameras are not typically USB devices, and this method is
+ // currently only used for USB model identifiers, so this implementation
+ // just indicates an unknown device model.
+ return "";
+}
+
+VideoCaptureDeviceAndroid::VideoCaptureDeviceAndroid(const Name& device_name)
+ : state_(kIdle), got_first_frame_(false), device_name_(device_name) {
+}
+
+VideoCaptureDeviceAndroid::~VideoCaptureDeviceAndroid() {
+ StopAndDeAllocate();
+}
+
+bool VideoCaptureDeviceAndroid::Init() {
+ int id;
+ if (!base::StringToInt(device_name_.id(), &id))
+ return false;
+
+ j_capture_.Reset(VideoCaptureDeviceFactoryAndroid::createVideoCaptureAndroid(
+ id, reinterpret_cast<intptr_t>(this)));
+ return true;
+}
+
+void VideoCaptureDeviceAndroid::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<Client> client) {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::AllocateAndStart";
+ {
+ base::AutoLock lock(lock_);
+ if (state_ != kIdle)
+ return;
+ client_ = client.Pass();
+ got_first_frame_ = false;
+ }
+
+ JNIEnv* env = AttachCurrentThread();
+
+ jboolean ret = Java_VideoCapture_allocate(
+ env, j_capture_.obj(), params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate);
+ if (!ret) {
+ SetErrorState("failed to allocate");
+ return;
+ }
+
+ // Store current width and height.
+ capture_format_.frame_size.SetSize(
+ Java_VideoCapture_queryWidth(env, j_capture_.obj()),
+ Java_VideoCapture_queryHeight(env, j_capture_.obj()));
+ capture_format_.frame_rate =
+ Java_VideoCapture_queryFrameRate(env, j_capture_.obj());
+ capture_format_.pixel_format = GetColorspace();
+ DCHECK_NE(capture_format_.pixel_format,
+ media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN);
+ CHECK(capture_format_.frame_size.GetArea() > 0);
+ CHECK(!(capture_format_.frame_size.width() % 2));
+ CHECK(!(capture_format_.frame_size.height() % 2));
+
+ if (capture_format_.frame_rate > 0) {
+ frame_interval_ = base::TimeDelta::FromMicroseconds(
+ (base::Time::kMicrosecondsPerSecond + capture_format_.frame_rate - 1) /
+ capture_format_.frame_rate);
+ }
+
+ DVLOG(1) << "VideoCaptureDeviceAndroid::Allocate: queried frame_size="
+ << capture_format_.frame_size.ToString()
+ << ", frame_rate=" << capture_format_.frame_rate;
+
+ ret = Java_VideoCapture_startCapture(env, j_capture_.obj());
+ if (!ret) {
+ SetErrorState("failed to start capture");
+ return;
+ }
+
+ {
+ base::AutoLock lock(lock_);
+ state_ = kCapturing;
+ }
+}
+
+void VideoCaptureDeviceAndroid::StopAndDeAllocate() {
+ DVLOG(1) << "VideoCaptureDeviceAndroid::StopAndDeAllocate";
+ {
+ base::AutoLock lock(lock_);
+ if (state_ != kCapturing && state_ != kError)
+ return;
+ }
+
+ JNIEnv* env = AttachCurrentThread();
+
+ jboolean ret = Java_VideoCapture_stopCapture(env, j_capture_.obj());
+ if (!ret) {
+ SetErrorState("failed to stop capture");
+ return;
+ }
+
+ {
+ base::AutoLock lock(lock_);
+ state_ = kIdle;
+ client_.reset();
+ }
+
+ Java_VideoCapture_deallocate(env, j_capture_.obj());
+}
+
+void VideoCaptureDeviceAndroid::OnFrameAvailable(JNIEnv* env,
+ jobject obj,
+ jbyteArray data,
+ jint length,
+ jint rotation) {
+ DVLOG(3) << "VideoCaptureDeviceAndroid::OnFrameAvailable: length =" << length;
+
+ base::AutoLock lock(lock_);
+ if (state_ != kCapturing || !client_.get())
+ return;
+
+ jbyte* buffer = env->GetByteArrayElements(data, NULL);
+ if (!buffer) {
+ LOG(ERROR) << "VideoCaptureDeviceAndroid::OnFrameAvailable: "
+ "failed to GetByteArrayElements";
+ return;
+ }
+
+ base::TimeTicks current_time = base::TimeTicks::Now();
+ if (!got_first_frame_) {
+ // Set aside one frame allowance for fluctuation.
+ expected_next_frame_time_ = current_time - frame_interval_;
+ got_first_frame_ = true;
+ }
+
+ // Deliver the frame when it doesn't arrive too early.
+ if (expected_next_frame_time_ <= current_time) {
+ expected_next_frame_time_ += frame_interval_;
+
+ client_->OnIncomingCapturedData(reinterpret_cast<uint8*>(buffer), length,
+ capture_format_, rotation,
+ base::TimeTicks::Now());
+ }
+
+ env->ReleaseByteArrayElements(data, buffer, JNI_ABORT);
+}
+
+void VideoCaptureDeviceAndroid::OnError(JNIEnv* env,
+ jobject obj,
+ jstring message) {
+ const char* native_string = env->GetStringUTFChars(message, JNI_FALSE);
+ SetErrorState(native_string);
+ env->ReleaseStringUTFChars(message, native_string);
+}
+
+VideoCapturePixelFormat VideoCaptureDeviceAndroid::GetColorspace() {
+ JNIEnv* env = AttachCurrentThread();
+ int current_capture_colorspace =
+ Java_VideoCapture_getColorspace(env, j_capture_.obj());
+ switch (current_capture_colorspace) {
+ case ANDROID_IMAGE_FORMAT_YV12:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_YV12;
+ case ANDROID_IMAGE_FORMAT_YUV_420_888:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ case ANDROID_IMAGE_FORMAT_NV21:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_NV21;
+ case ANDROID_IMAGE_FORMAT_UNKNOWN:
+ default:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+ }
+}
+
+void VideoCaptureDeviceAndroid::SetErrorState(const std::string& reason) {
+ {
+ base::AutoLock lock(lock_);
+ state_ = kError;
+ }
+ client_->OnError(reason);
+}
+
+} // namespace media
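AllocateAndStart() above derives |frame_interval_| with a rounded-up integer division, and OnFrameAvailable() drops frames that arrive ahead of |expected_next_frame_time_|, advancing the deadline one interval per delivered frame. A standalone sketch of that pacing math, with illustrative arrival times rather than real device data:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t kMicrosecondsPerSecond = 1000000;
    const int frame_rate = 30;
    // Rounded-up division: (1000000 + 30 - 1) / 30 = 33334 microseconds.
    const int64_t interval_us =
        (kMicrosecondsPerSecond + frame_rate - 1) / frame_rate;

    // The first frame sets the deadline to now - interval, so it is always
    // delivered; later frames are dropped if they beat the deadline.
    int64_t expected_next_us = -interval_us;
    const int64_t arrivals_us[] = {0, 16720, 33441, 50161};
    for (int64_t now : arrivals_us) {
      const bool deliver = expected_next_us <= now;
      if (deliver)
        expected_next_us += interval_us;
      std::printf("t=%6lldus -> %s\n", static_cast<long long>(now),
                  deliver ? "deliver" : "drop");
    }
    return 0;
  }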
diff --git a/media/capture/video/android/video_capture_device_android.h b/media/capture/video/android/video_capture_device_android.h
new file mode 100644
index 0000000..2089919
--- /dev/null
+++ b/media/capture/video/android/video_capture_device_android.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_ANDROID_H_
+#define MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_ANDROID_H_
+
+#include <jni.h>
+#include <string>
+
+#include "base/android/scoped_java_ref.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// VideoCaptureDevice on Android. The VideoCaptureDevice APIs are called
+// by VideoCaptureManager on its own thread, while OnFrameAvailable is called
+// on a Java thread (i.e., the UI thread). Both threads access |state_| and
+// |client_|, but only VideoCaptureManager changes their values.
+class MEDIA_EXPORT VideoCaptureDeviceAndroid : public VideoCaptureDevice {
+ public:
+ // Automatically generated enum to interface with Java world.
+ //
+ // A Java counterpart will be generated for this enum.
+ // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
+ enum AndroidImageFormat {
+ // Android graphics ImageFormat mapping, see reference in:
+ // http://developer.android.com/reference/android/graphics/ImageFormat.html
+ ANDROID_IMAGE_FORMAT_NV21 = 17,
+ ANDROID_IMAGE_FORMAT_YUV_420_888 = 35,
+ ANDROID_IMAGE_FORMAT_YV12 = 842094169,
+ ANDROID_IMAGE_FORMAT_UNKNOWN = 0,
+ };
+
+ explicit VideoCaptureDeviceAndroid(const Name& device_name);
+ ~VideoCaptureDeviceAndroid() override;
+
+ static VideoCaptureDevice* Create(const Name& device_name);
+ static bool RegisterVideoCaptureDevice(JNIEnv* env);
+
+ // Registers the Java VideoCaptureDevice pointer, used by the rest of the
+ // methods of the class to operate the Java capture code. This method must be
+ // called after the class constructor and before AllocateAndStart().
+ bool Init();
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) override;
+ void StopAndDeAllocate() override;
+
+ // Implement org.chromium.media.VideoCapture.nativeOnFrameAvailable.
+ void OnFrameAvailable(JNIEnv* env,
+ jobject obj,
+ jbyteArray data,
+ jint length,
+ jint rotation);
+
+ // Implement org.chromium.media.VideoCapture.nativeOnError.
+ void OnError(JNIEnv* env, jobject obj, jstring message);
+
+ private:
+ enum InternalState {
+ kIdle, // The device is opened but not in use.
+ kCapturing, // Video is being captured.
+ kError // An error occurred; the user must destroy the object to recover.
+ };
+
+ VideoCapturePixelFormat GetColorspace();
+ void SetErrorState(const std::string& reason);
+
+ // Guards |state_| and |client_|, since both can be accessed from different
+ // threads.
+ base::Lock lock_;
+ InternalState state_;
+ bool got_first_frame_;
+ base::TimeTicks expected_next_frame_time_;
+ base::TimeDelta frame_interval_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+
+ Name device_name_;
+ VideoCaptureFormat capture_format_;
+
+ // Java VideoCaptureAndroid instance.
+ base::android::ScopedJavaLocalRef<jobject> j_capture_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceAndroid);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_ANDROID_H_
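The class comment above describes the contract: the manager thread mutates |state_| and |client_| while the Java callback thread reads them, with |lock_| keeping either side from observing a half-updated pair. A minimal sketch of that guard pattern (GuardedState is a hypothetical illustration, not part of the capture code):

  #include "base/synchronization/lock.h"

  class GuardedState {
   public:
    // Called on the manager thread.
    void SetCapturing(bool capturing) {
      base::AutoLock lock(lock_);
      capturing_ = capturing;
    }

    // Called on the Java callback thread.
    bool IsCapturing() {
      base::AutoLock lock(lock_);
      return capturing_;
    }

   private:
    base::Lock lock_;
    bool capturing_ = false;
  };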
diff --git a/media/capture/video/android/video_capture_device_factory_android.cc b/media/capture/video/android/video_capture_device_factory_android.cc
new file mode 100644
index 0000000..fca882e
--- /dev/null
+++ b/media/capture/video/android/video_capture_device_factory_android.cc
@@ -0,0 +1,139 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/android/video_capture_device_factory_android.h"
+
+#include "base/android/jni_string.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "jni/VideoCaptureFactory_jni.h"
+#include "media/capture/video/android/video_capture_device_android.h"
+
+using base::android::AttachCurrentThread;
+using base::android::ScopedJavaLocalRef;
+
+namespace media {
+
+// static
+bool VideoCaptureDeviceFactoryAndroid::RegisterVideoCaptureDeviceFactory(
+ JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+// static
+ScopedJavaLocalRef<jobject>
+VideoCaptureDeviceFactoryAndroid::createVideoCaptureAndroid(
+ int id,
+ jlong nativeVideoCaptureDeviceAndroid) {
+ return (Java_VideoCaptureFactory_createVideoCapture(
+ AttachCurrentThread(), base::android::GetApplicationContext(), id,
+ nativeVideoCaptureDeviceAndroid));
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryAndroid::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int id;
+ if (!base::StringToInt(device_name.id(), &id))
+ return scoped_ptr<VideoCaptureDevice>();
+
+ scoped_ptr<VideoCaptureDeviceAndroid> video_capture_device(
+ new VideoCaptureDeviceAndroid(device_name));
+
+ if (video_capture_device->Init())
+ return video_capture_device.Pass();
+
+ DLOG(ERROR) << "Error creating Video Capture Device.";
+ return scoped_ptr<VideoCaptureDevice>();
+}
+
+void VideoCaptureDeviceFactoryAndroid::GetDeviceNames(
+ VideoCaptureDevice::Names* device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ device_names->clear();
+
+ JNIEnv* env = AttachCurrentThread();
+
+ const jobject context = base::android::GetApplicationContext();
+ const int num_cameras =
+ Java_VideoCaptureFactory_getNumberOfCameras(env, context);
+ DVLOG(1) << "VideoCaptureDevice::GetDeviceNames: num_cameras=" << num_cameras;
+ if (num_cameras <= 0)
+ return;
+
+ for (int camera_id = num_cameras - 1; camera_id >= 0; --camera_id) {
+ base::android::ScopedJavaLocalRef<jstring> device_name =
+ Java_VideoCaptureFactory_getDeviceName(env, camera_id, context);
+ if (device_name.obj() == NULL)
+ continue;
+
+ const int capture_api_type =
+ Java_VideoCaptureFactory_getCaptureApiType(env, camera_id, context);
+
+ VideoCaptureDevice::Name name(
+ base::android::ConvertJavaStringToUTF8(device_name),
+ base::IntToString(camera_id),
+ static_cast<VideoCaptureDevice::Name::CaptureApiType>(
+ capture_api_type));
+ device_names->push_back(name);
+
+ DVLOG(1) << "VideoCaptureDeviceFactoryAndroid::GetDeviceNames: camera "
+ << "device_name=" << name.name() << ", unique_id=" << name.id();
+ }
+}
+
+void VideoCaptureDeviceFactoryAndroid::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* capture_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ int id;
+ if (!base::StringToInt(device.id(), &id))
+ return;
+ JNIEnv* env = AttachCurrentThread();
+ base::android::ScopedJavaLocalRef<jobjectArray> collected_formats =
+ Java_VideoCaptureFactory_getDeviceSupportedFormats(
+ env, base::android::GetApplicationContext(), id);
+ if (collected_formats.is_null())
+ return;
+
+ jsize num_formats = env->GetArrayLength(collected_formats.obj());
+ for (int i = 0; i < num_formats; ++i) {
+ base::android::ScopedJavaLocalRef<jobject> format(
+ env, env->GetObjectArrayElement(collected_formats.obj(), i));
+
+ VideoCapturePixelFormat pixel_format =
+ media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+ switch (media::Java_VideoCaptureFactory_getCaptureFormatPixelFormat(
+ env, format.obj())) {
+ case VideoCaptureDeviceAndroid::ANDROID_IMAGE_FORMAT_YV12:
+ pixel_format = media::VIDEO_CAPTURE_PIXEL_FORMAT_YV12;
+ break;
+ case VideoCaptureDeviceAndroid::ANDROID_IMAGE_FORMAT_NV21:
+ pixel_format = media::VIDEO_CAPTURE_PIXEL_FORMAT_NV21;
+ break;
+ default:
+ continue;
+ }
+ VideoCaptureFormat capture_format(
+ gfx::Size(media::Java_VideoCaptureFactory_getCaptureFormatWidth(
+ env, format.obj()),
+ media::Java_VideoCaptureFactory_getCaptureFormatHeight(
+ env, format.obj())),
+ media::Java_VideoCaptureFactory_getCaptureFormatFramerate(env,
+ format.obj()),
+ pixel_format);
+ capture_formats->push_back(capture_format);
+ DVLOG(1) << device.name() << " "
+ << VideoCaptureFormat::ToString(capture_format);
+ }
+}
+
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryAndroid();
+}
+
+} // namespace media
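Taken together, the factory methods above support an enumerate-create-start flow. A hedged usage sketch (StartFirstCamera and the 640x480 request are illustrative, not an actual Chromium call site):

  #include "media/capture/video/android/video_capture_device_factory_android.h"

  void StartFirstCamera(media::VideoCaptureDeviceFactoryAndroid* factory,
                        scoped_ptr<media::VideoCaptureDevice::Client> client) {
    media::VideoCaptureDevice::Names names;
    factory->GetDeviceNames(&names);
    if (names.empty())
      return;

    // Create() calls VideoCaptureDeviceAndroid::Init() internally.
    scoped_ptr<media::VideoCaptureDevice> device =
        factory->Create(names.front());
    if (!device.get())
      return;

    media::VideoCaptureParams params;
    params.requested_format.frame_size.SetSize(640, 480);
    params.requested_format.frame_rate = 30;
    device->AllocateAndStart(params, client.Pass());
  }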
diff --git a/media/capture/video/android/video_capture_device_factory_android.h b/media/capture/video/android/video_capture_device_factory_android.h
new file mode 100644
index 0000000..5ca432a
--- /dev/null
+++ b/media/capture/video/android/video_capture_device_factory_android.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
+#define MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+#include <jni.h>
+
+#include "base/android/scoped_java_ref.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// VideoCaptureDeviceFactory on Android. This class implements the static
+// VideoCapture registration methods and acts as the factory for
+// VideoCaptureAndroid instances.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryAndroid
+ : public VideoCaptureDeviceFactory {
+ public:
+ static bool RegisterVideoCaptureDeviceFactory(JNIEnv* env);
+ static base::android::ScopedJavaLocalRef<jobject> createVideoCaptureAndroid(
+ int id,
+ jlong nativeVideoCaptureDeviceAndroid);
+
+ VideoCaptureDeviceFactoryAndroid() {}
+ ~VideoCaptureDeviceFactoryAndroid() override {}
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryAndroid);
+};
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_ANDROID_VIDEO_CAPTURE_DEVICE_FACTORY_ANDROID_H_
diff --git a/media/capture/video/fake_video_capture_device.cc b/media/capture/video/fake_video_capture_device.cc
new file mode 100644
index 0000000..a44f4e3
--- /dev/null
+++ b/media/capture/video/fake_video_capture_device.cc
@@ -0,0 +1,217 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/fake_video_capture_device.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/strings/stringprintf.h"
+#include "media/audio/fake_audio_input_stream.h"
+#include "media/base/video_frame.h"
+#include "third_party/skia/include/core/SkBitmap.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkPaint.h"
+
+namespace media {
+
+static const int kFakeCaptureBeepCycle = 10; // Visual beep every 0.5s.
+
+void DrawPacman(bool use_argb,
+ uint8_t* const data,
+ int frame_count,
+ int frame_interval,
+ const gfx::Size& frame_size) {
+ // |kN32_SkColorType| stands for the appropriate RGBA/BGRA format.
+ const SkColorType colorspace =
+ use_argb ? kN32_SkColorType : kAlpha_8_SkColorType;
+ const SkImageInfo info = SkImageInfo::Make(
+ frame_size.width(), frame_size.height(), colorspace, kOpaque_SkAlphaType);
+ SkBitmap bitmap;
+ bitmap.setInfo(info);
+ bitmap.setPixels(data);
+ SkPaint paint;
+ paint.setStyle(SkPaint::kFill_Style);
+ SkCanvas canvas(bitmap);
+
+ // Paint a green background for ARGB frames: the Alpha_8 path renders on a
+ // light green background, while RGBA would otherwise default to white.
+ if (use_argb) {
+ const SkRect full_frame =
+ SkRect::MakeWH(frame_size.width(), frame_size.height());
+ paint.setARGB(255, 0, 127, 0);
+ canvas.drawRect(full_frame, paint);
+ }
+ paint.setColor(SK_ColorGREEN);
+
+ // Draw a sweeping circle to show an animation.
+ const int end_angle = (3 * kFakeCaptureBeepCycle * frame_count % 361);
+ const int radius = std::min(frame_size.width(), frame_size.height()) / 4;
+ const SkRect rect = SkRect::MakeXYWH(frame_size.width() / 2 - radius,
+ frame_size.height() / 2 - radius,
+ 2 * radius, 2 * radius);
+ canvas.drawArc(rect, 0, end_angle, true, paint);
+
+ // Draw current time.
+ const int elapsed_ms = frame_interval * frame_count;
+ const int milliseconds = elapsed_ms % 1000;
+ const int seconds = (elapsed_ms / 1000) % 60;
+ const int minutes = (elapsed_ms / 1000 / 60) % 60;
+ const int hours = (elapsed_ms / 1000 / 60 / 60) % 60;
+
+ const std::string time_string =
+ base::StringPrintf("%d:%02d:%02d:%03d %d", hours, minutes, seconds,
+ milliseconds, frame_count);
+ canvas.scale(3, 3);
+ canvas.drawText(time_string.data(), time_string.length(), 30, 20, paint);
+}
+
+FakeVideoCaptureDevice::FakeVideoCaptureDevice(
+ FakeVideoCaptureDeviceType device_type)
+ : device_type_(device_type), frame_count_(0), weak_factory_(this) {
+}
+
+FakeVideoCaptureDevice::~FakeVideoCaptureDevice() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+void FakeVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ client_ = client.Pass();
+
+ // Incoming |params| may not match any supported format, so we snap to the
+ // closest supported one, rounding up. TODO(mcasas): Use |params| directly
+ // when it matches a supported format, once http://crbug.com/309554 is
+ // verified.
+ DCHECK_EQ(params.requested_format.pixel_format,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ capture_format_.pixel_format = params.requested_format.pixel_format;
+ capture_format_.frame_rate = 30.0;
+ if (params.requested_format.frame_size.width() > 1280)
+ capture_format_.frame_size.SetSize(1920, 1080);
+ else if (params.requested_format.frame_size.width() > 640)
+ capture_format_.frame_size.SetSize(1280, 720);
+ else if (params.requested_format.frame_size.width() > 320)
+ capture_format_.frame_size.SetSize(640, 480);
+ else
+ capture_format_.frame_size.SetSize(320, 240);
+
+ if (device_type_ == USING_OWN_BUFFERS ||
+ device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ capture_format_.pixel_storage = PIXEL_STORAGE_CPU;
+ fake_frame_.reset(new uint8[VideoFrame::AllocationSize(
+ PIXEL_FORMAT_I420, capture_format_.frame_size)]);
+ BeepAndScheduleNextCapture(
+ base::TimeTicks::Now(),
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ weak_factory_.GetWeakPtr()));
+ } else if (device_type_ == USING_CLIENT_BUFFERS) {
+ DVLOG(1) << "starting with "
+ << (params.use_gpu_memory_buffers ? "GMB" : "ShMem");
+ BeepAndScheduleNextCapture(
+ base::TimeTicks::Now(),
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers,
+ weak_factory_.GetWeakPtr(),
+ params.use_gpu_memory_buffers
+ ? VIDEO_CAPTURE_PIXEL_FORMAT_ARGB
+ : VIDEO_CAPTURE_PIXEL_FORMAT_I420,
+ params.use_gpu_memory_buffers ? PIXEL_STORAGE_GPUMEMORYBUFFER
+ : PIXEL_STORAGE_CPU));
+ } else {
+ client_->OnError("Unknown Fake Video Capture Device type.");
+ }
+}
+
+void FakeVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ client_.reset();
+}
+
+void FakeVideoCaptureDevice::CaptureUsingOwnBuffers(
+ base::TimeTicks expected_execution_time) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const size_t frame_size = capture_format_.ImageAllocationSize();
+ memset(fake_frame_.get(), 0, frame_size);
+
+ DrawPacman(false /* use_argb */, fake_frame_.get(), frame_count_,
+ kFakeCapturePeriodMs, capture_format_.frame_size);
+
+ // Give the captured frame to the client.
+ if (device_type_ == USING_OWN_BUFFERS) {
+ client_->OnIncomingCapturedData(fake_frame_.get(), frame_size,
+ capture_format_, 0 /* rotation */,
+ base::TimeTicks::Now());
+ } else if (device_type_ == USING_OWN_BUFFERS_TRIPLANAR) {
+ client_->OnIncomingCapturedYuvData(
+ fake_frame_.get(),
+ fake_frame_.get() + capture_format_.frame_size.GetArea(),
+ fake_frame_.get() + capture_format_.frame_size.GetArea() * 5 / 4,
+ capture_format_.frame_size.width(),
+ capture_format_.frame_size.width() / 2,
+ capture_format_.frame_size.width() / 2, capture_format_,
+ 0 /* rotation */, base::TimeTicks::Now());
+ }
+ BeepAndScheduleNextCapture(
+ expected_execution_time,
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers,
+ weak_factory_.GetWeakPtr()));
+}
+
+void FakeVideoCaptureDevice::CaptureUsingClientBuffers(
+ VideoCapturePixelFormat pixel_format,
+ VideoPixelStorage pixel_storage,
+ base::TimeTicks expected_execution_time) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ scoped_ptr<VideoCaptureDevice::Client::Buffer> capture_buffer(
+ client_->ReserveOutputBuffer(capture_format_.frame_size, pixel_format,
+ pixel_storage));
+ DLOG_IF(ERROR, !capture_buffer) << "Couldn't allocate Capture Buffer";
+
+ if (capture_buffer.get()) {
+ uint8_t* const data_ptr = static_cast<uint8_t*>(capture_buffer->data());
+ DCHECK(data_ptr) << "Buffer has NO backing memory";
+ memset(data_ptr, 0, capture_buffer->size());
+
+ DrawPacman(
+ (pixel_format == media::VIDEO_CAPTURE_PIXEL_FORMAT_ARGB), /* use_argb */
+ data_ptr, frame_count_, kFakeCapturePeriodMs,
+ capture_format_.frame_size);
+
+ // Give the captured frame to the client.
+ const VideoCaptureFormat format(capture_format_.frame_size,
+ capture_format_.frame_rate, pixel_format,
+ pixel_storage);
+ client_->OnIncomingCapturedBuffer(capture_buffer.Pass(), format,
+ base::TimeTicks::Now());
+ }
+
+ BeepAndScheduleNextCapture(
+ expected_execution_time,
+ base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers,
+ weak_factory_.GetWeakPtr(), pixel_format, pixel_storage));
+}
+
+void FakeVideoCaptureDevice::BeepAndScheduleNextCapture(
+ base::TimeTicks expected_execution_time,
+ const base::Callback<void(base::TimeTicks)>& next_capture) {
+ // Generate a synchronized beep sound every so many frames.
+ if (frame_count_++ % kFakeCaptureBeepCycle == 0)
+ FakeAudioInputStream::BeepOnce();
+
+ // Reschedule next CaptureTask.
+ const base::TimeTicks current_time = base::TimeTicks::Now();
+ const base::TimeDelta frame_interval =
+ base::TimeDelta::FromMilliseconds(kFakeCapturePeriodMs);
+ // Don't accumulate any debt if we are lagging behind - just post the next
+ // frame immediately and continue as normal.
+ const base::TimeTicks next_execution_time =
+ std::max(current_time, expected_execution_time + frame_interval);
+ const base::TimeDelta delay = next_execution_time - current_time;
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE, base::Bind(next_capture, next_execution_time), delay);
+}
+
+} // namespace media
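BeepAndScheduleNextCapture() above clamps the next deadline to "now" whenever a frame runs late, so a stall shifts the cadence instead of producing a burst of catch-up frames. A standalone sketch of that rule with made-up run times:

  #include <algorithm>
  #include <cstdio>

  int main() {
    const int kIntervalMs = 50;  // Mirrors kFakeCapturePeriodMs.
    int expected_ms = 0;
    const int actual_run_times_ms[] = {0, 50, 180 /* long stall */, 230};
    for (int now : actual_run_times_ms) {
      const int next = std::max(now, expected_ms + kIntervalMs);
      std::printf("ran at %3dms -> next capture at %3dms\n", now, next);
      expected_ms = next;
    }
    return 0;
  }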
diff --git a/media/capture/video/fake_video_capture_device.h b/media/capture/video/fake_video_capture_device.h
new file mode 100644
index 0000000..a0dd4e0
--- /dev/null
+++ b/media/capture/video/fake_video_capture_device.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a fake VideoCaptureDevice class. Used for testing other
+// video capture classes when no real hardware is available.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_H_
+#define MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_H_
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+class MEDIA_EXPORT FakeVideoCaptureDevice : public VideoCaptureDevice {
+ public:
+ enum FakeVideoCaptureDeviceType {
+ USING_OWN_BUFFERS,
+ USING_OWN_BUFFERS_TRIPLANAR,
+ USING_CLIENT_BUFFERS,
+ };
+
+ static int FakeCapturePeriodMs() { return kFakeCapturePeriodMs; }
+
+ explicit FakeVideoCaptureDevice(FakeVideoCaptureDeviceType device_type);
+ ~FakeVideoCaptureDevice() override;
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) override;
+ void StopAndDeAllocate() override;
+
+ private:
+ static const int kFakeCapturePeriodMs = 50;
+
+ void CaptureUsingOwnBuffers(base::TimeTicks expected_execution_time);
+ void CaptureUsingClientBuffers(VideoCapturePixelFormat pixel_format,
+ VideoPixelStorage pixel_storage,
+ base::TimeTicks expected_execution_time);
+ void BeepAndScheduleNextCapture(
+ base::TimeTicks expected_execution_time,
+ const base::Callback<void(base::TimeTicks)>& next_capture);
+
+ // |thread_checker_| is used to check that all methods are called on the
+ // thread that owns the object.
+ base::ThreadChecker thread_checker_;
+
+ const FakeVideoCaptureDeviceType device_type_;
+
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ // |fake_frame_| is used for capturing on Own Buffers.
+ scoped_ptr<uint8[]> fake_frame_;
+ int frame_count_;
+ VideoCaptureFormat capture_format_;
+
+ // FakeVideoCaptureDevice posts tasks to itself for frame construction and
+ // needs to deal with an asynchronous StopAndDeAllocate().
+ base::WeakPtrFactory<FakeVideoCaptureDevice> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/media/capture/video/fake_video_capture_device_factory.cc b/media/capture/video/fake_video_capture_device_factory.cc
new file mode 100644
index 0000000..5103990
--- /dev/null
+++ b/media/capture/video/fake_video_capture_device_factory.cc
@@ -0,0 +1,86 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/fake_video_capture_device_factory.h"
+
+#include "base/command_line.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/media_switches.h"
+#include "media/capture/video/fake_video_capture_device.h"
+
+namespace media {
+
+FakeVideoCaptureDeviceFactory::FakeVideoCaptureDeviceFactory()
+ : number_of_devices_(1) {
+}
+
+scoped_ptr<VideoCaptureDevice> FakeVideoCaptureDeviceFactory::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ const std::string option =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kUseFakeDeviceForMediaStream);
+
+ FakeVideoCaptureDevice::FakeVideoCaptureDeviceType fake_vcd_type;
+ if (option.empty())
+ fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS;
+ else if (base::EqualsCaseInsensitiveASCII(option, "triplanar"))
+ fake_vcd_type = FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR;
+ else
+ fake_vcd_type = FakeVideoCaptureDevice::USING_CLIENT_BUFFERS;
+
+ for (int n = 0; n < number_of_devices_; ++n) {
+ std::string possible_id = base::StringPrintf("/dev/video%d", n);
+ if (device_name.id().compare(possible_id) == 0) {
+ return scoped_ptr<VideoCaptureDevice>(
+ new FakeVideoCaptureDevice(fake_vcd_type));
+ }
+ }
+ return scoped_ptr<VideoCaptureDevice>();
+}
+
+void FakeVideoCaptureDeviceFactory::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ for (int n = 0; n < number_of_devices_; ++n) {
+ VideoCaptureDevice::Name name(base::StringPrintf("fake_device_%d", n),
+ base::StringPrintf("/dev/video%d", n)
+#if defined(OS_LINUX)
+ ,
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE
+#elif defined(OS_MACOSX)
+ ,
+ VideoCaptureDevice::Name::AVFOUNDATION
+#elif defined(OS_WIN)
+ ,
+ VideoCaptureDevice::Name::DIRECT_SHOW
+#elif defined(OS_ANDROID)
+ ,
+ VideoCaptureDevice::Name::API2_LEGACY
+#endif
+ );
+ device_names->push_back(name);
+ }
+}
+
+void FakeVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const int frame_rate = 1000 / FakeVideoCaptureDevice::FakeCapturePeriodMs();
+ const gfx::Size supported_sizes[] = {gfx::Size(320, 240),
+ gfx::Size(640, 480),
+ gfx::Size(1280, 720),
+ gfx::Size(1920, 1080)};
+ supported_formats->clear();
+ for (const auto& size : supported_sizes) {
+ supported_formats->push_back(VideoCaptureFormat(
+ size, frame_rate, media::VIDEO_CAPTURE_PIXEL_FORMAT_I420));
+ }
+}
+
+} // namespace media
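The fake factory enumerates devices with ids /dev/video0../dev/video(N-1), and Create() matches on that id. A hedged usage sketch, assuming a test that dials up the population via set_number_of_devices() (declared in the header that follows):

  #include "media/capture/video/fake_video_capture_device_factory.h"

  scoped_ptr<media::VideoCaptureDevice> CreateLastFakeDevice() {
    media::FakeVideoCaptureDeviceFactory factory;
    factory.set_number_of_devices(3);  // Ids /dev/video0../dev/video2.

    media::VideoCaptureDevice::Names names;
    factory.GetDeviceNames(&names);

    // Matching is by id, not by the "fake_device_N" display name.
    return factory.Create(names.back());
  }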
diff --git a/media/capture/video/fake_video_capture_device_factory.h b/media/capture/video/fake_video_capture_device_factory.h
new file mode 100644
index 0000000..a6de31a
--- /dev/null
+++ b/media/capture/video/fake_video_capture_device_factory.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a fake VideoCaptureDeviceFactory class.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate fake devices,
+// not including file-based ones.
+class MEDIA_EXPORT FakeVideoCaptureDeviceFactory
+ : public VideoCaptureDeviceFactory {
+ public:
+ FakeVideoCaptureDeviceFactory();
+ ~FakeVideoCaptureDeviceFactory() override {}
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+
+ void set_number_of_devices(int number_of_devices) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ number_of_devices_ = number_of_devices;
+ }
+ int number_of_devices() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return number_of_devices_;
+ }
+
+ private:
+ int number_of_devices_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FAKE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
diff --git a/media/capture/video/fake_video_capture_device_unittest.cc b/media/capture/video/fake_video_capture_device_unittest.cc
new file mode 100644
index 0000000..a8f90f2
--- /dev/null
+++ b/media/capture/video/fake_video_capture_device_unittest.cc
@@ -0,0 +1,240 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/run_loop.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/fake_video_capture_device.h"
+#include "media/capture/video/fake_video_capture_device_factory.h"
+#include "media/capture/video/video_capture_device.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::Bool;
+using ::testing::Combine;
+using ::testing::SaveArg;
+using ::testing::Values;
+
+namespace media {
+
+namespace {
+
+// This class is a Client::Buffer that allocates and frees the requested |size|.
+class MockBuffer : public VideoCaptureDevice::Client::Buffer {
+ public:
+ MockBuffer(int buffer_id, size_t size)
+ : id_(buffer_id), size_(size), data_(new uint8[size_]) {}
+ ~MockBuffer() override { delete[] data_; }
+
+ int id() const override { return id_; }
+ size_t size() const override { return size_; }
+ void* data() override { return data_; }
+ ClientBuffer AsClientBuffer() override { return nullptr; }
+#if defined(OS_POSIX)
+ base::FileDescriptor AsPlatformFile() override {
+ return base::FileDescriptor();
+ }
+#endif
+
+ private:
+ const int id_;
+ const size_t size_;
+ uint8* const data_;
+};
+
+class MockClient : public VideoCaptureDevice::Client {
+ public:
+ MOCK_METHOD1(OnError, void(const std::string& reason));
+
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : frame_cb_(frame_cb) {}
+
+ // Client virtual methods for capturing using Device Buffers.
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(format);
+ }
+ void OnIncomingCapturedYuvData(const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(frame_format);
+ }
+
+ // Virtual methods for capturing using Client's Buffers.
+ scoped_ptr<Buffer> ReserveOutputBuffer(const gfx::Size& dimensions,
+ media::VideoCapturePixelFormat format,
+ media::VideoPixelStorage storage) {
+ EXPECT_TRUE((format == media::VIDEO_CAPTURE_PIXEL_FORMAT_I420 &&
+ storage == media::PIXEL_STORAGE_CPU) ||
+ (format == media::VIDEO_CAPTURE_PIXEL_FORMAT_ARGB &&
+ storage == media::PIXEL_STORAGE_GPUMEMORYBUFFER));
+ EXPECT_GT(dimensions.GetArea(), 0);
+ const VideoCaptureFormat frame_format(dimensions, 0.0, format);
+ return make_scoped_ptr(
+ new MockBuffer(0, frame_format.ImageAllocationSize()));
+ }
+ void OnIncomingCapturedBuffer(scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) {
+ frame_cb_.Run(frame_format);
+ }
+ void OnIncomingCapturedVideoFrame(
+ scoped_ptr<Buffer> buffer,
+ const scoped_refptr<media::VideoFrame>& frame,
+ const base::TimeTicks& timestamp) {
+ VideoCaptureFormat format(frame->natural_size(), 30.0,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ frame_cb_.Run(format);
+ }
+
+ double GetBufferPoolUtilization() const override { return 0.0; }
+
+ private:
+ base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
+};
+
+class DeviceEnumerationListener
+ : public base::RefCounted<DeviceEnumerationListener> {
+ public:
+ MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
+ void(VideoCaptureDevice::Names* names));
+ // GMock doesn't support move-only arguments, so we use this forward method.
+ void OnEnumeratedDevicesCallback(
+ scoped_ptr<VideoCaptureDevice::Names> names) {
+ OnEnumeratedDevicesCallbackPtr(names.release());
+ }
+
+ private:
+ friend class base::RefCounted<DeviceEnumerationListener>;
+ virtual ~DeviceEnumerationListener() {}
+};
+
+} // namespace
+
+class FakeVideoCaptureDeviceTest
+ : public testing::TestWithParam<
+ ::testing::tuple<FakeVideoCaptureDevice::FakeVideoCaptureDeviceType,
+ bool>> {
+ protected:
+ FakeVideoCaptureDeviceTest()
+ : loop_(new base::MessageLoop()),
+ client_(new MockClient(
+ base::Bind(&FakeVideoCaptureDeviceTest::OnFrameCaptured,
+ base::Unretained(this)))),
+ video_capture_device_factory_(new FakeVideoCaptureDeviceFactory()) {
+ device_enumeration_listener_ = new DeviceEnumerationListener();
+ }
+
+ void SetUp() override { EXPECT_CALL(*client_, OnError(_)).Times(0); }
+
+ void OnFrameCaptured(const VideoCaptureFormat& format) {
+ last_format_ = format;
+ run_loop_->QuitClosure().Run();
+ }
+
+ void WaitForCapturedFrame() {
+ run_loop_.reset(new base::RunLoop());
+ run_loop_->Run();
+ }
+
+ scoped_ptr<VideoCaptureDevice::Names> EnumerateDevices() {
+ VideoCaptureDevice::Names* names;
+ EXPECT_CALL(*device_enumeration_listener_.get(),
+ OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
+
+ video_capture_device_factory_->EnumerateDeviceNames(
+ base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
+ device_enumeration_listener_));
+ base::MessageLoop::current()->RunUntilIdle();
+ return scoped_ptr<VideoCaptureDevice::Names>(names);
+ }
+
+ const VideoCaptureFormat& last_format() const { return last_format_; }
+
+ VideoCaptureDevice::Names names_;
+ const scoped_ptr<base::MessageLoop> loop_;
+ scoped_ptr<base::RunLoop> run_loop_;
+ scoped_ptr<MockClient> client_;
+ scoped_refptr<DeviceEnumerationListener> device_enumeration_listener_;
+ VideoCaptureFormat last_format_;
+ const scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
+};
+
+TEST_P(FakeVideoCaptureDeviceTest, CaptureUsing) {
+ const scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
+ ASSERT_FALSE(names->empty());
+
+ scoped_ptr<VideoCaptureDevice> device(
+ new FakeVideoCaptureDevice(testing::get<0>(GetParam())));
+ ASSERT_TRUE(device);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ capture_params.use_gpu_memory_buffers = ::testing::get<1>(GetParam());
+ device->AllocateAndStart(capture_params, client_.Pass());
+
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30.0);
+ device->StopAndDeAllocate();
+}
+
+INSTANTIATE_TEST_CASE_P(
+ ,
+ FakeVideoCaptureDeviceTest,
+ Combine(Values(FakeVideoCaptureDevice::USING_OWN_BUFFERS,
+ FakeVideoCaptureDevice::USING_OWN_BUFFERS_TRIPLANAR,
+ FakeVideoCaptureDevice::USING_CLIENT_BUFFERS),
+ Bool()));
+
+TEST_F(FakeVideoCaptureDeviceTest, GetDeviceSupportedFormats) {
+ scoped_ptr<VideoCaptureDevice::Names> names(EnumerateDevices());
+
+ VideoCaptureFormats supported_formats;
+
+ for (const auto& names_iterator : *names) {
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ names_iterator, &supported_formats);
+ ASSERT_EQ(supported_formats.size(), 4u);
+ EXPECT_EQ(supported_formats[0].frame_size.width(), 320);
+ EXPECT_EQ(supported_formats[0].frame_size.height(), 240);
+ EXPECT_EQ(supported_formats[0].pixel_format,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[0].frame_rate, 20.0);
+ EXPECT_EQ(supported_formats[1].frame_size.width(), 640);
+ EXPECT_EQ(supported_formats[1].frame_size.height(), 480);
+ EXPECT_EQ(supported_formats[1].pixel_format,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[1].frame_rate, 20.0);
+ EXPECT_EQ(supported_formats[2].frame_size.width(), 1280);
+ EXPECT_EQ(supported_formats[2].frame_size.height(), 720);
+ EXPECT_EQ(supported_formats[2].pixel_format,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[2].frame_rate, 20.0);
+ EXPECT_EQ(supported_formats[3].frame_size.width(), 1920);
+ EXPECT_EQ(supported_formats[3].frame_size.height(), 1080);
+ EXPECT_EQ(supported_formats[3].pixel_format,
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ EXPECT_GE(supported_formats[3].frame_rate, 20.0);
+ }
+}
+
+} // namespace media
diff --git a/media/capture/video/file_video_capture_device.cc b/media/capture/video/file_video_capture_device.cc
new file mode 100644
index 0000000..e7d9f06
--- /dev/null
+++ b/media/capture/video/file_video_capture_device.cc
@@ -0,0 +1,256 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/file_video_capture_device.h"
+
+#include "base/bind.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "media/base/video_capture_types.h"
+
+namespace media {
+static const int kY4MHeaderMaxSize = 200;
+static const char kY4MSimpleFrameDelimiter[] = "FRAME";
+static const int kY4MSimpleFrameDelimiterSize = 6;
+
+int ParseY4MInt(const base::StringPiece& token) {
+ int temp_int;
+ CHECK(base::StringToInt(token, &temp_int)) << token;
+ return temp_int;
+}
+
+// Extract numerator and denominator out of a token that must have the format
+// numerator:denominator, both integer numbers.
+void ParseY4MRational(const base::StringPiece& token,
+ int* numerator,
+ int* denominator) {
+ size_t index_divider = token.find(':');
+ CHECK_NE(index_divider, token.npos);
+ *numerator = ParseY4MInt(token.substr(0, index_divider));
+ *denominator = ParseY4MInt(token.substr(index_divider + 1, token.length()));
+ CHECK(*denominator);
+}
+
+// This function parses the ASCII string in |file_header| as the header of a
+// Y4M file, returning the collected format in |video_format|. For a
+// non-authoritative explanation of the header format, check
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2
+// Restrictions: only the I420 pixel format is supported; interlacing (except
+// mixed mode, which is rejected) and pixel aspect ratio are ignored.
+// Implementation notes: the Y4M header should end with an ASCII 0x20
+// (whitespace) character; however, all examples mentioned in the Y4M header
+// description end with a newline character instead. Also, some headers do
+// _not_ specify a pixel format, in which case I420 is assumed.
+// This code was inspired by third_party/libvpx/.../y4minput.* .
+void ParseY4MTags(const std::string& file_header,
+ media::VideoCaptureFormat* video_format) {
+ video_format->pixel_format = media::VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ video_format->frame_size.set_width(0);
+ video_format->frame_size.set_height(0);
+ size_t index = 0;
+ size_t blank_position = 0;
+ base::StringPiece token;
+ while ((blank_position = file_header.find_first_of("\n ", index)) !=
+ std::string::npos) {
+ // Every token is supposed to have an identifier letter and a bunch of
+ // information immediately after, which we extract into a |token| here.
+ token =
+ base::StringPiece(&file_header[index + 1], blank_position - index - 1);
+ CHECK(!token.empty());
+ switch (file_header[index]) {
+ case 'W':
+ video_format->frame_size.set_width(ParseY4MInt(token));
+ break;
+ case 'H':
+ video_format->frame_size.set_height(ParseY4MInt(token));
+ break;
+ case 'F': {
+ // If the token is "FRAME", it means we have finished with the header.
+ if (token[0] == 'R')
+ break;
+ int fps_numerator, fps_denominator;
+ ParseY4MRational(token, &fps_numerator, &fps_denominator);
+ // Use floating-point division so rates like 30000:1001 are not truncated.
+ video_format->frame_rate =
+ static_cast<float>(fps_numerator) / fps_denominator;
+ break;
+ }
+ case 'I':
+ // Interlacing is ignored, but we don't like mixed modes.
+ CHECK_NE(token[0], 'm');
+ break;
+ case 'A':
+ // Pixel aspect ratio ignored.
+ break;
+ case 'C':
+ CHECK(token == "420" || token == "420jpeg" || token == "420paldv")
+ << token; // Only I420 is supported, and we fudge the variants.
+ break;
+ default:
+ break;
+ }
+ // We're done if we have found a newline character right after the token.
+ if (file_header[blank_position] == '\n')
+ break;
+ index = blank_position + 1;
+ }
+ // Last video format semantic correctness check before sending it back.
+ CHECK(video_format->IsValid());
+}
+
+// Reads and parses the header of a Y4M |file|, returning the collected pixel
+// format in |video_format|. Returns the index of the first byte of the first
+// video frame.
+// Restrictions: Only trivial per-frame headers are supported.
+// static
+int64 FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(
+ base::File* file,
+ media::VideoCaptureFormat* video_format) {
+ std::string header(kY4MHeaderMaxSize, 0);
+ file->Read(0, &header[0], kY4MHeaderMaxSize - 1);
+
+ size_t header_end = header.find(kY4MSimpleFrameDelimiter);
+ CHECK_NE(header_end, header.npos);
+
+ ParseY4MTags(header, video_format);
+ return header_end + kY4MSimpleFrameDelimiterSize;
+}
+
+// Opens a given file for reading, and returns the file to the caller, who is
+// responsible for closing it.
+// static
+base::File FileVideoCaptureDevice::OpenFileForRead(
+ const base::FilePath& file_path) {
+ base::File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
+ DLOG_IF(ERROR, !file.IsValid())
+ << file_path.value()
+ << ", error: " << base::File::ErrorToString(file.error_details());
+ return file.Pass();
+}
+
+FileVideoCaptureDevice::FileVideoCaptureDevice(const base::FilePath& file_path)
+ : capture_thread_("CaptureThread"),
+ file_path_(file_path),
+ frame_size_(0),
+ current_byte_index_(0),
+ first_frame_byte_index_(0) {
+}
+
+FileVideoCaptureDevice::~FileVideoCaptureDevice() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // If the capture thread is still running, the device has not been
+ // deallocated properly.
+ CHECK(!capture_thread_.IsRunning());
+}
+
+void FileVideoCaptureDevice::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(!capture_thread_.IsRunning());
+
+ capture_thread_.Start();
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&FileVideoCaptureDevice::OnAllocateAndStart,
+ base::Unretained(this), params, base::Passed(&client)));
+}
+
+void FileVideoCaptureDevice::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CHECK(capture_thread_.IsRunning());
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnStopAndDeAllocate,
+ base::Unretained(this)));
+ capture_thread_.Stop();
+}
+
+int FileVideoCaptureDevice::CalculateFrameSize() const {
+ DCHECK_EQ(capture_format_.pixel_format, VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ return capture_format_.ImageAllocationSize();
+}
+
+void FileVideoCaptureDevice::OnAllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+
+ client_ = client.Pass();
+
+ // Open the file and parse the header. Get frame size and format.
+ DCHECK(!file_.IsValid());
+ file_ = OpenFileForRead(file_path_);
+ if (!file_.IsValid()) {
+ client_->OnError("Could not open Video file");
+ return;
+ }
+ first_frame_byte_index_ =
+ ParseFileAndExtractVideoFormat(&file_, &capture_format_);
+ current_byte_index_ = first_frame_byte_index_;
+ DVLOG(1) << "Opened video file " << capture_format_.frame_size.ToString()
+ << ", fps: " << capture_format_.frame_rate;
+
+ frame_size_ = CalculateFrameSize();
+ video_frame_.reset(new uint8[frame_size_]);
+
+ capture_thread_.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)));
+}
+
+void FileVideoCaptureDevice::OnStopAndDeAllocate() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ file_.Close();
+ client_.reset();
+ current_byte_index_ = 0;
+ first_frame_byte_index_ = 0;
+ frame_size_ = 0;
+ next_frame_time_ = base::TimeTicks();
+ video_frame_.reset();
+}
+
+void FileVideoCaptureDevice::OnCaptureTask() {
+ DCHECK_EQ(capture_thread_.message_loop(), base::MessageLoop::current());
+ if (!client_)
+ return;
+ int result =
+ file_.Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()), frame_size_);
+
+ // Reading past EOF makes base::File return 0 bytes. In that case, rewind
+ // to the first frame and read again.
+ if (result != frame_size_) {
+ CHECK_EQ(result, 0);
+ current_byte_index_ = first_frame_byte_index_;
+ CHECK_EQ(
+ file_.Read(current_byte_index_,
+ reinterpret_cast<char*>(video_frame_.get()), frame_size_),
+ frame_size_);
+ } else {
+ current_byte_index_ += frame_size_ + kY4MSimpleFrameDelimiterSize;
+ }
+
+ // Give the captured frame to the client.
+ const base::TimeTicks current_time = base::TimeTicks::Now();
+ client_->OnIncomingCapturedData(video_frame_.get(), frame_size_,
+ capture_format_, 0, current_time);
+ // Reschedule next CaptureTask.
+ const base::TimeDelta frame_interval =
+ base::TimeDelta::FromMicroseconds(1E6 / capture_format_.frame_rate);
+ if (next_frame_time_.is_null()) {
+ next_frame_time_ = current_time + frame_interval;
+ } else {
+ next_frame_time_ += frame_interval;
+ // Don't accumulate any debt if we are lagging behind - just post the next
+ // frame immediately and continue as normal.
+ if (next_frame_time_ < current_time)
+ next_frame_time_ = current_time;
+ }
+ base::MessageLoop::current()->PostDelayedTask(
+ FROM_HERE, base::Bind(&FileVideoCaptureDevice::OnCaptureTask,
+ base::Unretained(this)),
+ next_frame_time_ - current_time);
+}
+
+} // namespace media
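For reference, a worked example of the header format that ParseY4MTags() accepts. This sketch assumes it lives in the same translation unit, inside namespace media, since the parser is not exported:

  void ParseExampleY4MHeader() {
    // 640x480, 30000/1001 (~29.97) fps, progressive, 4:2:0 chroma.
    const std::string header =
        "YUV4MPEG2 W640 H480 F30000:1001 Ip A1:1 C420jpeg\nFRAME\n";
    VideoCaptureFormat format;
    ParseY4MTags(header, &format);
    // |format| now holds 640x480 at ~29.97 fps, I420. The first frame's pixel
    // data starts right after the "FRAME\n" delimiter, which is what
    // ParseFileAndExtractVideoFormat() locates.
  }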
diff --git a/media/capture/video/file_video_capture_device.h b/media/capture/video/file_video_capture_device.h
new file mode 100644
index 0000000..f50f041
--- /dev/null
+++ b/media/capture/video/file_video_capture_device.h
@@ -0,0 +1,78 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
+
+#include <string>
+
+#include "base/files/file.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// Implementation of a VideoCaptureDevice class that reads from a file. Used for
+// testing the video capture pipeline when no real hardware is available. The
+// only supported file format is YUV4MPEG2 (a.k.a. Y4M), a minimal container
+// with a series of uncompressed video only frames, see the link
+// http://wiki.multimedia.cx/index.php?title=YUV4MPEG2 for more information
+// on the file format. Several restrictions and notes apply, see the
+// implementation file.
+// Example videos can be found in http://media.xiph.org/video/derf.
+class MEDIA_EXPORT FileVideoCaptureDevice : public VideoCaptureDevice {
+ public:
+ static int64 ParseFileAndExtractVideoFormat(
+ base::File* file,
+ media::VideoCaptureFormat* video_format);
+ static base::File OpenFileForRead(const base::FilePath& file_path);
+
+ // Constructor; |file_path| is the fully qualified path of the Y4M video
+ // file to stream repeatedly.
+ explicit FileVideoCaptureDevice(const base::FilePath& file_path);
+
+ // VideoCaptureDevice implementation, class methods.
+ ~FileVideoCaptureDevice() override;
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
+
+ private:
+ // Returns size in bytes of an I420 frame, not including possible paddings,
+ // defined by |capture_format_|.
+ int CalculateFrameSize() const;
+
+ // Called on the |capture_thread_|.
+ void OnAllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client);
+ void OnStopAndDeAllocate();
+ void OnCaptureTask();
+
+ // |thread_checker_| is used to check that the destructor, AllocateAndStart()
+ // and StopAndDeAllocate() are called on the thread that owns the object.
+ base::ThreadChecker thread_checker_;
+
+ // |capture_thread_| is used for internal operations via posting tasks to it.
+ // It is active between OnAllocateAndStart() and OnStopAndDeAllocate().
+ base::Thread capture_thread_;
+ // The following members belong to |capture_thread_|.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ const base::FilePath file_path_;
+ base::File file_;
+ scoped_ptr<uint8[]> video_frame_;
+ VideoCaptureFormat capture_format_;
+ int frame_size_;
+ int64 current_byte_index_;
+ int64 first_frame_byte_index_;
+ // Target time for the next frame.
+ base::TimeTicks next_frame_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(FileVideoCaptureDevice);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/media/capture/video/file_video_capture_device_factory.cc b/media/capture/video/file_video_capture_device_factory.cc
new file mode 100644
index 0000000..1d7ae12
--- /dev/null
+++ b/media/capture/video/file_video_capture_device_factory.cc
@@ -0,0 +1,76 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/file_video_capture_device_factory.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/strings/sys_string_conversions.h"
+#include "media/base/media_switches.h"
+#include "media/capture/video/file_video_capture_device.h"
+
+namespace media {
+
+const char kFileVideoCaptureDeviceName[] =
+ "/dev/placeholder-for-file-backed-fake-capture-device";
+
+// Inspects the command line and retrieves the file path parameter.
+base::FilePath GetFilePathFromCommandLine() {
+ base::FilePath command_line_file_path =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValuePath(
+ switches::kUseFileForFakeVideoCapture);
+ CHECK(!command_line_file_path.empty());
+ return command_line_file_path;
+}
+
+scoped_ptr<VideoCaptureDevice> FileVideoCaptureDeviceFactory::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_WIN)
+ return scoped_ptr<VideoCaptureDevice>(new FileVideoCaptureDevice(
+ base::FilePath(base::SysUTF8ToWide(device_name.name()))));
+#else
+ return scoped_ptr<VideoCaptureDevice>(
+ new FileVideoCaptureDevice(base::FilePath(device_name.name())));
+#endif
+}
+
+void FileVideoCaptureDeviceFactory::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ const base::FilePath command_line_file_path = GetFilePathFromCommandLine();
+#if defined(OS_WIN)
+ device_names->push_back(VideoCaptureDevice::Name(
+ base::SysWideToUTF8(command_line_file_path.value()),
+ kFileVideoCaptureDeviceName, VideoCaptureDevice::Name::DIRECT_SHOW));
+#elif defined(OS_MACOSX)
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(), kFileVideoCaptureDeviceName,
+ VideoCaptureDevice::Name::AVFOUNDATION));
+#elif defined(OS_LINUX)
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(), kFileVideoCaptureDeviceName,
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
+#else
+ device_names->push_back(VideoCaptureDevice::Name(
+ command_line_file_path.value(), kFileVideoCaptureDeviceName));
+#endif
+}
+
+void FileVideoCaptureDeviceFactory::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::File file =
+ FileVideoCaptureDevice::OpenFileForRead(GetFilePathFromCommandLine());
+ if (!file.IsValid())
+ return;
+ VideoCaptureFormat capture_format;
+ FileVideoCaptureDevice::ParseFileAndExtractVideoFormat(&file,
+ &capture_format);
+ supported_formats->push_back(capture_format);
+}
+
+} // namespace media
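
GetFilePathFromCommandLine() above leans on base::CommandLine and the
switches::kUseFileForFakeVideoCapture switch. As a rough, framework-free sketch
of the same idea, the snippet below scans argv for a
--use-file-for-fake-video-capture= prefix; that literal switch string is an
assumption here and should be checked against media/base/media_switches.cc.

// Hypothetical, dependency-free version of the command-line lookup.
#include <iostream>
#include <string>

std::string GetFakeCaptureFilePath(int argc, char** argv) {
  const std::string prefix = "--use-file-for-fake-video-capture=";
  for (int i = 1; i < argc; ++i) {
    const std::string arg(argv[i]);
    if (arg.compare(0, prefix.size(), prefix) == 0)
      return arg.substr(prefix.size());  // Everything after the '='.
  }
  return std::string();  // Empty: the switch was not present.
}

int main(int argc, char** argv) {
  const std::string path = GetFakeCaptureFilePath(argc, argv);
  std::cout << (path.empty() ? "<no capture file given>" : path) << "\n";
}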
diff --git a/media/capture/video/file_video_capture_device_factory.h b/media/capture/video/file_video_capture_device_factory.h
new file mode 100644
index 0000000..5335b2c
--- /dev/null
+++ b/media/capture/video/file_video_capture_device_factory.h
@@ -0,0 +1,31 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate file-backed
+// fake devices. These devices play back video-only files as video capture
+// input.
+class MEDIA_EXPORT FileVideoCaptureDeviceFactory
+ : public VideoCaptureDeviceFactory {
+ public:
+ FileVideoCaptureDeviceFactory() {}
+ ~FileVideoCaptureDeviceFactory() override {}
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_FILE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
diff --git a/media/capture/video/linux/OWNERS b/media/capture/video/linux/OWNERS
new file mode 100644
index 0000000..bf72e04
--- /dev/null
+++ b/media/capture/video/linux/OWNERS
@@ -0,0 +1 @@
+posciak@chromium.org
diff --git a/media/capture/video/linux/v4l2_capture_delegate.cc b/media/capture/video/linux/v4l2_capture_delegate.cc
new file mode 100644
index 0000000..8010193
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate.cc
@@ -0,0 +1,425 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
+
+#include <poll.h>
+#include <sys/fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "base/bind.h"
+#include "base/files/file_enumerator.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#include "media/base/bind_to_current_loop.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_multi_plane.h"
+#include "media/capture/video/linux/v4l2_capture_delegate_single_plane.h"
+#include "media/capture/video/linux/video_capture_device_linux.h"
+
+namespace media {
+
+// Desired number of video buffers to allocate. The actual number of buffers
+// allocated by the V4L2 driver can be higher or lower than this number.
+// kNumVideoBuffers should not be too small, or Chrome may not return enough
+// buffers back to the driver in time.
+const uint32 kNumVideoBuffers = 4;
+// Timeout in milliseconds that |v4l2_thread_| blocks waiting for a frame from
+// the hardware.
+const int kCaptureTimeoutMs = 200;
+// The number of continuous timeouts tolerated before being treated as error.
+const int kContinuousTimeoutLimit = 10;
+// MJPEG is preferred if the requested width or height is larger than this.
+const int kMjpegWidth = 640;
+const int kMjpegHeight = 480;
+// Typical frame rate, in frames per second.
+const int kTypicalFramerate = 30;
+
+// V4L2 color formats supported by V4L2CaptureDelegate derived classes.
+// This list is ordered by precedence of use -- but see caveats for MJPEG.
+static struct {
+ uint32_t fourcc;
+ VideoCapturePixelFormat pixel_format;
+ size_t num_planes;
+} const kSupportedFormatsAndPlanarity[] = {
+ {V4L2_PIX_FMT_YUV420, VIDEO_CAPTURE_PIXEL_FORMAT_I420, 1},
+ {V4L2_PIX_FMT_YUYV, VIDEO_CAPTURE_PIXEL_FORMAT_YUY2, 1},
+ {V4L2_PIX_FMT_UYVY, VIDEO_CAPTURE_PIXEL_FORMAT_UYVY, 1},
+ {V4L2_PIX_FMT_RGB24, VIDEO_CAPTURE_PIXEL_FORMAT_RGB24, 1},
+#if !defined(OS_OPENBSD)
+ // TODO(mcasas): add V4L2_PIX_FMT_YVU420M when available in bots.
+ {V4L2_PIX_FMT_YUV420M, VIDEO_CAPTURE_PIXEL_FORMAT_I420, 3},
+#endif
+    // MJPEG sits fairly low in the list since we prefer not to decode it.
+    // However, it is needed for large resolutions due to USB bandwidth
+    // limitations, so GetListOfUsableFourCcs() can duplicate it on top; see
+    // that method.
+    {V4L2_PIX_FMT_MJPEG, VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG, 1},
+    // JPEG works as MJPEG on some gspca webcams from field reports, see
+    // https://code.google.com/p/webrtc/issues/detail?id=529; keep it as the
+    // least preferred format.
+    {V4L2_PIX_FMT_JPEG, VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG, 1},
+};
+
+// static
+scoped_refptr<V4L2CaptureDelegate>
+V4L2CaptureDelegate::CreateV4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency) {
+ switch (device_name.capture_api_type()) {
+ case VideoCaptureDevice::Name::V4L2_SINGLE_PLANE:
+ return make_scoped_refptr(new V4L2CaptureDelegateSinglePlane(
+ device_name, v4l2_task_runner, power_line_frequency));
+ case VideoCaptureDevice::Name::V4L2_MULTI_PLANE:
+#if !defined(OS_OPENBSD)
+ return make_scoped_refptr(new V4L2CaptureDelegateMultiPlane(
+ device_name, v4l2_task_runner, power_line_frequency));
+ default:
+#endif
+ NOTIMPLEMENTED() << "Unknown V4L2 capture API type";
+ return scoped_refptr<V4L2CaptureDelegate>();
+ }
+}
+
+// static
+size_t V4L2CaptureDelegate::GetNumPlanesForFourCc(uint32_t fourcc) {
+ for (const auto& fourcc_and_pixel_format : kSupportedFormatsAndPlanarity) {
+ if (fourcc_and_pixel_format.fourcc == fourcc)
+ return fourcc_and_pixel_format.num_planes;
+ }
+ DVLOG(1) << "Unknown fourcc " << FourccToString(fourcc);
+ return 0;
+}
+
+// static
+VideoCapturePixelFormat V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(
+ uint32_t v4l2_fourcc) {
+ for (const auto& fourcc_and_pixel_format : kSupportedFormatsAndPlanarity) {
+ if (fourcc_and_pixel_format.fourcc == v4l2_fourcc)
+ return fourcc_and_pixel_format.pixel_format;
+ }
+ // Not finding a pixel format is OK during device capabilities enumeration.
+ // Let the caller decide if VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN is an error or
+ // not.
+ DVLOG(1) << "Unsupported pixel format: " << FourccToString(v4l2_fourcc);
+ return VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+}
+
+// static
+std::list<uint32_t> V4L2CaptureDelegate::GetListOfUsableFourCcs(
+ bool prefer_mjpeg) {
+ std::list<uint32_t> supported_formats;
+ for (const auto& format : kSupportedFormatsAndPlanarity)
+ supported_formats.push_back(format.fourcc);
+
+ // Duplicate MJPEG on top of the list depending on |prefer_mjpeg|.
+ if (prefer_mjpeg)
+ supported_formats.push_front(V4L2_PIX_FMT_MJPEG);
+
+ return supported_formats;
+}
+
+// static
+std::string V4L2CaptureDelegate::FourccToString(uint32_t fourcc) {
+ return base::StringPrintf("%c%c%c%c", fourcc & 0xFF, (fourcc >> 8) & 0xFF,
+ (fourcc >> 16) & 0xFF, (fourcc >> 24) & 0xFF);
+}
+
+V4L2CaptureDelegate::BufferTracker::BufferTracker() {
+}
+
+V4L2CaptureDelegate::BufferTracker::~BufferTracker() {
+ for (const auto& plane : planes_) {
+ if (plane.start == nullptr)
+ continue;
+ const int result = munmap(plane.start, plane.length);
+ PLOG_IF(ERROR, result < 0) << "Error munmap()ing V4L2 buffer";
+ }
+}
+
+void V4L2CaptureDelegate::BufferTracker::AddMmapedPlane(uint8_t* const start,
+ size_t length) {
+ Plane plane;
+ plane.start = start;
+ plane.length = length;
+ plane.payload_size = 0;
+ planes_.push_back(plane);
+}
+
+V4L2CaptureDelegate::V4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : capture_type_((device_name.capture_api_type() ==
+ VideoCaptureDevice::Name::V4L2_SINGLE_PLANE)
+ ? V4L2_BUF_TYPE_VIDEO_CAPTURE
+ : V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE),
+ v4l2_task_runner_(v4l2_task_runner),
+ device_name_(device_name),
+ power_line_frequency_(power_line_frequency),
+ is_capturing_(false),
+ timeout_count_(0),
+ rotation_(0) {
+}
+
+V4L2CaptureDelegate::~V4L2CaptureDelegate() {
+}
+
+void V4L2CaptureDelegate::AllocateAndStart(
+ int width,
+ int height,
+ float frame_rate,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ DCHECK(client);
+ client_ = client.Pass();
+
+  // The camera needs to be opened with O_RDWR since Linux kernel 3.3.
+ device_fd_.reset(HANDLE_EINTR(open(device_name_.id().c_str(), O_RDWR)));
+ if (!device_fd_.is_valid()) {
+ SetErrorState("Failed to open V4L2 device driver file.");
+ return;
+ }
+
+ v4l2_capability cap = {};
+ if (!((HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
+ ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE ||
+ cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)))) {
+ device_fd_.reset();
+ SetErrorState("This is not a V4L2 video capture device");
+ return;
+ }
+
+ // Get supported video formats in preferred order.
+ // For large resolutions, favour mjpeg over raw formats.
+ const std::list<uint32_t>& desired_v4l2_formats =
+ GetListOfUsableFourCcs(width > kMjpegWidth || height > kMjpegHeight);
+ std::list<uint32_t>::const_iterator best = desired_v4l2_formats.end();
+
+ v4l2_fmtdesc fmtdesc = {};
+ fmtdesc.type = capture_type_;
+ for (; HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_ENUM_FMT, &fmtdesc)) == 0;
+ ++fmtdesc.index) {
+ best = std::find(desired_v4l2_formats.begin(), best, fmtdesc.pixelformat);
+ }
+ if (best == desired_v4l2_formats.end()) {
+ SetErrorState("Failed to find a supported camera format.");
+ return;
+ }
+
+ DVLOG(1) << "Chosen pixel format is " << FourccToString(*best);
+
+ video_fmt_.type = capture_type_;
+ if (!FillV4L2Format(&video_fmt_, width, height, *best)) {
+ SetErrorState("Failed filling in V4L2 Format");
+ return;
+ }
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_FMT, &video_fmt_)) < 0) {
+ SetErrorState("Failed to set video capture format");
+ return;
+ }
+ const VideoCapturePixelFormat pixel_format =
+ V4l2FourCcToChromiumPixelFormat(video_fmt_.fmt.pix.pixelformat);
+ if (pixel_format == VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN) {
+ SetErrorState("Unsupported pixel format");
+ return;
+ }
+
+ // Set capture framerate in the form of capture interval.
+ v4l2_streamparm streamparm = {};
+ streamparm.type = capture_type_;
+ // The following line checks that the driver knows about framerate get/set.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_G_PARM, &streamparm)) >= 0) {
+ // Now check if the device is able to accept a capture framerate set.
+ if (streamparm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
+ // |frame_rate| is float, approximate by a fraction.
+ streamparm.parm.capture.timeperframe.numerator =
+ media::kFrameRatePrecision;
+ streamparm.parm.capture.timeperframe.denominator =
+ (frame_rate) ? (frame_rate * media::kFrameRatePrecision)
+ : (kTypicalFramerate * media::kFrameRatePrecision);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_PARM, &streamparm)) <
+ 0) {
+ SetErrorState("Failed to set camera framerate");
+ return;
+ }
+ DVLOG(2) << "Actual camera driverframerate: "
+ << streamparm.parm.capture.timeperframe.denominator << "/"
+ << streamparm.parm.capture.timeperframe.numerator;
+ }
+ }
+ // TODO(mcasas): what should be done if the camera driver does not allow
+ // framerate configuration, or the actual one is different from the desired?
+
+ // Set anti-banding/anti-flicker to 50/60Hz. May fail due to not supported
+ // operation (|errno| == EINVAL in this case) or plain failure.
+ if ((power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) ||
+ (power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_60HZ) ||
+ (power_line_frequency_ == V4L2_CID_POWER_LINE_FREQUENCY_AUTO)) {
+ struct v4l2_control control = {};
+ control.id = V4L2_CID_POWER_LINE_FREQUENCY;
+ control.value = power_line_frequency_;
+ const int retval =
+ HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_S_CTRL, &control));
+ if (retval != 0)
+ DVLOG(1) << "Error setting power line frequency removal";
+ }
+
+ capture_format_.frame_size.SetSize(video_fmt_.fmt.pix.width,
+ video_fmt_.fmt.pix.height);
+ capture_format_.frame_rate = frame_rate;
+ capture_format_.pixel_format = pixel_format;
+
+ v4l2_requestbuffers r_buffer = {};
+ r_buffer.type = capture_type_;
+ r_buffer.memory = V4L2_MEMORY_MMAP;
+ r_buffer.count = kNumVideoBuffers;
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0) {
+ SetErrorState("Error requesting MMAP buffers from V4L2");
+ return;
+ }
+ for (unsigned int i = 0; i < r_buffer.count; ++i) {
+ if (!MapAndQueueBuffer(i)) {
+ SetErrorState("Allocate buffer failed");
+ return;
+ }
+ }
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMON, &capture_type_)) <
+ 0) {
+ SetErrorState("VIDIOC_STREAMON failed");
+ return;
+ }
+
+ is_capturing_ = true;
+ // Post task to start fetching frames from v4l2.
+ v4l2_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&V4L2CaptureDelegate::DoCapture, this));
+}
+
+void V4L2CaptureDelegate::StopAndDeAllocate() {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+  // The order is important: stop streaming, clear |buffer_tracker_pool_|,
+  // thus munmap()ing the v4l2_buffers, and then return them to the OS.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_STREAMOFF, &capture_type_)) <
+ 0) {
+ SetErrorState("VIDIOC_STREAMOFF failed");
+ return;
+ }
+
+ buffer_tracker_pool_.clear();
+
+ v4l2_requestbuffers r_buffer = {};
+ r_buffer.type = capture_type_;
+ r_buffer.memory = V4L2_MEMORY_MMAP;
+ r_buffer.count = 0;
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_REQBUFS, &r_buffer)) < 0)
+ SetErrorState("Failed to VIDIOC_REQBUFS with count = 0");
+
+ // At this point we can close the device.
+ // This is also needed for correctly changing settings later via VIDIOC_S_FMT.
+ device_fd_.reset();
+ is_capturing_ = false;
+ client_.reset();
+}
+
+void V4L2CaptureDelegate::SetRotation(int rotation) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ DCHECK(rotation >= 0 && rotation < 360 && rotation % 90 == 0);
+ rotation_ = rotation;
+}
+
+bool V4L2CaptureDelegate::MapAndQueueBuffer(int index) {
+ v4l2_buffer buffer;
+ FillV4L2Buffer(&buffer, index);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QUERYBUF, &buffer)) < 0) {
+ DLOG(ERROR) << "Error querying status of a MMAP V4L2 buffer";
+ return false;
+ }
+
+ const scoped_refptr<BufferTracker>& buffer_tracker = CreateBufferTracker();
+ if (!buffer_tracker->Init(device_fd_.get(), buffer)) {
+ DLOG(ERROR) << "Error creating BufferTracker";
+ return false;
+ }
+ buffer_tracker_pool_.push_back(buffer_tracker);
+
+  // Enqueue the buffer in the driver's incoming queue.
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
+ DLOG(ERROR) << "Error enqueuing a V4L2 buffer back into the driver";
+ return false;
+ }
+ return true;
+}
+
+void V4L2CaptureDelegate::FillV4L2Buffer(v4l2_buffer* buffer, int i) const {
+ memset(buffer, 0, sizeof(*buffer));
+ buffer->memory = V4L2_MEMORY_MMAP;
+ buffer->index = i;
+ FinishFillingV4L2Buffer(buffer);
+}
+
+void V4L2CaptureDelegate::DoCapture() {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ if (!is_capturing_)
+ return;
+
+ pollfd device_pfd = {};
+ device_pfd.fd = device_fd_.get();
+ device_pfd.events = POLLIN;
+ const int result = HANDLE_EINTR(poll(&device_pfd, 1, kCaptureTimeoutMs));
+ if (result < 0) {
+ SetErrorState("Poll failed");
+ return;
+ }
+  // Check if poll() timed out; track the number of consecutive timeouts and
+  // raise an error if there are too many in a row.
+ if (result == 0) {
+ timeout_count_++;
+ if (timeout_count_ >= kContinuousTimeoutLimit) {
+ SetErrorState("Multiple continuous timeouts while read-polling.");
+ timeout_count_ = 0;
+ return;
+ }
+ } else {
+ timeout_count_ = 0;
+ }
+
+  // Dequeue, send and re-enqueue a buffer if the driver has filled one in.
+ if (device_pfd.revents & POLLIN) {
+ v4l2_buffer buffer;
+ FillV4L2Buffer(&buffer, 0);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_DQBUF, &buffer)) < 0) {
+ SetErrorState("Failed to dequeue capture buffer");
+ return;
+ }
+
+ SetPayloadSize(buffer_tracker_pool_[buffer.index], buffer);
+ SendBuffer(buffer_tracker_pool_[buffer.index], video_fmt_);
+
+ if (HANDLE_EINTR(ioctl(device_fd_.get(), VIDIOC_QBUF, &buffer)) < 0) {
+ SetErrorState("Failed to enqueue capture buffer");
+ return;
+ }
+ }
+
+ v4l2_task_runner_->PostTask(
+ FROM_HERE, base::Bind(&V4L2CaptureDelegate::DoCapture, this));
+}
+
+void V4L2CaptureDelegate::SetErrorState(const std::string& reason) {
+ DCHECK(v4l2_task_runner_->BelongsToCurrentThread());
+ is_capturing_ = false;
+ client_->OnError(reason);
+}
+
+} // namespace media
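
One subtle step in AllocateAndStart() above is approximating the float
|frame_rate| as a V4L2 numerator/denominator capture interval. The sketch
below isolates that arithmetic; the precision constant 100 is an assumed
stand-in for media::kFrameRatePrecision.

// Frame interval sketch: interval = precision / (frame_rate * precision).
#include <cstdint>
#include <iostream>

struct TimePerFrame {
  uint32_t numerator;
  uint32_t denominator;
};

TimePerFrame ApproximateFrameInterval(float frame_rate, uint32_t precision) {
  TimePerFrame interval;
  interval.numerator = precision;
  // Truncation mirrors the implicit conversion in the code above.
  interval.denominator = static_cast<uint32_t>(frame_rate * precision);
  return interval;
}

int main() {
  // 30 fps with precision 100 becomes an interval of 100/3000 seconds.
  const TimePerFrame t = ApproximateFrameInterval(30.0f, 100);
  std::cout << t.numerator << "/" << t.denominator << " s per frame\n";
}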
diff --git a/media/capture/video/linux/v4l2_capture_delegate.h b/media/capture/video/linux/v4l2_capture_delegate.h
new file mode 100644
index 0000000..0a87ed7
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate.h
@@ -0,0 +1,169 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
+
+#if defined(OS_OPENBSD)
+#include <sys/videoio.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include "base/files/scoped_file.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_vector.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// Class doing the actual Linux capture using the V4L2 API. V4L2 SPLANE/MPLANE
+// capture specifics are implemented in derived classes. Created and destroyed
+// on the owner's thread, otherwise living and operating on |v4l2_task_runner_|.
+class V4L2CaptureDelegate
+ : public base::RefCountedThreadSafe<V4L2CaptureDelegate> {
+ public:
+  // Creates the appropriate V4L2CaptureDelegate according to the capture API
+  // type in |device_name|.
+ static scoped_refptr<V4L2CaptureDelegate> CreateV4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+
+  // Retrieves the number of planes for a given |fourcc|, or 0 if unknown.
+ static size_t GetNumPlanesForFourCc(uint32_t fourcc);
+ // Returns the Chrome pixel format for |v4l2_fourcc| or
+ // VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN.
+ static VideoCapturePixelFormat V4l2FourCcToChromiumPixelFormat(
+ uint32_t v4l2_fourcc);
+
+ // Composes a list of usable and supported pixel formats, in order of
+ // preference, with MJPEG prioritised depending on |prefer_mjpeg|.
+ static std::list<uint32_t> GetListOfUsableFourCcs(bool prefer_mjpeg);
+
+ // Forward-to versions of VideoCaptureDevice virtual methods.
+ void AllocateAndStart(int width,
+ int height,
+ float frame_rate,
+ scoped_ptr<VideoCaptureDevice::Client> client);
+ void StopAndDeAllocate();
+
+ void SetRotation(int rotation);
+
+ protected:
+  // Class keeping track of SPLANE/MPLANE V4L2 buffers: planes are mmap()ed
+  // via Init() and munmap()ed on destruction. Destruction is identical for
+  // SPLANE and MPLANE, but mapping is not, so Init() is implemented in
+  // derived classes. Internally it keeps a vector of planes, which for
+  // SPLANE contains a single element.
+ class BufferTracker : public base::RefCounted<BufferTracker> {
+ public:
+ BufferTracker();
+ // Abstract method to mmap() given |fd| according to |buffer|, planarity
+ // specific.
+ virtual bool Init(int fd, const v4l2_buffer& buffer) = 0;
+
+ uint8_t* const GetPlaneStart(size_t plane) const {
+ DCHECK_LT(plane, planes_.size());
+ return planes_[plane].start;
+ }
+
+ size_t GetPlanePayloadSize(size_t plane) const {
+ DCHECK_LT(plane, planes_.size());
+ return planes_[plane].payload_size;
+ }
+
+ void SetPlanePayloadSize(size_t plane, size_t payload_size) {
+ DCHECK_LT(plane, planes_.size());
+ DCHECK_LE(payload_size, planes_[plane].length);
+ planes_[plane].payload_size = payload_size;
+ }
+
+ protected:
+ friend class base::RefCounted<BufferTracker>;
+ virtual ~BufferTracker();
+ // Adds a given mmap()ed plane to |planes_|.
+ void AddMmapedPlane(uint8_t* const start, size_t length);
+
+ private:
+ struct Plane {
+ uint8_t* start;
+ size_t length;
+ size_t payload_size;
+ };
+ std::vector<Plane> planes_;
+ };
+
+ V4L2CaptureDelegate(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+ virtual ~V4L2CaptureDelegate();
+
+  // Creates the necessary, planarity-specific, internal tracking schemes.
+ virtual scoped_refptr<BufferTracker> CreateBufferTracker() const = 0;
+
+ // Fill in |format| with the given parameters, in a planarity dependent way.
+ virtual bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const = 0;
+
+ // Finish filling |buffer| struct with planarity-dependent data.
+ virtual void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const = 0;
+
+ // Fetch the number of bytes occupied by data in |buffer| and set to
+ // |buffer_tracker|.
+ virtual void SetPayloadSize(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_buffer& buffer) const = 0;
+
+ // Sends the captured |buffer| to the |client_|, synchronously.
+ virtual void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const = 0;
+
+  // A few accessors for derived SendBuffer() implementations to access
+  // private member variables.
+ VideoCaptureFormat capture_format() const { return capture_format_; }
+ VideoCaptureDevice::Client* client() const { return client_.get(); }
+ int rotation() const { return rotation_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<V4L2CaptureDelegate>;
+
+ // Returns the input |fourcc| as a std::string four char representation.
+ static std::string FourccToString(uint32_t fourcc);
+ // VIDIOC_QUERYBUFs a buffer from V4L2, creates a BufferTracker for it and
+ // enqueues it (VIDIOC_QBUF) back into V4L2.
+ bool MapAndQueueBuffer(int index);
+ // Fills all common parts of |buffer|. Delegates to FinishFillingV4L2Buffer()
+ // for filling in the planar-dependent parts.
+ void FillV4L2Buffer(v4l2_buffer* buffer, int i) const;
+ void DoCapture();
+ void SetErrorState(const std::string& reason);
+
+ const v4l2_buf_type capture_type_;
+ const scoped_refptr<base::SingleThreadTaskRunner> v4l2_task_runner_;
+ const VideoCaptureDevice::Name device_name_;
+ const int power_line_frequency_;
+
+  // The following members are only known after AllocateAndStart().
+ VideoCaptureFormat capture_format_;
+ v4l2_format video_fmt_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ base::ScopedFD device_fd_;
+
+ // Vector of BufferTracker to keep track of mmap()ed pointers and their use.
+ std::vector<scoped_refptr<BufferTracker>> buffer_tracker_pool_;
+
+ bool is_capturing_;
+ int timeout_count_;
+
+ // Clockwise rotation in degrees. This value should be 0, 90, 180, or 270.
+ int rotation_;
+
+ DISALLOW_COPY_AND_ASSIGN(V4L2CaptureDelegate);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_VIDEO_CAPTURE_DELEGATE_H_
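
The BufferTracker declared above owns mmap()ed planes and releases them in its
destructor. The sketch below reproduces that ownership pattern in isolation,
substituting an anonymous mapping for a real V4L2 buffer so it runs without a
device; the sizes correspond to a 640x480 I420 frame split into three planes.

// Plane-ownership sketch: mmap() on setup, munmap() in the destructor.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class PlaneTracker {
 public:
  ~PlaneTracker() {
    for (const Plane& plane : planes_) {
      if (plane.start && munmap(plane.start, plane.length) < 0)
        perror("munmap");
    }
  }

  bool AddAnonymousPlane(size_t length) {
    void* start = mmap(nullptr, length, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (start == MAP_FAILED)
      return false;
    planes_.push_back({static_cast<uint8_t*>(start), length});
    return true;
  }

 private:
  struct Plane {
    uint8_t* start;
    size_t length;
  };
  std::vector<Plane> planes_;
};

int main() {
  PlaneTracker tracker;
  // Three planes, as an MPLANE I420 buffer would use: Y, then U and V.
  if (!tracker.AddAnonymousPlane(640 * 480) ||
      !tracker.AddAnonymousPlane(320 * 240) ||
      !tracker.AddAnonymousPlane(320 * 240)) {
    return 1;
  }
  return 0;  // munmap() happens in ~PlaneTracker().
}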
diff --git a/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc b/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc
new file mode 100644
index 0000000..1068252
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate_multi_plane.cc
@@ -0,0 +1,99 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/v4l2_capture_delegate_multi_plane.h"
+
+#include <sys/mman.h>
+
+namespace media {
+
+V4L2CaptureDelegateMultiPlane::V4L2CaptureDelegateMultiPlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : V4L2CaptureDelegate(device_name, v4l2_task_runner, power_line_frequency) {
+}
+
+V4L2CaptureDelegateMultiPlane::~V4L2CaptureDelegateMultiPlane() {
+}
+
+scoped_refptr<V4L2CaptureDelegate::BufferTracker>
+V4L2CaptureDelegateMultiPlane::CreateBufferTracker() const {
+ return make_scoped_refptr(new BufferTrackerMPlane());
+}
+
+bool V4L2CaptureDelegateMultiPlane::FillV4L2Format(
+ v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const {
+ format->fmt.pix_mp.width = width;
+ format->fmt.pix_mp.height = height;
+ format->fmt.pix_mp.pixelformat = pixelformat_fourcc;
+
+ const size_t num_v4l2_planes =
+ V4L2CaptureDelegate::GetNumPlanesForFourCc(pixelformat_fourcc);
+ if (num_v4l2_planes == 0u)
+ return false;
+ DCHECK_LE(num_v4l2_planes, static_cast<size_t>(VIDEO_MAX_PLANES));
+ format->fmt.pix_mp.num_planes = num_v4l2_planes;
+
+ v4l2_planes_.resize(num_v4l2_planes);
+ return true;
+}
+
+void V4L2CaptureDelegateMultiPlane::FinishFillingV4L2Buffer(
+ v4l2_buffer* buffer) const {
+ buffer->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ buffer->length = v4l2_planes_.size();
+
+ static const struct v4l2_plane empty_plane = {};
+ std::fill(v4l2_planes_.begin(), v4l2_planes_.end(), empty_plane);
+ buffer->m.planes = v4l2_planes_.data();
+}
+
+void V4L2CaptureDelegateMultiPlane::SetPayloadSize(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_buffer& buffer) const {
+ for (size_t i = 0; i < v4l2_planes_.size() && i < buffer.length; i++)
+ buffer_tracker->SetPlanePayloadSize(i, buffer.m.planes[i].bytesused);
+}
+
+void V4L2CaptureDelegateMultiPlane::SendBuffer(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const {
+ DCHECK_EQ(capture_format().pixel_format, VIDEO_CAPTURE_PIXEL_FORMAT_I420);
+ const size_t y_stride = format.fmt.pix_mp.plane_fmt[0].bytesperline;
+ const size_t u_stride = format.fmt.pix_mp.plane_fmt[1].bytesperline;
+ const size_t v_stride = format.fmt.pix_mp.plane_fmt[2].bytesperline;
+ DCHECK_GE(y_stride, 1u * capture_format().frame_size.width());
+ DCHECK_GE(u_stride, 1u * capture_format().frame_size.width() / 2);
+ DCHECK_GE(v_stride, 1u * capture_format().frame_size.width() / 2);
+ client()->OnIncomingCapturedYuvData(
+ buffer_tracker->GetPlaneStart(0), buffer_tracker->GetPlaneStart(1),
+ buffer_tracker->GetPlaneStart(2), y_stride, u_stride, v_stride,
+ capture_format(), rotation(), base::TimeTicks::Now());
+}
+
+bool V4L2CaptureDelegateMultiPlane::BufferTrackerMPlane::Init(
+ int fd,
+ const v4l2_buffer& buffer) {
+ for (size_t p = 0; p < buffer.length; ++p) {
+ // Some devices require mmap() to be called with both READ and WRITE.
+ // See http://crbug.com/178582.
+ void* const start =
+ mmap(NULL, buffer.m.planes[p].length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, buffer.m.planes[p].m.mem_offset);
+ if (start == MAP_FAILED) {
+ DLOG(ERROR) << "Error mmap()ing a V4L2 buffer into userspace";
+ return false;
+ }
+ AddMmapedPlane(static_cast<uint8_t*>(start), buffer.m.planes[p].length);
+ DVLOG(3) << "Mmap()ed plane #" << p << " of " << buffer.m.planes[p].length
+ << "B";
+ }
+ return true;
+}
+
+} // namespace media
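
The DCHECKs in SendBuffer() above encode I420 geometry: the Y plane spans the
full frame while U and V are subsampled by two in each dimension. A quick
worked example of the minimum strides and plane sizes:

// I420 plane arithmetic for a 640x480 frame.
#include <cstddef>
#include <iostream>

int main() {
  const size_t width = 640, height = 480;
  const size_t y_stride = width;       // Must be >= width.
  const size_t uv_stride = width / 2;  // Must be >= width / 2 for U and V.
  const size_t y_size = y_stride * height;
  const size_t uv_size = uv_stride * (height / 2);
  std::cout << "Y: " << y_size << " B, U: " << uv_size << " B, V: " << uv_size
            << " B, total: " << (y_size + 2 * uv_size) << " B\n";  // 460800 B.
}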
diff --git a/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h b/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h
new file mode 100644
index 0000000..ee6cad3
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate_multi_plane.h
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
+
+#if defined(OS_OPENBSD)
+#error "OpenBSD does not support MPlane capture API."
+#endif
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+// V4L2 specifics for MPLANE API.
+class V4L2CaptureDelegateMultiPlane final : public V4L2CaptureDelegate {
+ public:
+ V4L2CaptureDelegateMultiPlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency);
+
+ private:
+ // BufferTracker derivation to implement construction semantics for MPLANE.
+ class BufferTrackerMPlane final : public BufferTracker {
+ public:
+ bool Init(int fd, const v4l2_buffer& buffer) override;
+
+ private:
+ ~BufferTrackerMPlane() override {}
+ };
+
+ ~V4L2CaptureDelegateMultiPlane() override;
+
+ // V4L2CaptureDelegate virtual methods implementation.
+ scoped_refptr<BufferTracker> CreateBufferTracker() const override;
+ bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const override;
+ void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const override;
+ void SetPayloadSize(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_buffer& buffer) const override;
+ void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const override;
+
+ // Vector to allocate and track as many v4l2_plane structs as planes, needed
+ // for v4l2_buffer.m.planes. This is a scratchpad marked mutable to enable
+ // using it in otherwise const methods.
+ mutable std::vector<struct v4l2_plane> v4l2_planes_;
+};
+
+} // namespace media
+
+#endif  // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_MULTI_PLANE_H_
diff --git a/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc b/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc
new file mode 100644
index 0000000..722eedc
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate_single_plane.cc
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/v4l2_capture_delegate_single_plane.h"
+
+#include <sys/mman.h>
+
+namespace media {
+
+scoped_refptr<V4L2CaptureDelegate::BufferTracker>
+V4L2CaptureDelegateSinglePlane::CreateBufferTracker() const {
+ return make_scoped_refptr(new BufferTrackerSPlane());
+}
+
+bool V4L2CaptureDelegateSinglePlane::FillV4L2Format(
+ v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const {
+ format->fmt.pix.width = width;
+ format->fmt.pix.height = height;
+ format->fmt.pix.pixelformat = pixelformat_fourcc;
+ return true;
+}
+
+void V4L2CaptureDelegateSinglePlane::FinishFillingV4L2Buffer(
+ v4l2_buffer* buffer) const {
+ buffer->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+}
+
+void V4L2CaptureDelegateSinglePlane::SetPayloadSize(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_buffer& buffer) const {
+ buffer_tracker->SetPlanePayloadSize(0, buffer.bytesused);
+}
+
+void V4L2CaptureDelegateSinglePlane::SendBuffer(
+ const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const {
+ client()->OnIncomingCapturedData(
+ buffer_tracker->GetPlaneStart(0), buffer_tracker->GetPlanePayloadSize(0),
+ capture_format(), rotation(), base::TimeTicks::Now());
+}
+
+bool V4L2CaptureDelegateSinglePlane::BufferTrackerSPlane::Init(
+ int fd,
+ const v4l2_buffer& buffer) {
+ // Some devices require mmap() to be called with both READ and WRITE.
+ // See http://crbug.com/178582.
+ void* const start = mmap(NULL, buffer.length, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, buffer.m.offset);
+ if (start == MAP_FAILED) {
+ DLOG(ERROR) << "Error mmap()ing a V4L2 buffer into userspace";
+ return false;
+ }
+ AddMmapedPlane(static_cast<uint8_t*>(start), buffer.length);
+ return true;
+}
+
+} // namespace media
diff --git a/media/capture/video/linux/v4l2_capture_delegate_single_plane.h b/media/capture/video/linux/v4l2_capture_delegate_single_plane.h
new file mode 100644
index 0000000..5124f14
--- /dev/null
+++ b/media/capture/video/linux/v4l2_capture_delegate_single_plane.h
@@ -0,0 +1,56 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
+
+#include "base/memory/ref_counted.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+} // namespace base
+
+namespace media {
+
+// V4L2 specifics for SPLANE API.
+class V4L2CaptureDelegateSinglePlane final : public V4L2CaptureDelegate {
+ public:
+ V4L2CaptureDelegateSinglePlane(
+ const VideoCaptureDevice::Name& device_name,
+ const scoped_refptr<base::SingleThreadTaskRunner>& v4l2_task_runner,
+ int power_line_frequency)
+ : V4L2CaptureDelegate(device_name,
+ v4l2_task_runner,
+ power_line_frequency) {}
+
+ private:
+ // BufferTracker derivation to implement construction semantics for SPLANE.
+ class BufferTrackerSPlane final : public BufferTracker {
+ public:
+ bool Init(int fd, const v4l2_buffer& buffer) override;
+
+ private:
+ ~BufferTrackerSPlane() override {}
+ };
+
+ ~V4L2CaptureDelegateSinglePlane() override {}
+
+ // V4L2CaptureDelegate virtual methods implementation.
+ scoped_refptr<BufferTracker> CreateBufferTracker() const override;
+ bool FillV4L2Format(v4l2_format* format,
+ uint32_t width,
+ uint32_t height,
+ uint32_t pixelformat_fourcc) const override;
+ void FinishFillingV4L2Buffer(v4l2_buffer* buffer) const override;
+ void SetPayloadSize(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_buffer& buffer) const override;
+ void SendBuffer(const scoped_refptr<BufferTracker>& buffer_tracker,
+ const v4l2_format& format) const override;
+};
+
+} // namespace media
+
+#endif  // MEDIA_VIDEO_CAPTURE_LINUX_V4L2_CAPTURE_DELEGATE_SINGLE_PLANE_H_
diff --git a/media/capture/video/linux/video_capture_device_chromeos.cc b/media/capture/video/linux/video_capture_device_chromeos.cc
new file mode 100644
index 0000000..3f2d761
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_chromeos.cc
@@ -0,0 +1,114 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/video_capture_device_chromeos.h"
+
+#include "base/bind.h"
+#include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+#include "ui/gfx/display.h"
+#include "ui/gfx/display_observer.h"
+#include "ui/gfx/screen.h"
+
+namespace media {
+
+// This is a delegate class used to transfer Display change events from the UI
+// thread to the media thread.
+class VideoCaptureDeviceChromeOS::ScreenObserverDelegate
+ : public gfx::DisplayObserver,
+ public base::RefCountedThreadSafe<ScreenObserverDelegate> {
+ public:
+ ScreenObserverDelegate(
+ VideoCaptureDeviceChromeOS* capture_device,
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : capture_device_(capture_device),
+ ui_task_runner_(ui_task_runner),
+ capture_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::AddObserverOnUIThread, this));
+ }
+
+ void RemoveObserver() {
+ DCHECK(capture_task_runner_->BelongsToCurrentThread());
+ capture_device_ = NULL;
+ ui_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::RemoveObserverOnUIThread, this));
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<ScreenObserverDelegate>;
+
+ ~ScreenObserverDelegate() override { DCHECK(!capture_device_); }
+
+ void OnDisplayAdded(const gfx::Display& /*new_display*/) override {}
+ void OnDisplayRemoved(const gfx::Display& /*old_display*/) override {}
+ void OnDisplayMetricsChanged(const gfx::Display& display,
+ uint32_t metrics) override {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ if (!(metrics & DISPLAY_METRIC_ROTATION))
+ return;
+ SendDisplayRotation(display);
+ }
+
+ void AddObserverOnUIThread() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ gfx::Screen* screen =
+ gfx::Screen::GetScreenByType(gfx::SCREEN_TYPE_ALTERNATE);
+ if (screen) {
+ screen->AddObserver(this);
+ SendDisplayRotation(screen->GetPrimaryDisplay());
+ }
+ }
+
+ void RemoveObserverOnUIThread() {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ gfx::Screen* screen =
+ gfx::Screen::GetScreenByType(gfx::SCREEN_TYPE_ALTERNATE);
+ if (screen)
+ screen->RemoveObserver(this);
+ }
+
+  // Posts the screen rotation change from the UI thread to the capture
+  // thread.
+ void SendDisplayRotation(const gfx::Display& display) {
+ DCHECK(ui_task_runner_->BelongsToCurrentThread());
+ capture_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&ScreenObserverDelegate::SendDisplayRotationOnCaptureThread,
+ this, display));
+ }
+
+ void SendDisplayRotationOnCaptureThread(const gfx::Display& display) {
+ DCHECK(capture_task_runner_->BelongsToCurrentThread());
+ if (capture_device_)
+ capture_device_->SetDisplayRotation(display);
+ }
+
+ VideoCaptureDeviceChromeOS* capture_device_;
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> capture_task_runner_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ScreenObserverDelegate);
+};
+
+VideoCaptureDeviceChromeOS::VideoCaptureDeviceChromeOS(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const Name& device_name)
+ : VideoCaptureDeviceLinux(device_name),
+ screen_observer_delegate_(
+ new ScreenObserverDelegate(this, ui_task_runner)) {
+}
+
+VideoCaptureDeviceChromeOS::~VideoCaptureDeviceChromeOS() {
+ screen_observer_delegate_->RemoveObserver();
+}
+
+void VideoCaptureDeviceChromeOS::SetDisplayRotation(
+ const gfx::Display& display) {
+ if (display.IsInternal())
+ SetRotation(display.rotation() * 90);
+}
+
+} // namespace media
diff --git a/media/capture/video/linux/video_capture_device_chromeos.h b/media/capture/video/linux/video_capture_device_chromeos.h
new file mode 100644
index 0000000..832d8ce
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_chromeos.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
+
+#include "media/capture/video/linux/video_capture_device_linux.h"
+
+namespace gfx {
+class Display;
+} // namespace gfx
+
+namespace media {
+
+// This class is functionally the same as VideoCaptureDeviceLinux, with the
+// exception that it is aware of the orientation of the internal Display. When
+// the internal Display is rotated, the frames captured are rotated to match.
+class VideoCaptureDeviceChromeOS : public VideoCaptureDeviceLinux {
+ public:
+ explicit VideoCaptureDeviceChromeOS(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
+ const Name& device_name);
+ ~VideoCaptureDeviceChromeOS() override;
+
+ private:
+ class ScreenObserverDelegate;
+
+ void SetDisplayRotation(const gfx::Display& display);
+ scoped_refptr<ScreenObserverDelegate> screen_observer_delegate_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceChromeOS);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_CHROMEOS_H_
diff --git a/media/capture/video/linux/video_capture_device_factory_linux.cc b/media/capture/video/linux/video_capture_device_factory_linux.cc
new file mode 100644
index 0000000..c37ad28
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_factory_linux.cc
@@ -0,0 +1,225 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/video_capture_device_factory_linux.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#if defined(OS_OPENBSD)
+#include <sys/videoio.h>
+#else
+#include <linux/videodev2.h>
+#endif
+#include <sys/ioctl.h>
+
+#include "base/files/file_enumerator.h"
+#include "base/files/scoped_file.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/strings/stringprintf.h"
+#if defined(OS_CHROMEOS)
+#include "media/capture/video/linux/video_capture_device_chromeos.h"
+#endif
+#include "media/capture/video/linux/video_capture_device_linux.h"
+
+namespace media {
+
+static bool HasUsableFormats(int fd, uint32 capabilities) {
+ const std::list<uint32_t>& usable_fourccs =
+ VideoCaptureDeviceLinux::GetListOfUsableFourCCs(false);
+
+ static const struct {
+ int capability;
+ v4l2_buf_type buf_type;
+ } kCapabilityAndBufferTypes[] = {
+ {V4L2_CAP_VIDEO_CAPTURE, V4L2_BUF_TYPE_VIDEO_CAPTURE},
+ {V4L2_CAP_VIDEO_CAPTURE_MPLANE, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE}};
+
+ for (const auto& capability_and_buffer_type : kCapabilityAndBufferTypes) {
+ v4l2_fmtdesc fmtdesc = {};
+ if (capabilities & capability_and_buffer_type.capability) {
+ fmtdesc.type = capability_and_buffer_type.buf_type;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) == 0;
+ ++fmtdesc.index) {
+ if (std::find(usable_fourccs.begin(), usable_fourccs.end(),
+ fmtdesc.pixelformat) != usable_fourccs.end())
+ return true;
+ }
+ }
+ }
+ DLOG(ERROR) << "No usable formats found";
+ return false;
+}
+
+static std::list<float> GetFrameRateList(int fd,
+ uint32 fourcc,
+ uint32 width,
+ uint32 height) {
+ std::list<float> frame_rates;
+
+ v4l2_frmivalenum frame_interval = {};
+ frame_interval.pixel_format = fourcc;
+ frame_interval.width = width;
+ frame_interval.height = height;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &frame_interval)) ==
+ 0;
+ ++frame_interval.index) {
+ if (frame_interval.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
+ if (frame_interval.discrete.numerator != 0) {
+ frame_rates.push_back(
+ frame_interval.discrete.denominator /
+ static_cast<float>(frame_interval.discrete.numerator));
+ }
+ } else if (frame_interval.type == V4L2_FRMIVAL_TYPE_CONTINUOUS ||
+ frame_interval.type == V4L2_FRMIVAL_TYPE_STEPWISE) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ break;
+ }
+ }
+ // Some devices, e.g. Kinect, do not enumerate any frame rates, see
+ // http://crbug.com/412284. Set their frame_rate to zero.
+ if (frame_rates.empty())
+ frame_rates.push_back(0);
+ return frame_rates;
+}
+
+static void GetSupportedFormatsForV4L2BufferType(
+ int fd,
+ v4l2_buf_type buf_type,
+ media::VideoCaptureFormats* supported_formats) {
+ v4l2_fmtdesc v4l2_format = {};
+ v4l2_format.type = buf_type;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FMT, &v4l2_format)) == 0;
+ ++v4l2_format.index) {
+ VideoCaptureFormat supported_format;
+ supported_format.pixel_format =
+ VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(
+ v4l2_format.pixelformat);
+
+ if (supported_format.pixel_format == VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN)
+ continue;
+
+ v4l2_frmsizeenum frame_size = {};
+ frame_size.pixel_format = v4l2_format.pixelformat;
+ for (; HANDLE_EINTR(ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frame_size)) == 0;
+ ++frame_size.index) {
+ if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
+ supported_format.frame_size.SetSize(frame_size.discrete.width,
+ frame_size.discrete.height);
+ } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE ||
+ frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
+ // TODO(mcasas): see http://crbug.com/249953, support these devices.
+ NOTIMPLEMENTED();
+ }
+
+ const std::list<float> frame_rates = GetFrameRateList(
+ fd, v4l2_format.pixelformat, frame_size.discrete.width,
+ frame_size.discrete.height);
+ for (const auto& frame_rate : frame_rates) {
+ supported_format.frame_rate = frame_rate;
+ supported_formats->push_back(supported_format);
+ DVLOG(1) << VideoCaptureFormat::ToString(supported_format);
+ }
+ }
+ }
+}
+
+VideoCaptureDeviceFactoryLinux::VideoCaptureDeviceFactoryLinux(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : ui_task_runner_(ui_task_runner) {
+}
+
+VideoCaptureDeviceFactoryLinux::~VideoCaptureDeviceFactoryLinux() {
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryLinux::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+#if defined(OS_CHROMEOS)
+ VideoCaptureDeviceChromeOS* self =
+ new VideoCaptureDeviceChromeOS(ui_task_runner_, device_name);
+#else
+ VideoCaptureDeviceLinux* self = new VideoCaptureDeviceLinux(device_name);
+#endif
+ if (!self)
+ return scoped_ptr<VideoCaptureDevice>();
+  // Test opening the device driver, to make sure it is available. We will
+  // reopen it in our worker thread when someone allocates the camera.
+ base::ScopedFD fd(HANDLE_EINTR(open(device_name.id().c_str(), O_RDONLY)));
+ if (!fd.is_valid()) {
+ DLOG(ERROR) << "Cannot open device";
+ delete self;
+ return scoped_ptr<VideoCaptureDevice>();
+ }
+
+ return scoped_ptr<VideoCaptureDevice>(self);
+}
+
+void VideoCaptureDeviceFactoryLinux::GetDeviceNames(
+ VideoCaptureDevice::Names* const device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(device_names->empty());
+ const base::FilePath path("/dev/");
+ base::FileEnumerator enumerator(path, false, base::FileEnumerator::FILES,
+ "video*");
+
+ while (!enumerator.Next().empty()) {
+ const base::FileEnumerator::FileInfo info = enumerator.GetInfo();
+ const std::string unique_id = path.value() + info.GetName().value();
+ const base::ScopedFD fd(HANDLE_EINTR(open(unique_id.c_str(), O_RDONLY)));
+ if (!fd.is_valid()) {
+ DLOG(ERROR) << "Couldn't open " << info.GetName().value();
+ continue;
+ }
+ // Test if this is a V4L2 capture device and if it has at least one
+ // supported capture format. Devices that have capture and output
+ // capabilities at the same time are memory-to-memory and are skipped, see
+ // http://crbug.com/139356.
+ v4l2_capability cap;
+ if ((HANDLE_EINTR(ioctl(fd.get(), VIDIOC_QUERYCAP, &cap)) == 0) &&
+ ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE ||
+ cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT) &&
+ !(cap.capabilities & V4L2_CAP_VIDEO_OUTPUT_MPLANE)) &&
+ HasUsableFormats(fd.get(), cap.capabilities)) {
+ device_names->push_back(VideoCaptureDevice::Name(
+ base::StringPrintf("%s", cap.card), unique_id,
+ (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
+ ? VideoCaptureDevice::Name::V4L2_MULTI_PLANE
+ : VideoCaptureDevice::Name::V4L2_SINGLE_PLANE));
+ }
+ }
+}
+
+void VideoCaptureDeviceFactoryLinux::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (device.id().empty())
+ return;
+ base::ScopedFD fd(HANDLE_EINTR(open(device.id().c_str(), O_RDONLY)));
+ if (!fd.is_valid()) // Failed to open this device.
+ return;
+ supported_formats->clear();
+
+ DCHECK_NE(device.capture_api_type(),
+ VideoCaptureDevice::Name::API_TYPE_UNKNOWN);
+ const v4l2_buf_type buf_type =
+ (device.capture_api_type() == VideoCaptureDevice::Name::V4L2_MULTI_PLANE)
+ ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
+ : V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ GetSupportedFormatsForV4L2BufferType(fd.get(), buf_type, supported_formats);
+}
+
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryLinux(ui_task_runner);
+}
+
+} // namespace media
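
GetDeviceNames() above classifies each /dev/video* node by its VIDIOC_QUERYCAP
capability bits, skipping memory-to-memory devices that expose both capture and
output. The standalone probe below performs the same check on a single node;
/dev/video0 is an assumed path, and EINTR handling is omitted for brevity.

// Capability probe sketch (Linux-only).
#include <fcntl.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const int fd = open("/dev/video0", O_RDONLY);
  if (fd < 0) {
    perror("open");
    return 1;
  }
  v4l2_capability cap = {};
  if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0 &&
      (cap.capabilities &
       (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_CAPTURE_MPLANE)) &&
      !(cap.capabilities &
        (V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE))) {
    printf("%s: capture device (%s)\n",
           reinterpret_cast<const char*>(cap.card),
           (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) ? "MPLANE"
                                                              : "SPLANE");
  } else {
    printf("not a usable capture device\n");
  }
  close(fd);
  return 0;
}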
diff --git a/media/capture/video/linux/video_capture_device_factory_linux.h b/media/capture/video/linux/video_capture_device_factory_linux.h
new file mode 100644
index 0000000..5a9c140
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_factory_linux.h
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a VideoCaptureDeviceFactoryLinux class.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+#include "media/base/video_capture_types.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Linux
+// devices.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryLinux
+ : public VideoCaptureDeviceFactory {
+ public:
+ explicit VideoCaptureDeviceFactoryLinux(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+ ~VideoCaptureDeviceFactoryLinux() override;
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryLinux);
+};
+
+} // namespace media
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_FACTORY_LINUX_H_
diff --git a/media/capture/video/linux/video_capture_device_linux.cc b/media/capture/video/linux/video_capture_device_linux.cc
new file mode 100644
index 0000000..b00b698
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_linux.cc
@@ -0,0 +1,147 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/linux/video_capture_device_linux.h"
+
+#if defined(OS_OPENBSD)
+#include <sys/videoio.h>
+#else
+#include <linux/videodev2.h>
+#endif
+
+#include <list>
+
+#include "base/bind.h"
+#include "base/strings/stringprintf.h"
+#include "media/capture/video/linux/v4l2_capture_delegate.h"
+
+namespace media {
+
+// USB VID and PID are both 4 bytes long.
+static const size_t kVidPidSize = 4;
+
+// /sys/class/video4linux/video{N}/device is a symlink to the corresponding
+// USB device info directory.
+static const char kVidPathTemplate[] =
+ "/sys/class/video4linux/%s/device/../idVendor";
+static const char kPidPathTemplate[] =
+ "/sys/class/video4linux/%s/device/../idProduct";
+
+static bool ReadIdFile(const std::string path, std::string* id) {
+ char id_buf[kVidPidSize];
+ FILE* file = fopen(path.c_str(), "rb");
+ if (!file)
+ return false;
+ const bool success = fread(id_buf, kVidPidSize, 1, file) == 1;
+ fclose(file);
+ if (!success)
+ return false;
+ id->append(id_buf, kVidPidSize);
+ return true;
+}
+
+// Translates Video4Linux pixel formats to Chromium pixel formats.
+// static
+VideoCapturePixelFormat
+VideoCaptureDeviceLinux::V4l2FourCcToChromiumPixelFormat(uint32 v4l2_fourcc) {
+ return V4L2CaptureDelegate::V4l2FourCcToChromiumPixelFormat(v4l2_fourcc);
+}
+
+// Gets a prioritised list of usable FourCC formats.
+// static
+std::list<uint32_t> VideoCaptureDeviceLinux::GetListOfUsableFourCCs(
+ bool favour_mjpeg) {
+ return V4L2CaptureDelegate::GetListOfUsableFourCcs(favour_mjpeg);
+}
+
+const std::string VideoCaptureDevice::Name::GetModel() const {
+ // |unique_id| is of the form "/dev/video2". |file_name| is "video2".
+ const std::string dev_dir = "/dev/";
+ DCHECK_EQ(0, unique_id_.compare(0, dev_dir.length(), dev_dir));
+ const std::string file_name =
+ unique_id_.substr(dev_dir.length(), unique_id_.length());
+
+ const std::string vidPath =
+ base::StringPrintf(kVidPathTemplate, file_name.c_str());
+ const std::string pidPath =
+ base::StringPrintf(kPidPathTemplate, file_name.c_str());
+
+ std::string usb_id;
+ if (!ReadIdFile(vidPath, &usb_id))
+ return "";
+ usb_id.append(":");
+ if (!ReadIdFile(pidPath, &usb_id))
+ return "";
+
+ return usb_id;
+}
+
+VideoCaptureDeviceLinux::VideoCaptureDeviceLinux(const Name& device_name)
+ : v4l2_thread_("V4L2CaptureThread"), device_name_(device_name) {
+}
+
+VideoCaptureDeviceLinux::~VideoCaptureDeviceLinux() {
+  // If the thread is still running, the device has not been
+  // StopAndDeAllocate()d properly.
+ DCHECK(!v4l2_thread_.IsRunning());
+ v4l2_thread_.Stop();
+}
+
+void VideoCaptureDeviceLinux::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(!capture_impl_);
+ if (v4l2_thread_.IsRunning())
+ return; // Wrong state.
+ v4l2_thread_.Start();
+
+ const int line_frequency =
+ TranslatePowerLineFrequencyToV4L2(GetPowerLineFrequencyForLocation());
+ capture_impl_ = V4L2CaptureDelegate::CreateV4L2CaptureDelegate(
+ device_name_, v4l2_thread_.task_runner(), line_frequency);
+ if (!capture_impl_) {
+ client->OnError("Failed to create VideoCaptureDelegate");
+ return;
+ }
+ v4l2_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2CaptureDelegate::AllocateAndStart, capture_impl_,
+ params.requested_format.frame_size.width(),
+ params.requested_format.frame_size.height(),
+ params.requested_format.frame_rate, base::Passed(&client)));
+}
+
+void VideoCaptureDeviceLinux::StopAndDeAllocate() {
+ if (!v4l2_thread_.IsRunning())
+ return; // Wrong state.
+ v4l2_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2CaptureDelegate::StopAndDeAllocate, capture_impl_));
+ v4l2_thread_.Stop();
+
+ capture_impl_ = NULL;
+}
+
+void VideoCaptureDeviceLinux::SetRotation(int rotation) {
+ if (v4l2_thread_.IsRunning()) {
+ v4l2_thread_.message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&V4L2CaptureDelegate::SetRotation, capture_impl_, rotation));
+ }
+}
+
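+// Maps a Chromium power line frequency hint to the corresponding V4L2 control
+// value; e.g. kPowerLine50Hz maps to V4L2_CID_POWER_LINE_FREQUENCY_50HZ, and
+// any unrecognized value falls back to V4L2_CID_POWER_LINE_FREQUENCY_AUTO.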
+// static
+int VideoCaptureDeviceLinux::TranslatePowerLineFrequencyToV4L2(int frequency) {
+ switch (frequency) {
+ case kPowerLine50Hz:
+ return V4L2_CID_POWER_LINE_FREQUENCY_50HZ;
+ case kPowerLine60Hz:
+ return V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
+ default:
+ // If we have no idea of the frequency, at least try and set it to AUTO.
+ return V4L2_CID_POWER_LINE_FREQUENCY_AUTO;
+ }
+}
+
+} // namespace media
diff --git a/media/capture/video/linux/video_capture_device_linux.h b/media/capture/video/linux/video_capture_device_linux.h
new file mode 100644
index 0000000..2eb2eaf
--- /dev/null
+++ b/media/capture/video/linux/video_capture_device_linux.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Linux specific implementation of VideoCaptureDevice.
+// V4L2 is used for capturing. V4L2 does not provide its own thread for
+// capturing so this implementation uses a Chromium thread for fetching frames
+// from V4L2.
+
+#ifndef MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_LINUX_H_
+#define MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_LINUX_H_
+
+#include <list>
+#include <string>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+class V4L2CaptureDelegate;
+
+// Linux V4L2 implementation of VideoCaptureDevice.
+class VideoCaptureDeviceLinux : public VideoCaptureDevice {
+ public:
+ static VideoCapturePixelFormat V4l2FourCcToChromiumPixelFormat(
+ uint32 v4l2_fourcc);
+ static std::list<uint32_t> GetListOfUsableFourCCs(bool favour_mjpeg);
+
+ explicit VideoCaptureDeviceLinux(const Name& device_name);
+ ~VideoCaptureDeviceLinux() override;
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) override;
+ void StopAndDeAllocate() override;
+
+ protected:
+ void SetRotation(int rotation);
+
+ private:
+ static int TranslatePowerLineFrequencyToV4L2(int frequency);
+
+ // Internal delegate doing the actual capture setting, buffer allocation and
+ // circulation with the V4L2 API. Created and deleted on the thread where
+ // VideoCaptureDeviceLinux lives, but otherwise operating on |v4l2_thread_|.
+ scoped_refptr<V4L2CaptureDelegate> capture_impl_;
+
+ base::Thread v4l2_thread_; // Thread used for reading data from the device.
+
+ const Name device_name_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceLinux);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_LINUX_VIDEO_CAPTURE_DEVICE_LINUX_H_
diff --git a/media/capture/video/mac/DEPS b/media/capture/video/mac/DEPS
new file mode 100644
index 0000000..58a1003
--- /dev/null
+++ b/media/capture/video/mac/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/decklink",
+]
diff --git a/media/capture/video/mac/platform_video_capturing_mac.h b/media/capture/video/mac/platform_video_capturing_mac.h
new file mode 100644
index 0000000..13cc6a2
--- /dev/null
+++ b/media/capture/video/mac/platform_video_capturing_mac.h
@@ -0,0 +1,53 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+// Protocol representing platform-dependent video capture on Mac, implemented
+// by both QTKit and AVFoundation APIs.
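+//
+// A sketch of the expected call sequence, where |capturer| is an instance of
+// any class implementing this protocol (the surrounding names are
+// illustrative only):
+//   id<PlatformVideoCapturingMac> capturer =
+//       [[CapturerImpl alloc] initWithFrameReceiver:receiver];
+//   [capturer setCaptureDevice:deviceId];
+//   [capturer setCaptureHeight:480 width:640 frameRate:30.0f];
+//   [capturer startCapture];
+//   ...
+//   [capturer stopCapture];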
+@protocol PlatformVideoCapturingMac<NSObject>
+
+// Initializes the instance by calling NSObject's |init| and registers a frame
+// receiver at the same time. The frame receiver must be initialized beforehand
+// and must outlive the capturing implementation.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets the frame receiver. The registration is performed under mutual
+// exclusion.
+// TODO(mcasas): This method and stopCapture() are always called in sequence and
+// this one is only used to clear the frameReceiver, investigate if both can be
+// merged.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use, identified by the |deviceId| argument. The
+// device names are usually obtained via VideoCaptureDevice::GetDeviceNames().
+// This method also configures all device properties except those in
+// setCaptureHeight:width:frameRate:. If |deviceId| is nil, capture is stopped
+// and all potential configuration is torn down. Returns YES on success, NO
+// otherwise.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties.
+- (BOOL)setCaptureHeight:(int)height
+ width:(int)width
+ frameRate:(float)frameRate;
+
+// Starts video capturing, registers observers. Returns YES on success, NO
+// otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing, unregisters observers.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_PLATFORM_VIDEO_CAPTURING_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_avfoundation_mac.h b/media/capture/video/mac/video_capture_device_avfoundation_mac.h
new file mode 100644
index 0000000..539dffc
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_avfoundation_mac.h
@@ -0,0 +1,122 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+#import "base/mac/scoped_nsobject.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#import "media/base/mac/avfoundation_glue.h"
+#include "media/base/video_capture_types.h"
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+@class CrAVCaptureDevice;
+@class CrAVCaptureSession;
+@class CrAVCaptureVideoDataOutput;
+
+// Class used by VideoCaptureDeviceMac (VCDM) for video capture using
+// AVFoundation API. This class lives inside the thread created by its owner
+// VCDM.
+//
+// * Clients (VCDM) should call +deviceNames to fetch the list of devices
+// available in the system; this method returns the list of device names that
+// have to be used with -setCaptureDevice:.
+// * Prior to any use, clients (VCDM) must call -initWithFrameReceiver: to
+// initialize an object of this class and register a |frameReceiver_|.
+// * Frame receiver registration or removal can also happen via explicit call
+// to -setFrameReceiver:. Re-registration via this method is safe and allowed,
+// even during capture.
+// * Method -setCaptureDevice: must be called at least once with a device
+// identifier from +deviceNames. Creates all the necessary AVFoundation
+// objects on first call; it connects them ready for capture every time.
+// This method should not be called during capture (i.e. between
+// -startCapture and -stopCapture).
+// * -setCaptureWidth:height:frameRate: is called if a resolution or frame rate
+// different from the default one set by -setCaptureDevice: is needed. This
+// method must be called after -setCaptureDevice: and should not be called
+// during capture.
+// * -startCapture registers the notification listeners and starts the
+// capture. The capture can be stopped using -stopCapture, and can be
+// restarted and re-stopped multiple times, with or without reconfiguring the
+// device in between.
+// * -setCaptureDevice: can be called with a |nil| value, in which case it
+// stops the capture and disconnects the library objects; this step is not
+// strictly necessary.
+// * Deallocation of the library objects happens gracefully on destruction of
+// the VideoCaptureDeviceAVFoundation object.
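+//
+// A condensed usage sketch of the sequence above, where |receiver| is a
+// media::VideoCaptureDeviceMac* and |deviceId| comes from +deviceNames:
+//   VideoCaptureDeviceAVFoundation* capturer =
+//       [[VideoCaptureDeviceAVFoundation alloc]
+//           initWithFrameReceiver:receiver];
+//   [capturer setCaptureDevice:deviceId];
+//   [capturer setCaptureHeight:720 width:1280 frameRate:30.0f];
+//   [capturer startCapture];
+//   ...
+//   [capturer stopCapture];
+//   [capturer setCaptureDevice:nil];  // Optional explicit teardown.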
+//
+@interface VideoCaptureDeviceAVFoundation
+ : NSObject<CrAVCaptureVideoDataOutputSampleBufferDelegate,
+ PlatformVideoCapturingMac> {
+ @private
+ // The following attributes are set via -setCaptureHeight:width:frameRate:.
+ int frameWidth_;
+ int frameHeight_;
+ float frameRate_;
+
+ base::Lock lock_; // Protects concurrent setting and using of frameReceiver_.
+ media::VideoCaptureDeviceMac* frameReceiver_; // weak.
+
+ base::scoped_nsobject<CrAVCaptureSession> captureSession_;
+
+ // |captureDevice_| is an object coming from AVFoundation, used only to be
+ // plugged in |captureDeviceInput_| and to query for session preset support.
+ CrAVCaptureDevice* captureDevice_;
+ // |captureDeviceInput_| is owned by |captureSession_|.
+ CrAVCaptureDeviceInput* captureDeviceInput_;
+ base::scoped_nsobject<CrAVCaptureVideoDataOutput> captureVideoDataOutput_;
+
+ base::ThreadChecker main_thread_checker_;
+ base::ThreadChecker callback_thread_checker_;
+}
+
+// Returns a dictionary of capture devices with friendly name and unique id.
++ (NSDictionary*)deviceNames;
+
+// Retrieves the supported capture formats for a given device |name|.
++ (void)getDevice:(const media::VideoCaptureDevice::Name&)name
+ supportedFormats:(media::VideoCaptureFormats*)formats;
+
+// Initializes the instance and the underlying capture session and registers the
+// frame receiver.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets the frame receiver.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use by name, retrieved via |deviceNames|. Once
+// the deviceId is known, the library objects are created if needed and
+// connected for the capture, and a default resolution is set. If deviceId is
+// nil, any ongoing capture is stopped and the library objects are
+// disconnected. Returns YES on success, NO otherwise. This method should not
+// be called during capture.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties for the capture session and the video data
+// output; this means it MUST be called after setCaptureDevice:. Returns YES on
+// success, NO otherwise.
+- (BOOL)setCaptureHeight:(int)height
+ width:(int)width
+ frameRate:(float)frameRate;
+
+// Starts video capturing and registers the notification listeners. Must be
+// called after setCaptureDevice: and, optionally, after
+// setCaptureHeight:width:frameRate:. Returns YES on success, NO otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing and stops listening to notifications.
+- (void)stopCapture;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_AVFOUNDATION_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_avfoundation_mac.mm b/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
new file mode 100644
index 0000000..54ed686
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_avfoundation_mac.mm
@@ -0,0 +1,354 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
+
+#import <CoreVideo/CoreVideo.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#include "ui/gfx/geometry/size.h"
+
+// Prefer MJPEG if frame width or height is larger than this.
+static const int kMjpegWidthThreshold = 640;
+static const int kMjpegHeightThreshold = 480;
+
+// This function translates Mac Core Video pixel formats to Chromium pixel
+// formats. Chromium pixel formats are sorted in order of preference.
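+// For example, kCVPixelFormatType_422YpCbCr8 maps to
+// media::VIDEO_CAPTURE_PIXEL_FORMAT_UYVY, and any code not listed below maps
+// to media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN.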
+media::VideoCapturePixelFormat FourCCToChromiumPixelFormat(FourCharCode code) {
+ switch (code) {
+ case kCVPixelFormatType_422YpCbCr8:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_UYVY;
+ case CoreMediaGlue::kCMPixelFormat_422YpCbCr8_yuvs:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_YUY2;
+ case CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG;
+ default:
+ return media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+ }
+}
+
+@implementation VideoCaptureDeviceAVFoundation
+
+#pragma mark Class methods
+
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
+ // At this stage we already know that AVFoundation is supported and that the
+ // whole library has been loaded and initialized by the device monitoring.
+ NSArray* devices = [AVCaptureDeviceGlue devices];
+ for (CrAVCaptureDevice* device in devices) {
+ if (([device hasMediaType:AVFoundationGlue::AVMediaTypeVideo()] ||
+ [device hasMediaType:AVFoundationGlue::AVMediaTypeMuxed()]) &&
+ ![device isSuspended]) {
+ DeviceNameAndTransportType* nameAndTransportType =
+ [[[DeviceNameAndTransportType alloc]
+ initWithName:[device localizedName]
+ transportType:[device transportType]] autorelease];
+ [deviceNames setObject:nameAndTransportType forKey:[device uniqueID]];
+ }
+ }
+}
+
++ (NSDictionary*)deviceNames {
+ NSMutableDictionary* deviceNames =
+ [[[NSMutableDictionary alloc] init] autorelease];
+ // The device name retrieval is not going to happen in the main thread, and
+ // this might cause instabilities (it did in QTKit), so keep an eye here.
+ [self getDeviceNames:deviceNames];
+ return deviceNames;
+}
+
++ (void)getDevice:(const media::VideoCaptureDevice::Name&)name
+ supportedFormats:(media::VideoCaptureFormats*)formats {
+ NSArray* devices = [AVCaptureDeviceGlue devices];
+ CrAVCaptureDevice* device = nil;
+ for (device in devices) {
+ if ([[device uniqueID] UTF8String] == name.id())
+ break;
+ }
+ if (device == nil)
+ return;
+ for (CrAVCaptureDeviceFormat* format in device.formats) {
+ // MediaSubType is a CMPixelFormatType but can be used as CVPixelFormatType
+ // as well according to CMFormatDescription.h
+ const media::VideoCapturePixelFormat pixelFormat =
+ FourCCToChromiumPixelFormat(
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ [format formatDescription]));
+
+ CoreMediaGlue::CMVideoDimensions dimensions =
+ CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(
+ [format formatDescription]);
+
+ for (CrAVFrameRateRange* frameRate in
+ [format videoSupportedFrameRateRanges]) {
+ media::VideoCaptureFormat format(
+ gfx::Size(dimensions.width, dimensions.height),
+ frameRate.maxFrameRate, pixelFormat);
+ formats->push_back(format);
+ DVLOG(2) << name.name() << " "
+ << media::VideoCaptureFormat::ToString(format);
+ }
+ }
+}
+
+#pragma mark Public methods
+
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ if ((self = [super init])) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(frameReceiver);
+ [self setFrameReceiver:frameReceiver];
+ captureSession_.reset(
+ [[AVFoundationGlue::AVCaptureSessionClass() alloc] init]);
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [self stopCapture];
+ [super dealloc];
+}
+
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ base::AutoLock lock(lock_);
+ frameReceiver_ = frameReceiver;
+}
+
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
+ DCHECK(captureSession_);
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+
+ if (!deviceId) {
+ // First stop the capture session, if it's running.
+ [self stopCapture];
+ // Now remove the input and output from the capture session.
+ [captureSession_ removeOutput:captureVideoDataOutput_];
+ if (captureDeviceInput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ // No need to release |captureDeviceInput_|; it is owned by the session.
+ captureDeviceInput_ = nil;
+ }
+ return YES;
+ }
+
+ // Look for input device with requested name.
+ captureDevice_ = [AVCaptureDeviceGlue deviceWithUniqueID:deviceId];
+ if (!captureDevice_) {
+ [self
+ sendErrorString:[NSString stringWithUTF8String:
+ "Could not open video capture device."]];
+ return NO;
+ }
+
+ // Create the capture input associated with the device. Easy peasy.
+ NSError* error = nil;
+ captureDeviceInput_ =
+ [AVCaptureDeviceInputGlue deviceInputWithDevice:captureDevice_
+ error:&error];
+ if (!captureDeviceInput_) {
+ captureDevice_ = nil;
+ [self sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not create video capture input (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+ return NO;
+ }
+ [captureSession_ addInput:captureDeviceInput_];
+
+ // Create a new data output for video. The data output is configured to
+ // discard late frames by default.
+ captureVideoDataOutput_.reset(
+ [[AVFoundationGlue::AVCaptureVideoDataOutputClass() alloc] init]);
+ if (!captureVideoDataOutput_) {
+ [captureSession_ removeInput:captureDeviceInput_];
+ [self sendErrorString:[NSString stringWithUTF8String:
+ "Could not create video data output."]];
+ return NO;
+ }
+ [captureVideoDataOutput_ setAlwaysDiscardsLateVideoFrames:true];
+ [captureVideoDataOutput_
+ setSampleBufferDelegate:self
+ queue:dispatch_get_global_queue(
+ DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)];
+ [captureSession_ addOutput:captureVideoDataOutput_];
+ return YES;
+}
+
+- (BOOL)setCaptureHeight:(int)height
+ width:(int)width
+ frameRate:(float)frameRate {
+ // Check that the caller is either VideoCaptureDeviceMac::AllocateAndStart()
+ // or VideoCaptureDeviceMac::ReceiveFrame(), depending on the running state.
+ // VCDM::ReceiveFrame() calls here to change the aspect ratio.
+ DCHECK((![captureSession_ isRunning] &&
+ main_thread_checker_.CalledOnValidThread()) ||
+ callback_thread_checker_.CalledOnValidThread());
+
+ frameWidth_ = width;
+ frameHeight_ = height;
+ frameRate_ = frameRate;
+
+ FourCharCode best_fourcc = kCVPixelFormatType_422YpCbCr8;
+ const bool prefer_mjpeg =
+ width > kMjpegWidthThreshold || height > kMjpegHeightThreshold;
+ for (CrAVCaptureDeviceFormat* format in captureDevice_.formats) {
+ const FourCharCode fourcc =
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(
+ [format formatDescription]);
+ if (prefer_mjpeg &&
+ fourcc == CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML) {
+ best_fourcc = fourcc;
+ break;
+ }
+ // Compare according to Chromium preference.
+ if (FourCCToChromiumPixelFormat(fourcc) <
+ FourCCToChromiumPixelFormat(best_fourcc)) {
+ best_fourcc = fourcc;
+ }
+ }
+
+ // The capture output has to be configured, despite Mac documentation
+ // detailing that setting the sessionPreset would be enough. The reason for
+ // this mismatch is probably that most of the AVFoundation docs are written
+ // for iOS and not for Mac OS X. AVVideoScalingModeKey() refers to
+ // letterboxing yes/no and preserving aspect ratio yes/no when scaling.
+ // Currently we set cropping and preservation.
+ NSDictionary* videoSettingsDictionary = @{
+ (id)kCVPixelBufferWidthKey : @(width), (id)
+ kCVPixelBufferHeightKey : @(height), (id)
+ kCVPixelBufferPixelFormatTypeKey : @(best_fourcc),
+ AVFoundationGlue::AVVideoScalingModeKey() :
+ AVFoundationGlue::AVVideoScalingModeResizeAspectFill()
+ };
+ [captureVideoDataOutput_ setVideoSettings:videoSettingsDictionary];
+
+ CrAVCaptureConnection* captureConnection = [captureVideoDataOutput_
+ connectionWithMediaType:AVFoundationGlue::AVMediaTypeVideo()];
+ // Check selector existence, related to bugs http://crbug.com/327532 and
+ // http://crbug.com/328096.
+ // CMTimeMake accepts integer arguments, but |frameRate| is a float, so it
+ // is scaled by |kFrameRatePrecision| and truncated to an integer below.
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMinFrameDurationSupported)] &&
+ [captureConnection isVideoMinFrameDurationSupported]) {
+ [captureConnection
+ setVideoMinFrameDuration:CoreMediaGlue::CMTimeMake(
+ media::kFrameRatePrecision,
+ (int)(frameRate *
+ media::kFrameRatePrecision))];
+ }
+ if ([captureConnection
+ respondsToSelector:@selector(isVideoMaxFrameDurationSupported)] &&
+ [captureConnection isVideoMaxFrameDurationSupported]) {
+ [captureConnection
+ setVideoMaxFrameDuration:CoreMediaGlue::CMTimeMake(
+ media::kFrameRatePrecision,
+ (int)(frameRate *
+ media::kFrameRatePrecision))];
+ }
+ return YES;
+}
+
+- (BOOL)startCapture {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (!captureSession_) {
+ DLOG(ERROR) << "Video capture session not initialized.";
+ return NO;
+ }
+ // Connect the notifications.
+ NSNotificationCenter* nc = [NSNotificationCenter defaultCenter];
+ [nc addObserver:self
+ selector:@selector(onVideoError:)
+ name:AVFoundationGlue::AVCaptureSessionRuntimeErrorNotification()
+ object:captureSession_];
+ [captureSession_ startRunning];
+ return YES;
+}
+
+- (void)stopCapture {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if ([captureSession_ isRunning])
+ [captureSession_ stopRunning]; // Synchronous.
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
+#pragma mark Private methods
+
+// |captureOutput| is called by the capture device to deliver a new frame.
+- (void)captureOutput:(CrAVCaptureOutput*)captureOutput
+ didOutputSampleBuffer:(CoreMediaGlue::CMSampleBufferRef)sampleBuffer
+ fromConnection:(CrAVCaptureConnection*)connection {
+ // AVFoundation calls this method from a number of threads, depending on, at
+ // least, whether Chrome is in the foreground or background. Sample the
+ // actual thread here.
+ callback_thread_checker_.DetachFromThread();
+ CHECK(callback_thread_checker_.CalledOnValidThread());
+
+ const CoreMediaGlue::CMFormatDescriptionRef formatDescription =
+ CoreMediaGlue::CMSampleBufferGetFormatDescription(sampleBuffer);
+ const FourCharCode fourcc =
+ CoreMediaGlue::CMFormatDescriptionGetMediaSubType(formatDescription);
+ const CoreMediaGlue::CMVideoDimensions dimensions =
+ CoreMediaGlue::CMVideoFormatDescriptionGetDimensions(formatDescription);
+ const media::VideoCaptureFormat captureFormat(
+ gfx::Size(dimensions.width, dimensions.height), frameRate_,
+ FourCCToChromiumPixelFormat(fourcc));
+
+ char* baseAddress = 0;
+ size_t frameSize = 0;
+ CVImageBufferRef videoFrame = nil;
+ if (fourcc == CoreMediaGlue::kCMVideoCodecType_JPEG_OpenDML) {
+ // If MJPEG, use block buffer instead of pixel buffer.
+ CoreMediaGlue::CMBlockBufferRef blockBuffer =
+ CoreMediaGlue::CMSampleBufferGetDataBuffer(sampleBuffer);
+ if (blockBuffer) {
+ size_t lengthAtOffset;
+ CoreMediaGlue::CMBlockBufferGetDataPointer(
+ blockBuffer, 0, &lengthAtOffset, &frameSize, &baseAddress);
+ // Expect the MJPEG data to be available as a contiguous reference, i.e.
+ // not covered by multiple memory blocks.
+ CHECK_EQ(lengthAtOffset, frameSize);
+ }
+ } else {
+ videoFrame = CoreMediaGlue::CMSampleBufferGetImageBuffer(sampleBuffer);
+ // Lock the frame and calculate frame size.
+ if (CVPixelBufferLockBaseAddress(videoFrame, kCVPixelBufferLock_ReadOnly) ==
+ kCVReturnSuccess) {
+ baseAddress = static_cast<char*>(CVPixelBufferGetBaseAddress(videoFrame));
+ frameSize = CVPixelBufferGetHeight(videoFrame) *
+ CVPixelBufferGetBytesPerRow(videoFrame);
+ } else {
+ videoFrame = nil;
+ }
+ }
+
+ {
+ base::AutoLock lock(lock_);
+ if (frameReceiver_ && baseAddress) {
+ frameReceiver_->ReceiveFrame(reinterpret_cast<uint8_t*>(baseAddress),
+ frameSize, captureFormat, 0, 0);
+ }
+ }
+
+ if (videoFrame)
+ CVPixelBufferUnlockBaseAddress(videoFrame, kCVPixelBufferLock_ReadOnly);
+}
+
+- (void)onVideoError:(NSNotification*)errorNotification {
+ NSError* error = base::mac::ObjCCast<NSError>([[errorNotification userInfo]
+ objectForKey:AVFoundationGlue::AVCaptureSessionErrorKey()]);
+ [self sendErrorString:
+ [NSString stringWithFormat:@"%@: %@", [error localizedDescription],
+ [error localizedFailureReason]]];
+}
+
+- (void)sendErrorString:(NSString*)error {
+ DLOG(ERROR) << [error UTF8String];
+ base::AutoLock lock(lock_);
+ if (frameReceiver_)
+ frameReceiver_->ReceiveError([error UTF8String]);
+}
+
+@end
diff --git a/media/capture/video/mac/video_capture_device_decklink_mac.h b/media/capture/video/mac/video_capture_device_decklink_mac.h
new file mode 100644
index 0000000..e03765a
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_decklink_mac.h
@@ -0,0 +1,82 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of VideoCaptureDevice class for Blackmagic video capture
+// devices by using the DeckLink SDK.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DECKLINK_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DECKLINK_MAC_H_
+
+#include "media/capture/video/video_capture_device.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+
+namespace {
+class DeckLinkCaptureDelegate;
+} // namespace
+
+namespace media {
+
+// Extension of VideoCaptureDevice to create and manipulate Blackmagic devices.
+// Creates a reference counted |decklink_capture_delegate_| that does all the
+// DeckLink SDK configuration and capture work while holding a weak reference to
+// us for sending back frames, logs and error messages.
+class MEDIA_EXPORT VideoCaptureDeviceDeckLinkMac : public VideoCaptureDevice {
+ public:
+ // Gets the names of all DeckLink video capture devices connected to this
+ // computer, as enumerated by the DeckLink SDK. To allow the user to choose
+ // exactly which capture format she wants, we enumerate as many cameras as
+ // capture formats.
+ static void EnumerateDevices(VideoCaptureDevice::Names* device_names);
+
+ // Gets the supported formats of a particular device attached to the system,
+ // identified by |device|. Formats are retrieved from the DeckLink SDK.
+ // Following the enumeration, each camera will have only one capability.
+ static void EnumerateDeviceCapabilities(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats);
+
+ explicit VideoCaptureDeviceDeckLinkMac(const Name& device_name);
+ ~VideoCaptureDeviceDeckLinkMac() override;
+
+ // Copy of VideoCaptureDevice::Client::OnIncomingCapturedData(). Used by
+ // |decklink_capture_delegate_| to forward captured frames.
+ void OnIncomingCapturedData(const uint8* data,
+ size_t length,
+ const VideoCaptureFormat& frame_format,
+ int rotation, // Clockwise.
+ base::TimeTicks timestamp);
+
+ // Forwarder to VideoCaptureDevice::Client::OnError().
+ void SendErrorString(const std::string& reason);
+
+ // Forwarder to VideoCaptureDevice::Client::OnLog().
+ void SendLogString(const std::string& message);
+
+ private:
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
+
+ // Protects concurrent setting and using of |client_|.
+ base::Lock lock_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+
+ // Reference counted handle to the DeckLink capture delegate, ref counted by
+ // the DeckLink SDK as well.
+ scoped_refptr<DeckLinkCaptureDelegate> decklink_capture_delegate_;
+
+ // Checks for Device (a.k.a. Audio) thread.
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceDeckLinkMac);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_DECKLINK_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_decklink_mac.mm b/media/capture/video/mac/video_capture_device_decklink_mac.mm
new file mode 100644
index 0000000..ddf69ad
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_decklink_mac.mm
@@ -0,0 +1,484 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/mac/video_capture_device_decklink_mac.h"
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/synchronization/lock.h"
+#include "third_party/decklink/mac/include/DeckLinkAPI.h"
+
+namespace {
+
+// DeckLink SDK uses ScopedComPtr-style APIs. Chrome ScopedComPtr is only
+// available for Windows builds. This is a verbatim knock-off of the needed
+// parts of base::win::ScopedComPtr<> for ref counting.
+template <class T>
+class ScopedDeckLinkPtr : public scoped_refptr<T> {
+ private:
+ using scoped_refptr<T>::ptr_;
+
+ public:
+ T** Receive() {
+ DCHECK(!ptr_) << "Object leak. Pointer must be NULL";
+ return &ptr_;
+ }
+
+ void** ReceiveVoid() { return reinterpret_cast<void**>(Receive()); }
+
+ void Release() {
+ if (ptr_ != NULL) {
+ ptr_->Release();
+ ptr_ = NULL;
+ }
+ }
+};
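+
+// A minimal usage sketch of ScopedDeckLinkPtr<>, mirroring the QueryInterface
+// pattern used below:
+//   ScopedDeckLinkPtr<IDeckLinkInput> input;
+//   if (decklink->QueryInterface(IID_IDeckLinkInput,
+//                                input.ReceiveVoid()) == S_OK) {
+//     // Use |input|; Release() drops the reference when no longer needed.
+//   }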
+
+// This class is used to interact directly with DeckLink SDK for video capture.
+// Implements the reference counted interface IUnknown. Has a weak reference to
+// VideoCaptureDeviceDeckLinkMac for sending captured frames, error messages and
+// logs.
+class DeckLinkCaptureDelegate
+ : public IDeckLinkInputCallback,
+ public base::RefCountedThreadSafe<DeckLinkCaptureDelegate> {
+ public:
+ DeckLinkCaptureDelegate(const media::VideoCaptureDevice::Name& device_name,
+ media::VideoCaptureDeviceDeckLinkMac* frame_receiver);
+
+ void AllocateAndStart(const media::VideoCaptureParams& params);
+ void StopAndDeAllocate();
+
+ // Remove the VideoCaptureDeviceDeckLinkMac's weak reference.
+ void ResetVideoCaptureDeviceReference();
+
+ private:
+ // IDeckLinkInputCallback interface implementation.
+ HRESULT VideoInputFormatChanged(
+ BMDVideoInputFormatChangedEvents notification_events,
+ IDeckLinkDisplayMode* new_display_mode,
+ BMDDetectedVideoInputFormatFlags detected_signal_flags) override;
+ HRESULT VideoInputFrameArrived(
+ IDeckLinkVideoInputFrame* video_frame,
+ IDeckLinkAudioInputPacket* audio_packet) override;
+
+ // IUnknown interface implementation.
+ HRESULT QueryInterface(REFIID iid, void** ppv) override;
+ ULONG AddRef() override;
+ ULONG Release() override;
+
+ // Forwarder to VideoCaptureDeviceDeckLinkMac::SendErrorString().
+ void SendErrorString(const std::string& reason);
+
+ // Forwarder to VideoCaptureDeviceDeckLinkMac::SendLogString().
+ void SendLogString(const std::string& message);
+
+ const media::VideoCaptureDevice::Name device_name_;
+
+ // Protects concurrent setting and using of |frame_receiver_|.
+ base::Lock lock_;
+ // Weak reference to the captured frames client, used also for error messages
+ // and logging. Initialized on construction and used until cleared by calling
+ // ResetVideoCaptureDeviceReference().
+ media::VideoCaptureDeviceDeckLinkMac* frame_receiver_;
+
+ // This is used to control the video capturing device input interface.
+ ScopedDeckLinkPtr<IDeckLinkInput> decklink_input_;
+ // |decklink_| represents a physical device attached to the host.
+ ScopedDeckLinkPtr<IDeckLink> decklink_;
+
+ // Checks for Device (a.k.a. Audio) thread.
+ base::ThreadChecker thread_checker_;
+
+ friend class scoped_refptr<DeckLinkCaptureDelegate>;
+ friend class base::RefCountedThreadSafe<DeckLinkCaptureDelegate>;
+
+ ~DeckLinkCaptureDelegate() override;
+
+ DISALLOW_COPY_AND_ASSIGN(DeckLinkCaptureDelegate);
+};
+
+static float GetDisplayModeFrameRate(
+ const ScopedDeckLinkPtr<IDeckLinkDisplayMode>& display_mode) {
+ BMDTimeValue time_value, time_scale;
+ float display_mode_frame_rate = 0.0f;
+ if (display_mode->GetFrameRate(&time_value, &time_scale) == S_OK &&
+ time_value > 0) {
+ display_mode_frame_rate = static_cast<float>(time_scale) / time_value;
+ }
+ // Interlaced formats are going to be marked as double the frame rate,
+ // which follows the general naming convention.
+ if (display_mode->GetFieldDominance() == bmdLowerFieldFirst ||
+ display_mode->GetFieldDominance() == bmdUpperFieldFirst) {
+ display_mode_frame_rate *= 2.0f;
+ }
+ return display_mode_frame_rate;
+}
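+
+// For example, a 1080i50 display mode reports time_scale / time_value == 25
+// and an interlaced field dominance, so the function above returns 50.0.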
+
+DeckLinkCaptureDelegate::DeckLinkCaptureDelegate(
+ const media::VideoCaptureDevice::Name& device_name,
+ media::VideoCaptureDeviceDeckLinkMac* frame_receiver)
+ : device_name_(device_name), frame_receiver_(frame_receiver) {
+}
+
+DeckLinkCaptureDelegate::~DeckLinkCaptureDelegate() {
+}
+
+void DeckLinkCaptureDelegate::AllocateAndStart(
+ const media::VideoCaptureParams& params) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ scoped_refptr<IDeckLinkIterator> decklink_iter(
+ CreateDeckLinkIteratorInstance());
+ DLOG_IF(ERROR, !decklink_iter.get()) << "Error creating DeckLink iterator";
+ if (!decklink_iter.get())
+ return;
+
+ ScopedDeckLinkPtr<IDeckLink> decklink_local;
+ while (decklink_iter->Next(decklink_local.Receive()) == S_OK) {
+ CFStringRef device_model_name = NULL;
+ if ((decklink_local->GetModelName(&device_model_name) == S_OK) ||
+ (device_name_.id() == base::SysCFStringRefToUTF8(device_model_name))) {
+ break;
+ }
+ }
+ if (!decklink_local.get()) {
+ SendErrorString("Device id not found in the system");
+ return;
+ }
+
+ ScopedDeckLinkPtr<IDeckLinkInput> decklink_input_local;
+ if (decklink_local->QueryInterface(
+ IID_IDeckLinkInput, decklink_input_local.ReceiveVoid()) != S_OK) {
+ SendErrorString("Error querying input interface.");
+ return;
+ }
+
+ ScopedDeckLinkPtr<IDeckLinkDisplayModeIterator> display_mode_iter;
+ if (decklink_input_local->GetDisplayModeIterator(
+ display_mode_iter.Receive()) != S_OK) {
+ SendErrorString("Error creating Display Mode Iterator");
+ return;
+ }
+
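+ // Pick the display mode that minimizes the accumulated absolute difference
+ // in width, height and frame rate with respect to the requested format,
+ // e.g. a request for 1280x720@60 selects an exact 720p60 mode if available.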
+ ScopedDeckLinkPtr<IDeckLinkDisplayMode> chosen_display_mode;
+ ScopedDeckLinkPtr<IDeckLinkDisplayMode> display_mode;
+ float min_diff = FLT_MAX;
+ while (display_mode_iter->Next(display_mode.Receive()) == S_OK) {
+ const float diff = labs(display_mode->GetWidth() -
+ params.requested_format.frame_size.width()) +
+ labs(params.requested_format.frame_size.height() -
+ display_mode->GetHeight()) +
+ fabs(params.requested_format.frame_rate -
+ GetDisplayModeFrameRate(display_mode));
+ if (diff < min_diff) {
+ chosen_display_mode = display_mode;
+ min_diff = diff;
+ }
+ display_mode.Release();
+ }
+ if (!chosen_display_mode.get()) {
+ SendErrorString("Could not find a display mode");
+ return;
+ }
+#if !defined(NDEBUG)
+ DVLOG(1) << "Requested format: "
+ << media::VideoCaptureFormat::ToString(params.requested_format);
+ CFStringRef format_name = NULL;
+ if (chosen_display_mode->GetName(&format_name) == S_OK)
+ DVLOG(1) << "Chosen format: " << base::SysCFStringRefToUTF8(format_name);
+#endif
+
+ // Enable video input. Configure for no input video format change detection;
+ // this in turn disables calls to VideoInputFormatChanged().
+ if (decklink_input_local->EnableVideoInput(
+ chosen_display_mode->GetDisplayMode(), bmdFormat8BitYUV,
+ bmdVideoInputFlagDefault) != S_OK) {
+ SendErrorString("Could not select the video format we like.");
+ return;
+ }
+
+ decklink_input_local->SetCallback(this);
+ if (decklink_input_local->StartStreams() != S_OK)
+ SendErrorString("Could not start capturing");
+
+ decklink_.swap(decklink_local);
+ decklink_input_.swap(decklink_input_local);
+}
+
+void DeckLinkCaptureDelegate::StopAndDeAllocate() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!decklink_input_.get())
+ return;
+ if (decklink_input_->StopStreams() != S_OK)
+ SendLogString("Problem stopping capture.");
+ decklink_input_->SetCallback(NULL);
+ decklink_input_->DisableVideoInput();
+ decklink_input_.Release();
+ decklink_.Release();
+ ResetVideoCaptureDeviceReference();
+}
+
+HRESULT DeckLinkCaptureDelegate::VideoInputFormatChanged(
+ BMDVideoInputFormatChangedEvents notification_events,
+ IDeckLinkDisplayMode* new_display_mode,
+ BMDDetectedVideoInputFormatFlags detected_signal_flags) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return S_OK;
+}
+
+HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(
+ IDeckLinkVideoInputFrame* video_frame,
+ IDeckLinkAudioInputPacket* /* audio_packet */) {
+ // Capture frames are manipulated as an IDeckLinkVideoFrame.
+ uint8* video_data = NULL;
+ video_frame->GetBytes(reinterpret_cast<void**>(&video_data));
+
+ media::VideoCapturePixelFormat pixel_format =
+ media::VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+ switch (video_frame->GetPixelFormat()) {
+ case bmdFormat8BitYUV: // A.k.a. '2vuy';
+ pixel_format = media::VIDEO_CAPTURE_PIXEL_FORMAT_UYVY;
+ break;
+ case bmdFormat8BitARGB:
+ pixel_format = media::VIDEO_CAPTURE_PIXEL_FORMAT_ARGB;
+ break;
+ default:
+ SendErrorString("Unsupported pixel format");
+ break;
+ }
+
+ const media::VideoCaptureFormat capture_format(
+ gfx::Size(video_frame->GetWidth(), video_frame->GetHeight()),
+ 0.0f, // Frame rate is not needed for captured data callback.
+ pixel_format);
+ base::AutoLock lock(lock_);
+ if (frame_receiver_) {
+ frame_receiver_->OnIncomingCapturedData(
+ video_data, video_frame->GetRowBytes() * video_frame->GetHeight(),
+ capture_format,
+ 0, // Rotation.
+ base::TimeTicks::Now());
+ }
+ return S_OK;
+}
+
+HRESULT DeckLinkCaptureDelegate::QueryInterface(REFIID iid, void** ppv) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ CFUUIDBytes iunknown = CFUUIDGetUUIDBytes(IUnknownUUID);
+ if (memcmp(&iid, &iunknown, sizeof(REFIID)) == 0 ||
+ memcmp(&iid, &IID_IDeckLinkInputCallback, sizeof(REFIID)) == 0) {
+ *ppv = static_cast<IDeckLinkInputCallback*>(this);
+ AddRef();
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+}
+
+ULONG DeckLinkCaptureDelegate::AddRef() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::RefCountedThreadSafe<DeckLinkCaptureDelegate>::AddRef();
+ return 1;
+}
+
+ULONG DeckLinkCaptureDelegate::Release() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ bool ret_value = !HasOneRef();
+ base::RefCountedThreadSafe<DeckLinkCaptureDelegate>::Release();
+ return ret_value;
+}
+
+void DeckLinkCaptureDelegate::SendErrorString(const std::string& reason) {
+ base::AutoLock lock(lock_);
+ if (frame_receiver_)
+ frame_receiver_->SendErrorString(reason);
+}
+
+void DeckLinkCaptureDelegate::SendLogString(const std::string& message) {
+ base::AutoLock lock(lock_);
+ if (frame_receiver_)
+ frame_receiver_->SendLogString(message);
+}
+
+void DeckLinkCaptureDelegate::ResetVideoCaptureDeviceReference() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock lock(lock_);
+ frame_receiver_ = NULL;
+}
+
+} // namespace
+
+namespace media {
+
+static std::string JoinDeviceNameAndFormat(CFStringRef name,
+ CFStringRef format) {
+ return base::SysCFStringRefToUTF8(name) + " - " +
+ base::SysCFStringRefToUTF8(format);
+}
+
+// static
+void VideoCaptureDeviceDeckLinkMac::EnumerateDevices(
+ VideoCaptureDevice::Names* device_names) {
+ scoped_refptr<IDeckLinkIterator> decklink_iter(
+ CreateDeckLinkIteratorInstance());
+ // At this point, not being able to create a DeckLink iterator means that
+ // there are no Blackmagic DeckLink devices in the system, so don't log an
+ // error.
+ DVLOG_IF(1, !decklink_iter.get()) << "Could not create DeckLink iterator";
+ if (!decklink_iter.get())
+ return;
+
+ ScopedDeckLinkPtr<IDeckLink> decklink;
+ while (decklink_iter->Next(decklink.Receive()) == S_OK) {
+ ScopedDeckLinkPtr<IDeckLink> decklink_local;
+ decklink_local.swap(decklink);
+
+ CFStringRef device_model_name = NULL;
+ HRESULT hr = decklink_local->GetModelName(&device_model_name);
+ DVLOG_IF(1, hr != S_OK) << "Error reading Blackmagic device model name";
+ CFStringRef device_display_name = NULL;
+ hr = decklink_local->GetDisplayName(&device_display_name);
+ DVLOG_IF(1, hr != S_OK) << "Error reading Blackmagic device display name";
+ DVLOG_IF(1, hr == S_OK) << "Blackmagic device found with name: "
+ << base::SysCFStringRefToUTF8(device_display_name);
+
+ if (!device_model_name && !device_display_name)
+ continue;
+
+ ScopedDeckLinkPtr<IDeckLinkInput> decklink_input;
+ if (decklink_local->QueryInterface(IID_IDeckLinkInput,
+ decklink_input.ReceiveVoid()) != S_OK) {
+ DLOG(ERROR) << "Error Blackmagic querying input interface.";
+ return;
+ }
+
+ ScopedDeckLinkPtr<IDeckLinkDisplayModeIterator> display_mode_iter;
+ if (decklink_input->GetDisplayModeIterator(display_mode_iter.Receive()) !=
+ S_OK) {
+ continue;
+ }
+
+ ScopedDeckLinkPtr<IDeckLinkDisplayMode> display_mode;
+ while (display_mode_iter->Next(display_mode.Receive()) == S_OK) {
+ CFStringRef format_name = NULL;
+ if (display_mode->GetName(&format_name) == S_OK) {
+ VideoCaptureDevice::Name name(
+ JoinDeviceNameAndFormat(device_display_name, format_name),
+ JoinDeviceNameAndFormat(device_model_name, format_name),
+ VideoCaptureDevice::Name::DECKLINK,
+ VideoCaptureDevice::Name::OTHER_TRANSPORT);
+ device_names->push_back(name);
+ DVLOG(1) << "Blackmagic camera enumerated: " << name.name();
+ }
+ display_mode.Release();
+ }
+ }
+}
+
+// static
+void VideoCaptureDeviceDeckLinkMac::EnumerateDeviceCapabilities(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ scoped_refptr<IDeckLinkIterator> decklink_iter(
+ CreateDeckLinkIteratorInstance());
+ DLOG_IF(ERROR, !decklink_iter.get()) << "Error creating DeckLink iterator";
+ if (!decklink_iter.get())
+ return;
+
+ ScopedDeckLinkPtr<IDeckLink> decklink;
+ while (decklink_iter->Next(decklink.Receive()) == S_OK) {
+ ScopedDeckLinkPtr<IDeckLink> decklink_local;
+ decklink_local.swap(decklink);
+
+ ScopedDeckLinkPtr<IDeckLinkInput> decklink_input;
+ if (decklink_local->QueryInterface(IID_IDeckLinkInput,
+ decklink_input.ReceiveVoid()) != S_OK) {
+ DLOG(ERROR) << "Error Blackmagic querying input interface.";
+ return;
+ }
+
+ ScopedDeckLinkPtr<IDeckLinkDisplayModeIterator> display_mode_iter;
+ if (decklink_input->GetDisplayModeIterator(display_mode_iter.Receive()) !=
+ S_OK) {
+ continue;
+ }
+
+ CFStringRef device_model_name = NULL;
+ if (decklink_local->GetModelName(&device_model_name) != S_OK)
+ continue;
+
+ ScopedDeckLinkPtr<IDeckLinkDisplayMode> display_mode;
+ while (display_mode_iter->Next(display_mode.Receive()) == S_OK) {
+ CFStringRef format_name = NULL;
+ if (display_mode->GetName(&format_name) == S_OK &&
+ device.id() !=
+ JoinDeviceNameAndFormat(device_model_name, format_name)) {
+ display_mode.Release();
+ continue;
+ }
+
+ // IDeckLinkDisplayMode does not have information on pixel format, this
+ // is only available on capture.
+ const media::VideoCaptureFormat format(
+ gfx::Size(display_mode->GetWidth(), display_mode->GetHeight()),
+ GetDisplayModeFrameRate(display_mode),
+ VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN);
+ supported_formats->push_back(format);
+ DVLOG(2) << device.name() << " " << VideoCaptureFormat::ToString(format);
+ display_mode.Release();
+ }
+ return;
+ }
+}
+
+VideoCaptureDeviceDeckLinkMac::VideoCaptureDeviceDeckLinkMac(
+ const Name& device_name)
+ : decklink_capture_delegate_(
+ new DeckLinkCaptureDelegate(device_name, this)) {
+}
+
+VideoCaptureDeviceDeckLinkMac::~VideoCaptureDeviceDeckLinkMac() {
+ decklink_capture_delegate_->ResetVideoCaptureDeviceReference();
+}
+
+void VideoCaptureDeviceDeckLinkMac::OnIncomingCapturedData(
+ const uint8* data,
+ size_t length,
+ const VideoCaptureFormat& frame_format,
+ int rotation, // Clockwise.
+ base::TimeTicks timestamp) {
+ base::AutoLock lock(lock_);
+ if (client_) {
+ client_->OnIncomingCapturedData(data, length, frame_format, rotation,
+ timestamp);
+ }
+}
+
+void VideoCaptureDeviceDeckLinkMac::SendErrorString(const std::string& reason) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock lock(lock_);
+ if (client_)
+ client_->OnError(reason);
+}
+
+void VideoCaptureDeviceDeckLinkMac::SendLogString(const std::string& message) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::AutoLock lock(lock_);
+ if (client_)
+ client_->OnLog(message);
+}
+
+void VideoCaptureDeviceDeckLinkMac::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ client_ = client.Pass();
+ if (decklink_capture_delegate_.get())
+ decklink_capture_delegate_->AllocateAndStart(params);
+}
+
+void VideoCaptureDeviceDeckLinkMac::StopAndDeAllocate() {
+ if (decklink_capture_delegate_.get())
+ decklink_capture_delegate_->StopAndDeAllocate();
+}
+
+} // namespace media
diff --git a/media/capture/video/mac/video_capture_device_factory_mac.h b/media/capture/video/mac/video_capture_device_factory_mac.h
new file mode 100644
index 0000000..07e127b
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_factory_mac.h
@@ -0,0 +1,42 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a VideoCaptureDeviceFactory class for Mac.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Mac devices.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryMac
+ : public VideoCaptureDeviceFactory {
+ public:
+ static bool PlatformSupportsAVFoundation();
+
+ explicit VideoCaptureDeviceFactoryMac(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+ ~VideoCaptureDeviceFactoryMac() override;
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+
+ private:
+ // Cache of |ui_task_runner|, used to enumerate QTKit devices on the UI
+ // thread.
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryMac);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_factory_mac.mm b/media/capture/video/mac/video_capture_device_factory_mac.mm
new file mode 100644
index 0000000..896c290
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_factory_mac.mm
@@ -0,0 +1,219 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
+
+#import <IOKit/audio/IOAudioTypes.h>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/strings/string_util.h"
+#include "base/task_runner_util.h"
+#import "media/base/mac/avfoundation_glue.h"
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
+#import "media/capture/video/mac/video_capture_device_decklink_mac.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
+
+namespace media {
+
+// In the QTKit API, some devices are known to crash if VGA is requested; for
+// them, HD is the only supported resolution (see http://crbug.com/396812). In
+// the AVFoundation case, we skip enumerating them altogether. These devices
+// are identified by a characteristic trailing substring of uniqueId. At the
+// moment these are just Blackmagic devices.
+const struct NameAndVid {
+ const char* unique_id_signature;
+ const int capture_width;
+ const int capture_height;
+ const float capture_frame_rate;
+} kBlacklistedCameras[] = {{"-01FDA82C8A9C", 1280, 720, 60.0f}};
+
+static bool IsDeviceBlacklisted(const VideoCaptureDevice::Name& name) {
+ bool is_device_blacklisted = false;
+ for (size_t i = 0;
+ !is_device_blacklisted && i < arraysize(kBlacklistedCameras); ++i) {
+ is_device_blacklisted =
+ base::EndsWith(name.id(),
+ kBlacklistedCameras[i].unique_id_signature,
+ base::CompareCase::INSENSITIVE_ASCII);
+ }
+ DVLOG_IF(2, is_device_blacklisted) << "Blacklisted camera: " << name.name()
+ << ", id: " << name.id();
+ return is_device_blacklisted;
+}
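+
+// For example, given the table above, a device whose unique id ends in
+// "-01FDA82C8A9C" (compared case-insensitively) is reported as blacklisted.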
+
+static scoped_ptr<media::VideoCaptureDevice::Names>
+EnumerateDevicesUsingQTKit() {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 media::EnumerateDevicesUsingQTKit"));
+
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ NSMutableDictionary* capture_devices =
+ [[[NSMutableDictionary alloc] init] autorelease];
+ [VideoCaptureDeviceQTKit getDeviceNames:capture_devices];
+ for (NSString* key in capture_devices) {
+ VideoCaptureDevice::Name name(
+ [[[capture_devices valueForKey:key] deviceName] UTF8String],
+ [key UTF8String], VideoCaptureDevice::Name::QTKIT);
+ if (IsDeviceBlacklisted(name))
+ name.set_is_blacklisted(true);
+ device_names->push_back(name);
+ }
+ return device_names.Pass();
+}
+
+static void RunDevicesEnumeratedCallback(
+ const base::Callback<void(scoped_ptr<media::VideoCaptureDevice::Names>)>&
+ callback,
+ scoped_ptr<media::VideoCaptureDevice::Names> device_names) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 media::RunDevicesEnumeratedCallback"));
+ callback.Run(device_names.Pass());
+}
+
+// static
+bool VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation() {
+ return AVFoundationGlue::IsAVFoundationSupported();
+}
+
+VideoCaptureDeviceFactoryMac::VideoCaptureDeviceFactoryMac(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner)
+ : ui_task_runner_(ui_task_runner) {
+ thread_checker_.DetachFromThread();
+}
+
+VideoCaptureDeviceFactoryMac::~VideoCaptureDeviceFactoryMac() {
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryMac::Create(
+ const VideoCaptureDevice::Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_NE(device_name.capture_api_type(),
+ VideoCaptureDevice::Name::API_TYPE_UNKNOWN);
+
+ scoped_ptr<VideoCaptureDevice> capture_device;
+ if (device_name.capture_api_type() == VideoCaptureDevice::Name::DECKLINK) {
+ capture_device.reset(new VideoCaptureDeviceDeckLinkMac(device_name));
+ } else {
+ VideoCaptureDeviceMac* device = new VideoCaptureDeviceMac(device_name);
+ capture_device.reset(device);
+ if (!device->Init(device_name.capture_api_type())) {
+ LOG(ERROR) << "Could not initialize VideoCaptureDevice.";
+ capture_device.reset();
+ }
+ }
+ return scoped_ptr<VideoCaptureDevice>(capture_device.Pass());
+}
+
+void VideoCaptureDeviceFactoryMac::GetDeviceNames(
+ VideoCaptureDevice::Names* device_names) {
+ // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/458397 is
+ // fixed.
+ tracked_objects::ScopedTracker tracking_profile(
+ FROM_HERE_WITH_EXPLICIT_FUNCTION(
+ "458397 VideoCaptureDeviceFactoryMac::GetDeviceNames"));
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Loop through all available devices and add to |device_names|.
+ NSDictionary* capture_devices;
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ DVLOG(1) << "Enumerating video capture devices using AVFoundation";
+ capture_devices = [VideoCaptureDeviceAVFoundation deviceNames];
+ // Enumerate all devices found by AVFoundation, translate the info for each
+ // to class Name and add it to |device_names|.
+ for (NSString* key in capture_devices) {
+ int transport_type = [[capture_devices valueForKey:key] transportType];
+ // Transport types are defined for Audio devices and reused for video.
+ VideoCaptureDevice::Name::TransportType device_transport_type =
+ (transport_type == kIOAudioDeviceTransportTypeBuiltIn ||
+ transport_type == kIOAudioDeviceTransportTypeUSB)
+ ? VideoCaptureDevice::Name::USB_OR_BUILT_IN
+ : VideoCaptureDevice::Name::OTHER_TRANSPORT;
+ VideoCaptureDevice::Name name(
+ [[[capture_devices valueForKey:key] deviceName] UTF8String],
+ [key UTF8String], VideoCaptureDevice::Name::AVFOUNDATION,
+ device_transport_type);
+ if (IsDeviceBlacklisted(name))
+ continue;
+ device_names->push_back(name);
+ }
+ // Also retrieve Blackmagic devices, if present, via DeckLink SDK API.
+ VideoCaptureDeviceDeckLinkMac::EnumerateDevices(device_names);
+ } else {
+ // We should not enumerate QTKit devices on the Device Thread.
+ NOTREACHED();
+ }
+}
+
+void VideoCaptureDeviceFactoryMac::EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (AVFoundationGlue::IsAVFoundationSupported()) {
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ GetDeviceNames(device_names.get());
+ callback.Run(device_names.Pass());
+ } else {
+ DVLOG(1) << "Enumerating video capture devices using QTKit";
+ base::PostTaskAndReplyWithResult(
+ ui_task_runner_.get(), FROM_HERE,
+ base::Bind(&EnumerateDevicesUsingQTKit),
+ base::Bind(&RunDevicesEnumeratedCallback, callback));
+ }
+}
+
+void VideoCaptureDeviceFactoryMac::GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ switch (device.capture_api_type()) {
+ case VideoCaptureDevice::Name::AVFOUNDATION:
+ DVLOG(1) << "Enumerating video capture capabilities, AVFoundation";
+ [VideoCaptureDeviceAVFoundation getDevice:device
+ supportedFormats:supported_formats];
+ break;
+ case VideoCaptureDevice::Name::QTKIT:
+ // Blacklisted cameras provide their own supported format(s); otherwise no
+ // such information is available for QTKit devices.
+ if (device.is_blacklisted()) {
+ for (size_t i = 0; i < arraysize(kBlacklistedCameras); ++i) {
+ if (base::EndsWith(device.id(),
+ kBlacklistedCameras[i].unique_id_signature,
+ base::CompareCase::INSENSITIVE_ASCII)) {
+ supported_formats->push_back(media::VideoCaptureFormat(
+ gfx::Size(kBlacklistedCameras[i].capture_width,
+ kBlacklistedCameras[i].capture_height),
+ kBlacklistedCameras[i].capture_frame_rate,
+ media::VIDEO_CAPTURE_PIXEL_FORMAT_UYVY));
+ break;
+ }
+ }
+ }
+ break;
+ case VideoCaptureDevice::Name::DECKLINK:
+ DVLOG(1) << "Enumerating video capture capabilities " << device.name();
+ VideoCaptureDeviceDeckLinkMac::EnumerateDeviceCapabilities(
+ device, supported_formats);
+ break;
+ default:
+ NOTREACHED();
+ }
+}
+
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryMac(ui_task_runner);
+}
+
+} // namespace media
diff --git a/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm b/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm
new file mode 100644
index 0000000..6b8500d
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_factory_mac_unittest.mm
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+#include "base/message_loop/message_loop.h"
+#include "base/thread_task_runner_handle.h"
+#import "media/base/mac/avfoundation_glue.h"
+#include "media/base/media_switches.h"
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace media {
+
+class VideoCaptureDeviceFactoryMacTest : public testing::Test {
+ void SetUp() override {
+ AVFoundationGlue::InitializeAVFoundation();
+ base::CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kEnableAVFoundation);
+ }
+
+ private:
+ base::MessageLoop message_loop_;
+};
+
+TEST_F(VideoCaptureDeviceFactoryMacTest, ListDevicesAVFoundation) {
+ if (!AVFoundationGlue::IsAVFoundationSupported()) {
+ DVLOG(1) << "AVFoundation not supported, skipping test.";
+ return;
+ }
+ VideoCaptureDeviceFactoryMac video_capture_device_factory(
+ base::ThreadTaskRunnerHandle::Get());
+
+ VideoCaptureDevice::Names names;
+ video_capture_device_factory.GetDeviceNames(&names);
+ if (!names.size()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ // There should be no blacklisted devices, i.e. no QTKit devices.
+ for (VideoCaptureDevice::Names::const_iterator it = names.begin();
+ it != names.end(); ++it) {
+ EXPECT_EQ(it->capture_api_type(), VideoCaptureDevice::Name::AVFOUNDATION);
+ }
+}
+
+} // namespace media
diff --git a/media/capture/video/mac/video_capture_device_mac.h b/media/capture/video/mac/video_capture_device_mac.h
new file mode 100644
index 0000000..c36248e
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_mac.h
@@ -0,0 +1,115 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Mac OS X implementation of the generic VideoCaptureDevice, using either
+// QTKit or AVFoundation as the native capture API. QTKit is available in all
+// OS X versions, although it is formally deprecated as of 10.9; AVFoundation
+// is available in 10.7 (Lion) and later.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/mac/scoped_nsobject.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+
+@protocol PlatformVideoCapturingMac;
+
+namespace base {
+class SingleThreadTaskRunner;
+}
+
+// Small class to bundle device name and connection type into a dictionary.
+MEDIA_EXPORT
+@interface DeviceNameAndTransportType : NSObject {
+ @private
+ base::scoped_nsobject<NSString> deviceName_;
+ // The transport type of the device (USB, PCI, etc), values are defined in
+ // <IOKit/audio/IOAudioTypes.h> as kIOAudioDeviceTransportType*.
+ int32_t transportType_;
+}
+
+- (id)initWithName:(NSString*)name transportType:(int32_t)transportType;
+
+- (NSString*)deviceName;
+- (int32_t)transportType;
+@end
+
+namespace media {
+
+enum {
+ // Unknown transport type, addition to the kIOAudioDeviceTransportType*
+ // family for QTKit devices where this attribute isn't published.
+ kIOAudioDeviceTransportTypeUnknown = 'unkn'
+};
+
+// Called by VideoCaptureManager to open, close, start, and stop Mac video
+// capture devices.
+class VideoCaptureDeviceMac : public VideoCaptureDevice {
+ public:
+ explicit VideoCaptureDeviceMac(const Name& device_name);
+ ~VideoCaptureDeviceMac() override;
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
+
+ bool Init(VideoCaptureDevice::Name::CaptureApiType capture_api_type);
+
+ // Called to deliver captured video frames.
+ void ReceiveFrame(const uint8* video_frame,
+ int video_frame_length,
+ const VideoCaptureFormat& frame_format,
+ int aspect_numerator,
+ int aspect_denominator);
+
+ // Forwarder to VideoCaptureDevice::Client::OnError().
+ void ReceiveError(const std::string& reason);
+
+ // Forwarder to VideoCaptureDevice::Client::OnLog().
+ void LogMessage(const std::string& message);
+
+ private:
+ void SetErrorState(const std::string& reason);
+ bool UpdateCaptureResolution();
+
+ // Flag indicating the internal state.
+ enum InternalState { kNotInitialized, kIdle, kCapturing, kError };
+
+ Name device_name_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+
+ VideoCaptureFormat capture_format_;
+ // These variables control the two-step configure-start process for QTKit HD:
+ // the device is first started with no configuration and the captured frames
+ // are inspected to check if the camera really supports HD. AVFoundation does
+  // not need this process, so |final_resolution_selected_| starts out true in
+  // that case.
+ bool final_resolution_selected_;
+ bool tried_to_square_pixels_;
+
+  // Only read and write |state_| on |task_runner_|'s thread.
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ InternalState state_;
+
+ id<PlatformVideoCapturingMac> capture_device_;
+
+ // Used with Bind and PostTask to ensure that methods aren't called after the
+ // VideoCaptureDeviceMac is destroyed.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<VideoCaptureDeviceMac> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceMac);
+};
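+
+// Illustrative driving sequence (a sketch under assumed usage, not part of
+// the original change); VideoCaptureManager would roughly do:
+//
+//   VideoCaptureDeviceMac device(name);
+//   if (device.Init(name.capture_api_type())) {
+//     device.AllocateAndStart(params, client.Pass());
+//     // ... frames arrive through ReceiveFrame() ...
+//     device.StopAndDeAllocate();
+//   }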
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_mac.mm b/media/capture/video/mac/video_capture_device_mac.mm
new file mode 100644
index 0000000..d5f6d60
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_mac.mm
@@ -0,0 +1,569 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/mac/video_capture_device_mac.h"
+
+#include <IOKit/IOCFPlugIn.h>
+#include <IOKit/usb/IOUSBLib.h>
+#include <IOKit/usb/USBSpec.h>
+
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/mac/scoped_ioobject.h"
+#include "base/mac/scoped_ioplugininterface.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#import "media/base/mac/avfoundation_glue.h"
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
+#import "media/capture/video/mac/video_capture_device_avfoundation_mac.h"
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
+#include "ui/gfx/geometry/size.h"
+
+@implementation DeviceNameAndTransportType
+
+- (id)initWithName:(NSString*)deviceName transportType:(int32_t)transportType {
+ if (self = [super init]) {
+ deviceName_.reset([deviceName copy]);
+ transportType_ = transportType;
+ }
+ return self;
+}
+
+- (NSString*)deviceName {
+ return deviceName_;
+}
+
+- (int32_t)transportType {
+ return transportType_;
+}
+
+@end // @implementation DeviceNameAndTransportType
+
+namespace media {
+
+// Mac specific limits for minimum and maximum frame rate.
+const float kMinFrameRate = 1.0f;
+const float kMaxFrameRate = 30.0f;
+
+// In device identifiers, the USB VID and PID are stored in 4 bytes each.
+const size_t kVidPidSize = 4;
+
+const struct Resolution {
+ const int width;
+ const int height;
+} kQVGA = {320, 240}, kVGA = {640, 480}, kHD = {1280, 720};
+
+const struct Resolution* const kWellSupportedResolutions[] = {
+ &kQVGA,
+ &kVGA,
+ &kHD,
+};
+
+// Rescaling the image to fix the pixel aspect ratio runs the risk of making
+// the aspect ratio worse, if QTKit selects a new source mode with a different
+// shape. This constant ensures that we don't take this risk if the current
+// aspect ratio is tolerable.
+const float kMaxPixelAspectRatio = 1.15;
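+// For instance (hypothetical values): a reported pixel aspect ratio of 10:11
+// (~1.1) is tolerated, while 4:3 (~1.33) exceeds kMaxPixelAspectRatio and
+// triggers the pixel-squaring logic in ReceiveFrame().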
+
+// The following constants are extracted from the specification "Universal
+// Serial Bus Device Class Definition for Video Devices", Rev. 1.1 June 1, 2005.
+// http://www.usb.org/developers/devclass_docs/USB_Video_Class_1_1.zip
+// CS_INTERFACE: Sec. A.4 "Video Class-Specific Descriptor Types".
+const int kVcCsInterface = 0x24;
+// VC_PROCESSING_UNIT: Sec. A.5 "Video Class-Specific VC Interface Descriptor
+// Subtypes".
+const int kVcProcessingUnit = 0x5;
+// SET_CUR: Sec. A.8 "Video Class-Specific Request Codes".
+const int kVcRequestCodeSetCur = 0x1;
+// PU_POWER_LINE_FREQUENCY_CONTROL: Sec. A.9.5 "Processing Unit Control
+// Selectors".
+const int kPuPowerLineFrequencyControl = 0x5;
+// Sec. 4.2.2.3.5 Power Line Frequency Control.
+const int k50Hz = 1;
+const int k60Hz = 2;
+const int kPuPowerLineFrequencyControlCommandSize = 1;
+
+// Addition to the IOUSB family of structures, with subtype and unit ID.
+typedef struct IOUSBInterfaceDescriptor {
+ IOUSBDescriptorHeader header;
+ UInt8 bDescriptorSubType;
+ UInt8 bUnitID;
+} IOUSBInterfaceDescriptor;
+
+static void GetBestMatchSupportedResolution(gfx::Size* resolution) {
+ int min_diff = kint32max;
+ const int desired_area = resolution->GetArea();
+ for (size_t i = 0; i < arraysize(kWellSupportedResolutions); ++i) {
+ const int area = kWellSupportedResolutions[i]->width *
+ kWellSupportedResolutions[i]->height;
+ const int diff = std::abs(desired_area - area);
+ if (diff < min_diff) {
+ min_diff = diff;
+ resolution->SetSize(kWellSupportedResolutions[i]->width,
+ kWellSupportedResolutions[i]->height);
+ }
+ }
+}
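+// Worked example (hypothetical request): asking for 800x600 (area 480000)
+// selects kVGA, since |480000 - 307200| = 172800 is smaller than the area
+// difference to kQVGA (403200) or to kHD (441600).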
+
+// Tries to create a user-side device interface for a given USB device. Returns
+// true if interface was found and passes it back in |device_interface|. The
+// caller should release |device_interface|.
+static bool FindDeviceInterfaceInUsbDevice(
+ const int vendor_id,
+ const int product_id,
+ const io_service_t usb_device,
+ IOUSBDeviceInterface*** device_interface) {
+  // Create a plugin, i.e. a user-side controller to manipulate the USB device.
+ IOCFPlugInInterface** plugin;
+ SInt32 score; // Unused, but required for IOCreatePlugInInterfaceForService.
+ kern_return_t kr = IOCreatePlugInInterfaceForService(
+ usb_device, kIOUSBDeviceUserClientTypeID, kIOCFPlugInInterfaceID, &plugin,
+ &score);
+ if (kr != kIOReturnSuccess || !plugin) {
+ DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ return false;
+ }
+ base::mac::ScopedIOPluginInterface<IOCFPlugInInterface> plugin_ref(plugin);
+
+ // Fetch the Device Interface from the plugin.
+ HRESULT res = (*plugin)->QueryInterface(
+ plugin, CFUUIDGetUUIDBytes(kIOUSBDeviceInterfaceID),
+ reinterpret_cast<LPVOID*>(device_interface));
+ if (!SUCCEEDED(res) || !*device_interface) {
+ DLOG(ERROR) << "QueryInterface, couldn't create interface to USB";
+ return false;
+ }
+ return true;
+}
+
+// Tries to find a Video Control type interface inside a general USB device
+// interface |device_interface|, and returns it in |video_control_interface| if
+// found. The returned interface must be released in the caller.
+static bool FindVideoControlInterfaceInDeviceInterface(
+ IOUSBDeviceInterface** device_interface,
+ IOCFPlugInInterface*** video_control_interface) {
+ // Create an iterator to the list of Video-AVControl interfaces of the device,
+ // then get the first interface in the list.
+ io_iterator_t interface_iterator;
+ IOUSBFindInterfaceRequest interface_request = {
+ .bInterfaceClass = kUSBVideoInterfaceClass,
+ .bInterfaceSubClass = kUSBVideoControlSubClass,
+ .bInterfaceProtocol = kIOUSBFindInterfaceDontCare,
+ .bAlternateSetting = kIOUSBFindInterfaceDontCare};
+ kern_return_t kr =
+ (*device_interface)
+ ->CreateInterfaceIterator(device_interface, &interface_request,
+ &interface_iterator);
+ if (kr != kIOReturnSuccess) {
+ DLOG(ERROR) << "Could not create an iterator to the device's interfaces.";
+ return false;
+ }
+ base::mac::ScopedIOObject<io_iterator_t> iterator_ref(interface_iterator);
+
+  // There should be just one interface matching the desired class and subclass.
+ io_service_t found_interface;
+ found_interface = IOIteratorNext(interface_iterator);
+ if (!found_interface) {
+ DLOG(ERROR) << "Could not find a Video-AVControl interface in the device.";
+ return false;
+ }
+ base::mac::ScopedIOObject<io_service_t> found_interface_ref(found_interface);
+
+ // Create a user side controller (i.e. a "plugin") for the found interface.
+ SInt32 score;
+ kr = IOCreatePlugInInterfaceForService(
+ found_interface, kIOUSBInterfaceUserClientTypeID, kIOCFPlugInInterfaceID,
+ video_control_interface, &score);
+ if (kr != kIOReturnSuccess || !*video_control_interface) {
+ DLOG(ERROR) << "IOCreatePlugInInterfaceForService";
+ return false;
+ }
+ return true;
+}
+
+// Creates a control interface for |plugin_interface| and produces a command to
+// set the appropriate Power Line frequency for flicker removal.
+static void SetAntiFlickerInVideoControlInterface(
+ IOCFPlugInInterface** plugin_interface,
+ const int frequency) {
+  // Create the control interface for the found plugin, and release
+ // the intermediate plugin.
+ IOUSBInterfaceInterface** control_interface = NULL;
+ HRESULT res =
+ (*plugin_interface)
+ ->QueryInterface(plugin_interface,
+ CFUUIDGetUUIDBytes(kIOUSBInterfaceInterfaceID),
+ reinterpret_cast<LPVOID*>(&control_interface));
+ if (!SUCCEEDED(res) || !control_interface) {
+ DLOG(ERROR) << "Couldn’t create control interface";
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOUSBInterfaceInterface>
+ control_interface_ref(control_interface);
+
+ // Find the device's unit ID presenting type 0x24 (kVcCsInterface) and
+  // subtype 0x5 (kVcProcessingUnit). This unit contains the power line
+  // frequency removal setting, and its id is device dependent.
+ int real_unit_id = -1;
+ IOUSBDescriptorHeader* descriptor = NULL;
+ IOUSBInterfaceDescriptor* cs_descriptor = NULL;
+ IOUSBInterfaceInterface220** interface =
+ reinterpret_cast<IOUSBInterfaceInterface220**>(control_interface);
+ while ((descriptor = (*interface)
+ ->FindNextAssociatedDescriptor(interface, descriptor,
+ kUSBAnyDesc))) {
+ cs_descriptor = reinterpret_cast<IOUSBInterfaceDescriptor*>(descriptor);
+ if ((descriptor->bDescriptorType == kVcCsInterface) &&
+ (cs_descriptor->bDescriptorSubType == kVcProcessingUnit)) {
+ real_unit_id = cs_descriptor->bUnitID;
+ break;
+ }
+ }
+ DVLOG_IF(1, real_unit_id == -1)
+ << "This USB device doesn't seem to have a "
+ << " VC_PROCESSING_UNIT, anti-flicker not available";
+ if (real_unit_id == -1)
+ return;
+
+ if ((*control_interface)->USBInterfaceOpen(control_interface) !=
+ kIOReturnSuccess) {
+ DLOG(ERROR) << "Unable to open control interface";
+ return;
+ }
+
+  // Create the control request and send it to the device's control interface.
+  // Note that wIndex needs the interface number OR'ed into its lowest bits.
+ IOUSBDevRequest command;
+ command.bmRequestType =
+ USBmakebmRequestType(kUSBOut, kUSBClass, kUSBInterface);
+ command.bRequest = kVcRequestCodeSetCur;
+ UInt8 interface_number;
+ (*control_interface)
+ ->GetInterfaceNumber(control_interface, &interface_number);
+ command.wIndex = (real_unit_id << 8) | interface_number;
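+  // For example, a (hypothetical) unit id of 4 on interface 0 yields a
+  // wIndex of 0x0400.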
+ const int selector = kPuPowerLineFrequencyControl;
+ command.wValue = (selector << 8);
+ command.wLength = kPuPowerLineFrequencyControlCommandSize;
+ command.wLenDone = 0;
+ int power_line_flag_value = (frequency == 50) ? k50Hz : k60Hz;
+ command.pData = &power_line_flag_value;
+
+ IOReturn ret =
+ (*control_interface)->ControlRequest(control_interface, 0, &command);
+ DLOG_IF(ERROR, ret != kIOReturnSuccess) << "Anti-flicker control request"
+ << " failed (0x" << std::hex << ret
+ << "), unit id: " << real_unit_id;
+ DVLOG_IF(1, ret == kIOReturnSuccess) << "Anti-flicker set to " << frequency
+ << "Hz";
+
+ (*control_interface)->USBInterfaceClose(control_interface);
+}
+
+// Sets the flicker removal in a USB webcam identified by |vendor_id| and
+// |product_id|, if available. The process includes first finding all USB
+// devices matching the specified |vendor_id| and |product_id|; for each
+// matching device, a device interface, and inside it a video control
+// interface, are created. The latter is used to send a power line frequency
+// setting command.
+static void SetAntiFlickerInUsbDevice(const int vendor_id,
+ const int product_id,
+ const int frequency) {
+ if (frequency == 0)
+ return;
+ DVLOG(1) << "Setting Power Line Frequency to " << frequency << " Hz, device "
+ << std::hex << vendor_id << "-" << product_id;
+
+ // Compose a search dictionary with vendor and product ID.
+ CFMutableDictionaryRef query_dictionary =
+ IOServiceMatching(kIOUSBDeviceClassName);
+ CFDictionarySetValue(
+ query_dictionary, CFSTR(kUSBVendorName),
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &vendor_id));
+ CFDictionarySetValue(
+ query_dictionary, CFSTR(kUSBProductName),
+ CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &product_id));
+
+ io_iterator_t usb_iterator;
+ kern_return_t kr = IOServiceGetMatchingServices(
+ kIOMasterPortDefault, query_dictionary, &usb_iterator);
+ if (kr != kIOReturnSuccess) {
+ DLOG(ERROR) << "No devices found with specified Vendor and Product ID.";
+ return;
+ }
+ base::mac::ScopedIOObject<io_iterator_t> usb_iterator_ref(usb_iterator);
+
+ while (io_service_t usb_device = IOIteratorNext(usb_iterator)) {
+ base::mac::ScopedIOObject<io_service_t> usb_device_ref(usb_device);
+
+ IOUSBDeviceInterface** device_interface = NULL;
+ if (!FindDeviceInterfaceInUsbDevice(vendor_id, product_id, usb_device,
+ &device_interface)) {
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOUSBDeviceInterface>
+ device_interface_ref(device_interface);
+
+ IOCFPlugInInterface** video_control_interface = NULL;
+ if (!FindVideoControlInterfaceInDeviceInterface(device_interface,
+ &video_control_interface)) {
+ return;
+ }
+ base::mac::ScopedIOPluginInterface<IOCFPlugInInterface>
+ plugin_interface_ref(video_control_interface);
+
+ SetAntiFlickerInVideoControlInterface(video_control_interface, frequency);
+ }
+}
+
+const std::string VideoCaptureDevice::Name::GetModel() const {
+  // For AVFoundation, skip devices that are neither USB nor built-in.
+ if (capture_api_type() == AVFOUNDATION && transport_type() != USB_OR_BUILT_IN)
+ return "";
+ if (capture_api_type() == DECKLINK)
+ return "";
+ // Both PID and VID are 4 characters.
+ if (unique_id_.size() < 2 * kVidPidSize)
+ return "";
+
+  // The last characters of the device id are the VID followed by the PID.
+ const size_t vid_location = unique_id_.size() - 2 * kVidPidSize;
+ std::string id_vendor = unique_id_.substr(vid_location, kVidPidSize);
+ const size_t pid_location = unique_id_.size() - kVidPidSize;
+ std::string id_product = unique_id_.substr(pid_location, kVidPidSize);
+
+ return id_vendor + ":" + id_product;
+}
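+// For example, a (hypothetical) |unique_id_| ending in "046d0823" yields the
+// model string "046d:0823".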
+
+VideoCaptureDeviceMac::VideoCaptureDeviceMac(const Name& device_name)
+ : device_name_(device_name),
+ tried_to_square_pixels_(false),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ state_(kNotInitialized),
+ capture_device_(nil),
+ weak_factory_(this) {
+ // Avoid reconfiguring AVFoundation or blacklisted devices.
+ final_resolution_selected_ = AVFoundationGlue::IsAVFoundationSupported() ||
+ device_name.is_blacklisted();
+}
+
+VideoCaptureDeviceMac::~VideoCaptureDeviceMac() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ [capture_device_ release];
+}
+
+void VideoCaptureDeviceMac::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (state_ != kIdle) {
+ return;
+ }
+
+  // The QTKit API can scale the captured frame to any requested size, which
+  // would lead to undesired aspect ratio changes. Try to open the camera with
+  // a known supported format and let the client crop/pad the captured frames.
+ gfx::Size resolution = params.requested_format.frame_size;
+ if (!AVFoundationGlue::IsAVFoundationSupported())
+ GetBestMatchSupportedResolution(&resolution);
+
+ client_ = client.Pass();
+ if (device_name_.capture_api_type() == Name::AVFOUNDATION)
+ LogMessage("Using AVFoundation for device: " + device_name_.name());
+ else
+ LogMessage("Using QTKit for device: " + device_name_.name());
+ NSString* deviceId =
+ [NSString stringWithUTF8String:device_name_.id().c_str()];
+
+ [capture_device_ setFrameReceiver:this];
+
+ if (![capture_device_ setCaptureDevice:deviceId]) {
+ SetErrorState("Could not open capture device.");
+ return;
+ }
+
+ capture_format_.frame_size = resolution;
+ capture_format_.frame_rate =
+ std::max(kMinFrameRate,
+ std::min(params.requested_format.frame_rate, kMaxFrameRate));
+ // Leave the pixel format selection to AVFoundation/QTKit. The pixel format
+ // will be passed to |ReceiveFrame|.
+ capture_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+
+ // QTKit: Set the capture resolution only if this is VGA or smaller, otherwise
+ // leave it unconfigured and start capturing: QTKit will produce frames at the
+ // native resolution, allowing us to identify cameras whose native resolution
+ // is too low for HD. This additional information comes at a cost in startup
+ // latency, because the webcam will need to be reopened if its default
+ // resolution is not HD or VGA.
+  // AVFoundation is configured for all resolutions.
+ if (AVFoundationGlue::IsAVFoundationSupported() ||
+ resolution.width() <= kVGA.width || resolution.height() <= kVGA.height) {
+ if (!UpdateCaptureResolution())
+ return;
+ }
+
+ // Try setting the power line frequency removal (anti-flicker). The built-in
+  // cameras are normally suspended, so the configuration must happen right
+  // before capture starts.
+ const std::string& device_model = device_name_.GetModel();
+ if (device_model.length() > 2 * kVidPidSize) {
+ std::string vendor_id = device_model.substr(0, kVidPidSize);
+ std::string model_id = device_model.substr(kVidPidSize + 1);
+ int vendor_id_as_int, model_id_as_int;
+ if (base::HexStringToInt(base::StringPiece(vendor_id), &vendor_id_as_int) &&
+ base::HexStringToInt(base::StringPiece(model_id), &model_id_as_int)) {
+ SetAntiFlickerInUsbDevice(vendor_id_as_int, model_id_as_int,
+ GetPowerLineFrequencyForLocation());
+ }
+ }
+
+ if (![capture_device_ startCapture]) {
+ SetErrorState("Could not start capture device.");
+ return;
+ }
+
+ state_ = kCapturing;
+}
+
+void VideoCaptureDeviceMac::StopAndDeAllocate() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK(state_ == kCapturing || state_ == kError) << state_;
+
+ [capture_device_ setCaptureDevice:nil];
+ [capture_device_ setFrameReceiver:nil];
+ client_.reset();
+ state_ = kIdle;
+ tried_to_square_pixels_ = false;
+}
+
+bool VideoCaptureDeviceMac::Init(
+ VideoCaptureDevice::Name::CaptureApiType capture_api_type) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_EQ(state_, kNotInitialized);
+
+ if (capture_api_type == Name::AVFOUNDATION) {
+ capture_device_ =
+ [[VideoCaptureDeviceAVFoundation alloc] initWithFrameReceiver:this];
+ } else {
+ capture_device_ =
+ [[VideoCaptureDeviceQTKit alloc] initWithFrameReceiver:this];
+ }
+
+ if (!capture_device_)
+ return false;
+
+ state_ = kIdle;
+ return true;
+}
+
+void VideoCaptureDeviceMac::ReceiveFrame(const uint8* video_frame,
+ int video_frame_length,
+ const VideoCaptureFormat& frame_format,
+ int aspect_numerator,
+ int aspect_denominator) {
+ // This method is safe to call from a device capture thread, i.e. any thread
+ // controlled by QTKit/AVFoundation.
+ if (!final_resolution_selected_) {
+ DCHECK(!AVFoundationGlue::IsAVFoundationSupported());
+ if (capture_format_.frame_size.width() > kVGA.width ||
+ capture_format_.frame_size.height() > kVGA.height) {
+ // We are requesting HD. Make sure that the picture is good, otherwise
+ // drop down to VGA.
+ bool change_to_vga = false;
+ if (frame_format.frame_size.width() <
+ capture_format_.frame_size.width() ||
+ frame_format.frame_size.height() <
+ capture_format_.frame_size.height()) {
+ // These are the default capture settings, not yet configured to match
+ // |capture_format_|.
+ DCHECK(frame_format.frame_rate == 0);
+ DVLOG(1) << "Switching to VGA because the default resolution is "
+ << frame_format.frame_size.ToString();
+ change_to_vga = true;
+ }
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
+ aspect_numerator != aspect_denominator) {
+ DVLOG(1) << "Switching to VGA because HD has nonsquare pixel "
+ << "aspect ratio " << aspect_numerator << ":"
+ << aspect_denominator;
+ change_to_vga = true;
+ }
+
+ if (change_to_vga)
+ capture_format_.frame_size.SetSize(kVGA.width, kVGA.height);
+ }
+
+ if (capture_format_.frame_size == frame_format.frame_size &&
+ !tried_to_square_pixels_ &&
+ (aspect_numerator > kMaxPixelAspectRatio * aspect_denominator ||
+ aspect_denominator > kMaxPixelAspectRatio * aspect_numerator)) {
+ // The requested size results in non-square PAR. Shrink the frame to 1:1
+ // PAR (assuming QTKit selects the same input mode, which is not
+ // guaranteed).
+ int new_width = capture_format_.frame_size.width();
+ int new_height = capture_format_.frame_size.height();
+ if (aspect_numerator < aspect_denominator)
+ new_width = (new_width * aspect_numerator) / aspect_denominator;
+ else
+ new_height = (new_height * aspect_denominator) / aspect_numerator;
+ capture_format_.frame_size.SetSize(new_width, new_height);
+ tried_to_square_pixels_ = true;
+ }
+
+ if (capture_format_.frame_size == frame_format.frame_size) {
+ final_resolution_selected_ = true;
+ } else {
+ UpdateCaptureResolution();
+      // Let the resolution update sink through QTKit and wait for the next
+      // frame.
+ return;
+ }
+ }
+
+ // QTKit capture source can change resolution if someone else reconfigures the
+ // camera, and that is fine: http://crbug.com/353620. In AVFoundation, this
+  // should not happen; the frames should be resized internally.
+ if (!AVFoundationGlue::IsAVFoundationSupported()) {
+ capture_format_.frame_size = frame_format.frame_size;
+ } else if (capture_format_.frame_size != frame_format.frame_size) {
+ ReceiveError("Captured resolution " + frame_format.frame_size.ToString() +
+ ", and expected " + capture_format_.frame_size.ToString());
+ return;
+ }
+
+ client_->OnIncomingCapturedData(video_frame, video_frame_length, frame_format,
+ 0, base::TimeTicks::Now());
+}
+
+void VideoCaptureDeviceMac::ReceiveError(const std::string& reason) {
+ task_runner_->PostTask(FROM_HERE,
+ base::Bind(&VideoCaptureDeviceMac::SetErrorState,
+ weak_factory_.GetWeakPtr(), reason));
+}
+
+void VideoCaptureDeviceMac::SetErrorState(const std::string& reason) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ state_ = kError;
+ client_->OnError(reason);
+}
+
+void VideoCaptureDeviceMac::LogMessage(const std::string& message) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (client_)
+ client_->OnLog(message);
+}
+
+bool VideoCaptureDeviceMac::UpdateCaptureResolution() {
+ if (![capture_device_ setCaptureHeight:capture_format_.frame_size.height()
+ width:capture_format_.frame_size.width()
+ frameRate:capture_format_.frame_rate]) {
+ ReceiveError("Could not configure capture device.");
+ return false;
+ }
+ return true;
+}
+
+} // namespace media
diff --git a/media/capture/video/mac/video_capture_device_qtkit_mac.h b/media/capture/video/mac/video_capture_device_qtkit_mac.h
new file mode 100644
index 0000000..a96dd6c
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_qtkit_mac.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// VideoCaptureDeviceQTKit implements all QTKit related code for
+// communicating with a QTKit capture device.
+
+#ifndef MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
+#define MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
+
+#import <Foundation/Foundation.h>
+
+#include <vector>
+
+#import "media/capture/video/mac/platform_video_capturing_mac.h"
+
+namespace media {
+class VideoCaptureDeviceMac;
+}
+
+@class QTCaptureDeviceInput;
+@class QTCaptureSession;
+
+@interface VideoCaptureDeviceQTKit : NSObject<PlatformVideoCapturingMac> {
+ @private
+ // Settings.
+ float frameRate_;
+
+ NSLock* lock_;
+ media::VideoCaptureDeviceMac* frameReceiver_;
+
+ // QTKit variables.
+ QTCaptureSession* captureSession_;
+ QTCaptureDeviceInput* captureDeviceInput_;
+
+ // Buffer for adjusting frames which do not fit receiver
+ // assumptions. scoped_array<> might make more sense, if the size
+ // can be proven invariant.
+ std::vector<UInt8> adjustedFrame_;
+}
+
+// Fills up the |deviceNames| dictionary of capture devices with friendly name
+// and unique id. No thread assumptions, but this method should run on the UI
+// thread; see http://crbug.com/139164.
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames;
+
+// Returns a dictionary of capture devices with friendly name and unique id,
+// by running +getDeviceNames: on the main thread.
++ (NSDictionary*)deviceNames;
+
+// Initializes the instance and registers the frame receiver.
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets the frame receiver.
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver;
+
+// Sets which capture device to use. Returns YES on success, NO otherwise.
+- (BOOL)setCaptureDevice:(NSString*)deviceId;
+
+// Configures the capture properties.
+- (BOOL)setCaptureHeight:(int)height
+ width:(int)width
+ frameRate:(float)frameRate;
+
+// Starts video capturing. Returns YES on success, NO otherwise.
+- (BOOL)startCapture;
+
+// Stops video capturing.
+- (void)stopCapture;
+
+// Handles any QTCaptureSessionRuntimeErrorNotifications.
+- (void)handleNotification:(NSNotification*)errorNotification;
+
+@end
+
+#endif // MEDIA_VIDEO_CAPTURE_MAC_VIDEO_CAPTURE_DEVICE_QTKIT_MAC_H_
diff --git a/media/capture/video/mac/video_capture_device_qtkit_mac.mm b/media/capture/video/mac/video_capture_device_qtkit_mac.mm
new file mode 100644
index 0000000..3415e03
--- /dev/null
+++ b/media/capture/video/mac/video_capture_device_qtkit_mac.mm
@@ -0,0 +1,360 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "media/capture/video/mac/video_capture_device_qtkit_mac.h"
+
+#import <QTKit/QTKit.h>
+
+#include "base/debug/crash_logging.h"
+#include "base/logging.h"
+#include "base/mac/scoped_nsexception_enabler.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/mac/video_capture_device_mac.h"
+#include "media/capture/video/video_capture_device.h"
+#include "ui/gfx/geometry/size.h"
+
+@implementation VideoCaptureDeviceQTKit
+
+#pragma mark Class methods
+
++ (void)getDeviceNames:(NSMutableDictionary*)deviceNames {
+ // Third-party drivers often throw exceptions, which are fatal in
+ // Chromium (see comments in scoped_nsexception_enabler.h). The
+ // following catches any exceptions and continues in an orderly
+ // fashion with no devices detected.
+ NSArray* captureDevices = base::mac::RunBlockIgnoringExceptions(^{
+ return [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+ });
+
+ for (QTCaptureDevice* device in captureDevices) {
+ if ([[device attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue])
+ continue;
+    DeviceNameAndTransportType* nameAndTransportType =
+        [[[DeviceNameAndTransportType alloc]
+             initWithName:[device localizedDisplayName]
+            transportType:media::kIOAudioDeviceTransportTypeUnknown]
+            autorelease];
+ [deviceNames setObject:nameAndTransportType forKey:[device uniqueID]];
+ }
+}
+
++ (NSDictionary*)deviceNames {
+ NSMutableDictionary* deviceNames =
+ [[[NSMutableDictionary alloc] init] autorelease];
+
+ // TODO(shess): Post to the main thread to see if that helps
+ // http://crbug.com/139164
+ [self performSelectorOnMainThread:@selector(getDeviceNames:)
+ withObject:deviceNames
+ waitUntilDone:YES];
+ return deviceNames;
+}
+
+#pragma mark Public methods
+
+- (id)initWithFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ self = [super init];
+ if (self) {
+ frameReceiver_ = frameReceiver;
+ lock_ = [[NSLock alloc] init];
+ }
+ return self;
+}
+
+- (void)dealloc {
+ [captureSession_ release];
+ [captureDeviceInput_ release];
+ [super dealloc];
+}
+
+- (void)setFrameReceiver:(media::VideoCaptureDeviceMac*)frameReceiver {
+ [lock_ lock];
+ frameReceiver_ = frameReceiver;
+ [lock_ unlock];
+}
+
+- (BOOL)setCaptureDevice:(NSString*)deviceId {
+ if (deviceId) {
+ // Set the capture device.
+ if (captureDeviceInput_) {
+ DLOG(ERROR) << "Video capture device already set.";
+ return NO;
+ }
+
+ // TODO(mcasas): Consider using [QTCaptureDevice deviceWithUniqueID] instead
+ // of explicitly forcing reenumeration of devices.
+ NSArray* captureDevices =
+ [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
+ NSArray* captureDevicesNames = [captureDevices valueForKey:@"uniqueID"];
+ NSUInteger index = [captureDevicesNames indexOfObject:deviceId];
+ if (index == NSNotFound) {
+ [self sendErrorString:[NSString stringWithUTF8String:
+ "Video capture device not found."]];
+ return NO;
+ }
+ QTCaptureDevice* device = [captureDevices objectAtIndex:index];
+ if ([[device
+ attributeForKey:QTCaptureDeviceSuspendedAttribute] boolValue]) {
+ [self sendErrorString:
+ [NSString stringWithUTF8String:
+ "Cannot open suspended video capture device."]];
+ return NO;
+ }
+ NSError* error;
+ if (![device open:&error]) {
+ [self sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not open video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+ return NO;
+ }
+ captureDeviceInput_ = [[QTCaptureDeviceInput alloc] initWithDevice:device];
+ captureSession_ = [[QTCaptureSession alloc] init];
+
+ QTCaptureDecompressedVideoOutput* captureDecompressedOutput =
+ [[[QTCaptureDecompressedVideoOutput alloc] init] autorelease];
+ [captureDecompressedOutput setDelegate:self];
+ [captureDecompressedOutput setAutomaticallyDropsLateVideoFrames:YES];
+ if (![captureSession_ addOutput:captureDecompressedOutput error:&error]) {
+ [self
+ sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not connect video capture output (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+ return NO;
+ }
+
+ // This key can be used to check if video capture code was related to a
+ // particular crash.
+ base::debug::SetCrashKeyValue("VideoCaptureDeviceQTKit", "OpenedDevice");
+
+ // Set the video pixel format to 2VUY (a.k.a UYVY, packed 4:2:2).
+ NSDictionary* captureDictionary = [NSDictionary
+ dictionaryWithObject:
+ [NSNumber numberWithUnsignedInt:kCVPixelFormatType_422YpCbCr8]
+ forKey:(id)kCVPixelBufferPixelFormatTypeKey];
+ [captureDecompressedOutput setPixelBufferAttributes:captureDictionary];
+
+ return YES;
+ } else {
+ // Remove the previously set capture device.
+ if (!captureDeviceInput_) {
+      // Getting here means stopping a device that never started correctly in
+      // the first place; log it.
+ [self sendLogString:[NSString
+ stringWithUTF8String:
+ "No video capture device set, on removal."]];
+ return YES;
+ }
+ // Tear down input and output, stop the capture and deregister observers.
+ [self stopCapture];
+ [captureSession_ release];
+ captureSession_ = nil;
+ [captureDeviceInput_ release];
+ captureDeviceInput_ = nil;
+ return YES;
+ }
+}
+
+- (BOOL)setCaptureHeight:(int)height
+ width:(int)width
+ frameRate:(float)frameRate {
+ if (!captureDeviceInput_) {
+ [self sendErrorString:
+ [NSString stringWithUTF8String:"No video capture device set."]];
+ return NO;
+ }
+ if ([[captureSession_ outputs] count] != 1) {
+ [self sendErrorString:[NSString
+ stringWithUTF8String:
+ "Video capture capabilities already set."]];
+ return NO;
+ }
+ if (frameRate <= 0.0f) {
+ [self sendErrorString:[NSString stringWithUTF8String:"Wrong frame rate."]];
+ return NO;
+ }
+
+ frameRate_ = frameRate;
+
+ QTCaptureDecompressedVideoOutput* output =
+ [[captureSession_ outputs] objectAtIndex:0];
+
+ // Set up desired output properties. The old capture dictionary is used to
+ // retrieve the initial pixel format, which must be maintained.
+ NSDictionary* videoSettingsDictionary = @{
+    (id)kCVPixelBufferWidthKey : @(width),
+    (id)kCVPixelBufferHeightKey : @(height),
+    (id)kCVPixelBufferPixelFormatTypeKey : [[output pixelBufferAttributes]
+        valueForKey:(id)kCVPixelBufferPixelFormatTypeKey]
+ };
+ [output setPixelBufferAttributes:videoSettingsDictionary];
+
+ [output setMinimumVideoFrameInterval:(NSTimeInterval)1 / frameRate];
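+  // E.g. a frameRate of 30 requests a minimum frame interval of 1/30 s,
+  // i.e. ~33 ms between frames.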
+ return YES;
+}
+
+- (BOOL)startCapture {
+ if ([[captureSession_ outputs] count] == 0) {
+ // Capture properties not set.
+ [self
+ sendErrorString:[NSString stringWithUTF8String:
+ "Video capture device not initialized."]];
+ return NO;
+ }
+ if ([[captureSession_ inputs] count] == 0) {
+ NSError* error;
+ if (![captureSession_ addInput:captureDeviceInput_ error:&error]) {
+ [self
+ sendErrorString:
+ [NSString stringWithFormat:
+ @"Could not connect video capture device (%@): %@",
+ [error localizedDescription],
+ [error localizedFailureReason]]];
+
+ return NO;
+ }
+ NSNotificationCenter* notificationCenter =
+ [NSNotificationCenter defaultCenter];
+ [notificationCenter addObserver:self
+ selector:@selector(handleNotification:)
+ name:QTCaptureSessionRuntimeErrorNotification
+ object:captureSession_];
+ [captureSession_ startRunning];
+ }
+ return YES;
+}
+
+- (void)stopCapture {
+ // QTKit achieves thread safety and asynchronous execution by posting messages
+ // to the main thread, e.g. -addOutput:. Both -removeOutput: and -removeInput:
+ // post a message to the main thread while holding a lock that the
+ // notification handler might need. To avoid a deadlock, we perform those
+ // tasks in the main thread. See bugs http://crbug.com/152757 and
+ // http://crbug.com/399792.
+ [self performSelectorOnMainThread:@selector(stopCaptureOnUIThread:)
+ withObject:nil
+ waitUntilDone:YES];
+ [[NSNotificationCenter defaultCenter] removeObserver:self];
+}
+
+- (void)stopCaptureOnUIThread:(id)dummy {
+ if ([[captureSession_ inputs] count] > 0) {
+ DCHECK_EQ([[captureSession_ inputs] count], 1u);
+ [captureSession_ removeInput:captureDeviceInput_];
+ [captureSession_ stopRunning];
+ }
+ if ([[captureSession_ outputs] count] > 0) {
+ DCHECK_EQ([[captureSession_ outputs] count], 1u);
+ id output = [[captureSession_ outputs] objectAtIndex:0];
+ [output setDelegate:nil];
+ [captureSession_ removeOutput:output];
+ }
+}
+
+// |captureOutput| is called by the capture device to deliver a new frame.
+- (void)captureOutput:(QTCaptureOutput*)captureOutput
+ didOutputVideoFrame:(CVImageBufferRef)videoFrame
+ withSampleBuffer:(QTSampleBuffer*)sampleBuffer
+ fromConnection:(QTCaptureConnection*)connection {
+ [lock_ lock];
+ if (!frameReceiver_) {
+ [lock_ unlock];
+ return;
+ }
+
+ // Lock the frame and calculate frame size.
+ const int kLockFlags = 0;
+ if (CVPixelBufferLockBaseAddress(videoFrame, kLockFlags) ==
+ kCVReturnSuccess) {
+ void* baseAddress = CVPixelBufferGetBaseAddress(videoFrame);
+ size_t bytesPerRow = CVPixelBufferGetBytesPerRow(videoFrame);
+ size_t frameWidth = CVPixelBufferGetWidth(videoFrame);
+ size_t frameHeight = CVPixelBufferGetHeight(videoFrame);
+ size_t frameSize = bytesPerRow * frameHeight;
+
+ // TODO(shess): bytesPerRow may not correspond to frameWidth_*2,
+ // but VideoCaptureController::OnIncomingCapturedData() requires
+ // it to do so. Plumbing things through is intrusive, for now
+ // just deliver an adjusted buffer.
+ // TODO(nick): This workaround could probably be eliminated by using
+ // VideoCaptureController::OnIncomingCapturedVideoFrame, which supports
+ // pitches.
+ UInt8* addressToPass = static_cast<UInt8*>(baseAddress);
+ // UYVY is 2 bytes per pixel.
+ size_t expectedBytesPerRow = frameWidth * 2;
+ if (bytesPerRow > expectedBytesPerRow) {
+ // TODO(shess): frameHeight and frameHeight_ are not the same,
+ // try to do what the surrounding code seems to assume.
+ // Ironically, captureCapability and frameSize are ignored
+ // anyhow.
+ adjustedFrame_.resize(expectedBytesPerRow * frameHeight);
+        // std::vector is contiguous according to the standard.
+ UInt8* adjustedAddress = &adjustedFrame_[0];
+
+ for (size_t y = 0; y < frameHeight; ++y) {
+ memcpy(adjustedAddress + y * expectedBytesPerRow,
+ addressToPass + y * bytesPerRow, expectedBytesPerRow);
+ }
+
+ addressToPass = adjustedAddress;
+ frameSize = frameHeight * expectedBytesPerRow;
+ }
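+    // Worked example (hypothetical values): a 640-wide UYVY frame should
+    // have 640 * 2 = 1280 bytes per row; if the driver reports 1312, each
+    // row is copied above into the tightly packed |adjustedFrame_| buffer.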
+
+ media::VideoCaptureFormat captureFormat(
+ gfx::Size(frameWidth, frameHeight), frameRate_,
+ media::VIDEO_CAPTURE_PIXEL_FORMAT_UYVY);
+
+ // The aspect ratio dictionary is often missing, in which case we report
+ // a pixel aspect ratio of 0:0.
+ int aspectNumerator = 0, aspectDenominator = 0;
+ CFDictionaryRef aspectRatioDict = (CFDictionaryRef)CVBufferGetAttachment(
+ videoFrame, kCVImageBufferPixelAspectRatioKey, NULL);
+ if (aspectRatioDict) {
+ CFNumberRef aspectNumeratorRef = (CFNumberRef)CFDictionaryGetValue(
+ aspectRatioDict, kCVImageBufferPixelAspectRatioHorizontalSpacingKey);
+ CFNumberRef aspectDenominatorRef = (CFNumberRef)CFDictionaryGetValue(
+ aspectRatioDict, kCVImageBufferPixelAspectRatioVerticalSpacingKey);
+ DCHECK(aspectNumeratorRef && aspectDenominatorRef)
+ << "Aspect Ratio dictionary missing its entries.";
+ CFNumberGetValue(aspectNumeratorRef, kCFNumberIntType, &aspectNumerator);
+ CFNumberGetValue(aspectDenominatorRef, kCFNumberIntType,
+ &aspectDenominator);
+ }
+
+ // Deliver the captured video frame.
+ frameReceiver_->ReceiveFrame(addressToPass, frameSize, captureFormat,
+ aspectNumerator, aspectDenominator);
+
+ CVPixelBufferUnlockBaseAddress(videoFrame, kLockFlags);
+ }
+ [lock_ unlock];
+}
+
+- (void)handleNotification:(NSNotification*)errorNotification {
+ NSError* error = (NSError*)
+ [[errorNotification userInfo] objectForKey:QTCaptureSessionErrorKey];
+ [self sendErrorString:
+ [NSString stringWithFormat:@"%@: %@", [error localizedDescription],
+ [error localizedFailureReason]]];
+}
+
+- (void)sendErrorString:(NSString*)error {
+ DLOG(ERROR) << [error UTF8String];
+ [lock_ lock];
+ if (frameReceiver_)
+ frameReceiver_->ReceiveError([error UTF8String]);
+ [lock_ unlock];
+}
+
+- (void)sendLogString:(NSString*)message {
+ DVLOG(1) << [message UTF8String];
+ [lock_ lock];
+ if (frameReceiver_)
+ frameReceiver_->LogMessage([message UTF8String]);
+ [lock_ unlock];
+}
+
+@end
diff --git a/media/capture/video/video_capture_device.cc b/media/capture/video/video_capture_device.cc
new file mode 100644
index 0000000..7260589
--- /dev/null
+++ b/media/capture/video/video_capture_device.cc
@@ -0,0 +1,161 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/video_capture_device.h"
+
+#include "base/i18n/timezone.h"
+#include "base/strings/string_util.h"
+
+namespace media {
+
+const std::string VideoCaptureDevice::Name::GetNameAndModel() const {
+ const std::string model_id = GetModel();
+ if (model_id.empty())
+ return device_name_;
+ const std::string suffix = " (" + model_id + ")";
+ if (base::EndsWith(device_name_, suffix, base::CompareCase::SENSITIVE))
+ return device_name_;
+ return device_name_ + suffix;
+}
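+// For example, a device named "HD Webcam" with a (hypothetical) model id of
+// "046d:0823" is reported as "HD Webcam (046d:0823)".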
+
+VideoCaptureDevice::Name::Name() {
+}
+
+VideoCaptureDevice::Name::Name(const std::string& name, const std::string& id)
+ : device_name_(name), unique_id_(id) {
+}
+
+#if defined(OS_LINUX)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name), unique_id_(id), capture_api_class_(api_type) {
+}
+#elif defined(OS_WIN)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type),
+ capabilities_id_(id) {
+}
+#elif defined(OS_MACOSX)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type),
+ transport_type_(OTHER_TRANSPORT),
+ is_blacklisted_(false) {
+}
+
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type,
+ const TransportType transport_type)
+ : device_name_(name),
+ unique_id_(id),
+ capture_api_class_(api_type),
+ transport_type_(transport_type),
+ is_blacklisted_(false) {
+}
+#elif defined(OS_ANDROID)
+VideoCaptureDevice::Name::Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type)
+ : device_name_(name), unique_id_(id), capture_api_class_(api_type) {
+}
+#endif
+
+VideoCaptureDevice::Name::~Name() {
+}
+
+#if defined(OS_LINUX)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch (capture_api_type()) {
+ case V4L2_SINGLE_PLANE:
+ return "V4L2 SPLANE";
+ case V4L2_MULTI_PLANE:
+ return "V4L2 MPLANE";
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
+#elif defined(OS_WIN)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch (capture_api_type()) {
+ case MEDIA_FOUNDATION:
+ return "Media Foundation";
+ case DIRECT_SHOW:
+ return "Direct Show";
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
+#elif defined(OS_MACOSX)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch (capture_api_type()) {
+ case AVFOUNDATION:
+ return "AV Foundation";
+ case QTKIT:
+ return "QTKit";
+ case DECKLINK:
+ return "DeckLink";
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
+#elif defined(OS_ANDROID)
+const char* VideoCaptureDevice::Name::GetCaptureApiTypeString() const {
+ switch (capture_api_type()) {
+ case API1:
+ return "Camera API1";
+ case API2_LEGACY:
+ return "Camera API2 Legacy";
+ case API2_FULL:
+ return "Camera API2 Full";
+ case API2_LIMITED:
+ return "Camera API2 Limited";
+ case TANGO:
+ return "Tango API";
+ case API_TYPE_UNKNOWN:
+ default:
+ NOTREACHED() << "Unknown Video Capture API type!";
+ return "Unknown API";
+ }
+}
+#endif
+
+VideoCaptureDevice::Client::Buffer::~Buffer() {
+}
+
+VideoCaptureDevice::~VideoCaptureDevice() {
+}
+
+int VideoCaptureDevice::GetPowerLineFrequencyForLocation() const {
+ std::string current_country = base::CountryCodeForCurrentTimezone();
+ if (current_country.empty())
+ return 0;
+  // Sorted list of countries with 60Hz power line frequency, from
+ // http://en.wikipedia.org/wiki/Mains_electricity_by_country
+ const char* countries_using_60Hz[] = {
+ "AI", "AO", "AS", "AW", "AZ", "BM", "BR", "BS", "BZ", "CA", "CO",
+ "CR", "CU", "DO", "EC", "FM", "GT", "GU", "GY", "HN", "HT", "JP",
+ "KN", "KR", "KY", "MS", "MX", "NI", "PA", "PE", "PF", "PH", "PR",
+ "PW", "SA", "SR", "SV", "TT", "TW", "UM", "US", "VG", "VI", "VE"};
+ const char** countries_using_60Hz_end =
+ countries_using_60Hz + arraysize(countries_using_60Hz);
+ if (std::find(countries_using_60Hz, countries_using_60Hz_end,
+ current_country) == countries_using_60Hz_end) {
+ return kPowerLine50Hz;
+ }
+ return kPowerLine60Hz;
+}
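+
+// Illustrative outcomes (assuming base::CountryCodeForCurrentTimezone()
+// resolves as expected): "US" and "JP" yield kPowerLine60Hz, "DE" and "GB"
+// yield kPowerLine50Hz, and an undetermined country code yields 0.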
+
+} // namespace media
diff --git a/media/capture/video/video_capture_device.h b/media/capture/video/video_capture_device.h
new file mode 100644
index 0000000..75f9865
--- /dev/null
+++ b/media/capture/video/video_capture_device.h
@@ -0,0 +1,293 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// VideoCaptureDevice is the abstract base class for realizing video capture
+// device support in Chromium. It provides the interface for OS dependent
+// implementations.
+// The class is created and functions are invoked on a thread owned by
+// VideoCaptureManager. Capturing is done on other threads, depending on the OS
+// specific implementation.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_H_
+
+#include <list>
+#include <string>
+
+#include "base/files/file.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
+#include "media/base/media_export.h"
+#include "media/base/video_capture_types.h"
+#include "media/base/video_frame.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+namespace media {
+
+class MEDIA_EXPORT VideoCaptureDevice {
+ public:
+ // Represents a capture device name and ID.
+  // You should not create an instance of this class directly, e.g. by setting
+  // various properties. Instead use VideoCaptureDevice::GetDeviceNames to do
+  // this for you and, if you need to cache your own copy of a name, do so via
+  // the copy constructor.
+ // The reason for this is that a device name might contain platform specific
+ // settings that are relevant only to the platform specific implementation of
+ // VideoCaptureDevice::Create.
+ class MEDIA_EXPORT Name {
+ public:
+ Name();
+ Name(const std::string& name, const std::string& id);
+
+#if defined(OS_LINUX)
+    // Capture API type for Linux/CrOS; it can only be set on construction.
+ enum CaptureApiType {
+ V4L2_SINGLE_PLANE,
+ V4L2_MULTI_PLANE,
+ API_TYPE_UNKNOWN
+ };
+#elif defined(OS_WIN)
+    // Capture API type for Windows; it can only be set on construction.
+ enum CaptureApiType { MEDIA_FOUNDATION, DIRECT_SHOW, API_TYPE_UNKNOWN };
+#elif defined(OS_MACOSX)
+    // Capture API type for Mac; it can only be set on construction.
+ enum CaptureApiType { AVFOUNDATION, QTKIT, DECKLINK, API_TYPE_UNKNOWN };
+ // For AVFoundation Api, identify devices that are built-in or USB.
+ enum TransportType { USB_OR_BUILT_IN, OTHER_TRANSPORT };
+#elif defined(OS_ANDROID)
+    // Capture API type for Android; it can only be set on construction.
+ // Automatically generated enum to interface with Java world.
+ //
+ // A Java counterpart will be generated for this enum.
+ // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.media
+ enum CaptureApiType {
+ API1,
+ API2_LEGACY,
+ API2_FULL,
+ API2_LIMITED,
+ TANGO,
+ API_TYPE_UNKNOWN
+ };
+#endif
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+ Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type);
+#endif
+#if defined(OS_MACOSX)
+ Name(const std::string& name,
+ const std::string& id,
+ const CaptureApiType api_type,
+ const TransportType transport_type);
+#endif
+ ~Name();
+
+    // Friendly name of a device.
+ const std::string& name() const { return device_name_; }
+
+ // Unique name of a device. Even if there are multiple devices with the same
+ // friendly name connected to the computer this will be unique.
+ const std::string& id() const { return unique_id_; }
+
+ // The unique hardware model identifier of the capture device. Returns
+ // "[vid]:[pid]" when a USB device is detected, otherwise "".
+ // The implementation of this method is platform-dependent.
+ const std::string GetModel() const;
+
+ // Friendly name of a device, plus the model identifier in parentheses.
+ const std::string GetNameAndModel() const;
+
+ // These operators are needed due to storing the name in an STL container.
+ // In the shared build, all methods from the STL container will be exported
+ // so even though they're not used, they're still depended upon.
+ bool operator==(const Name& other) const {
+ return other.id() == unique_id_;
+ }
+ bool operator<(const Name& other) const { return unique_id_ < other.id(); }
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+ CaptureApiType capture_api_type() const {
+ return capture_api_class_.capture_api_type();
+ }
+ const char* GetCaptureApiTypeString() const;
+#endif
+#if defined(OS_WIN)
+ // Certain devices need an ID different from the |unique_id_| for
+ // capabilities retrieval.
+ const std::string& capabilities_id() const { return capabilities_id_; }
+ void set_capabilities_id(const std::string& id) { capabilities_id_ = id; }
+#endif // if defined(OS_WIN)
+#if defined(OS_MACOSX)
+ TransportType transport_type() const { return transport_type_; }
+ bool is_blacklisted() const { return is_blacklisted_; }
+ void set_is_blacklisted(bool is_blacklisted) {
+ is_blacklisted_ = is_blacklisted;
+ }
+#endif // if defined(OS_MACOSX)
+
+ private:
+ std::string device_name_;
+ std::string unique_id_;
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_ANDROID)
+    // This class wraps the CaptureApiType to give it a default value if not
+ // initialized.
+ class CaptureApiClass {
+ public:
+ CaptureApiClass() : capture_api_type_(API_TYPE_UNKNOWN) {}
+ CaptureApiClass(const CaptureApiType api_type)
+ : capture_api_type_(api_type) {}
+ CaptureApiType capture_api_type() const {
+ DCHECK_NE(capture_api_type_, API_TYPE_UNKNOWN);
+ return capture_api_type_;
+ }
+
+ private:
+ CaptureApiType capture_api_type_;
+ };
+
+ CaptureApiClass capture_api_class_;
+#endif
+#if defined(OS_WIN)
+    // ID used for capabilities retrieval; by default equal to |unique_id_|.
+ std::string capabilities_id_;
+#endif
+#if defined(OS_MACOSX)
+ TransportType transport_type_;
+    // Flag used to mark blacklisted devices for the QTKit API.
+ bool is_blacklisted_;
+#endif
+ // Allow generated copy constructor and assignment.
+ };
+
+ // Manages a list of Name entries.
+ typedef std::list<Name> Names;
+
+ // Interface defining the methods that clients of VideoCapture must have. It
+ // is actually two-in-one: clients may implement OnIncomingCapturedData() or
+ // ReserveOutputBuffer() + OnIncomingCapturedVideoFrame(), or all of them.
+ // All clients must implement OnError().
+ class MEDIA_EXPORT Client {
+ public:
+ // Memory buffer returned by Client::ReserveOutputBuffer().
+ class MEDIA_EXPORT Buffer {
+ public:
+ virtual ~Buffer() = 0;
+ virtual int id() const = 0;
+ virtual size_t size() const = 0;
+ virtual void* data() = 0;
+ virtual ClientBuffer AsClientBuffer() = 0;
+#if defined(OS_POSIX)
+ virtual base::FileDescriptor AsPlatformFile() = 0;
+#endif
+ };
+
+ virtual ~Client() {}
+
+ // Captured a new video frame, data for which is pointed to by |data|.
+ //
+ // The format of the frame is described by |frame_format|, and is assumed to
+ // be tightly packed. This method will try to reserve an output buffer and
+ // copy from |data| into the output buffer. If no output buffer is
+ // available, the frame will be silently dropped.
+ virtual void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) = 0;
+
+ // Captured a 3 planar YUV frame. Planes are possibly disjoint.
+ // |frame_format| must indicate I420.
+ virtual void OnIncomingCapturedYuvData(
+ const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp) = 0;
+
+ // Reserve an output buffer into which contents can be captured directly.
+ // The returned Buffer will always be allocated with a memory size suitable
+ // for holding a packed video frame with pixels of |format| format, of
+ // |dimensions| frame dimensions. It is permissible for |dimensions| to be
+    // zero, in which case the returned Buffer does not guarantee memory
+ // backing, but functions as a reservation for external input for the
+ // purposes of buffer throttling.
+ //
+ // The output buffer stays reserved and mapped for use until the Buffer
+ // object is destroyed or returned.
+ virtual scoped_ptr<Buffer> ReserveOutputBuffer(
+ const gfx::Size& dimensions,
+ VideoCapturePixelFormat format,
+ VideoPixelStorage storage) = 0;
+
+ // Captured new video data, held in |frame| or |buffer|, respectively for
+ // OnIncomingCapturedVideoFrame() and OnIncomingCapturedBuffer().
+ //
+ // In both cases, as the frame is backed by a reservation returned by
+ // ReserveOutputBuffer(), delivery is guaranteed and will require no
+ // additional copies in the browser process.
+ virtual void OnIncomingCapturedBuffer(
+ scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) = 0;
+ virtual void OnIncomingCapturedVideoFrame(
+ scoped_ptr<Buffer> buffer,
+ const scoped_refptr<VideoFrame>& frame,
+ const base::TimeTicks& timestamp) = 0;
+
+ // An error has occurred that cannot be handled and VideoCaptureDevice must
+ // be StopAndDeAllocate()-ed. |reason| is a text description of the error.
+ virtual void OnError(const std::string& reason) = 0;
+
+ // VideoCaptureDevice requests the |message| to be logged.
+ virtual void OnLog(const std::string& message) {}
+
+ // Returns the current buffer pool utilization, in the range 0.0 (no buffers
+ // are in use by producers or consumers) to 1.0 (all buffers are in use).
+ virtual double GetBufferPoolUtilization() const = 0;
+ };
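+
+  // A minimal Client sketch for illustration (assumed, not part of the
+  // original change); it consumes packed frames via OnIncomingCapturedData()
+  // and stubs out the remaining delivery paths:
+  //
+  //   class LoggingClient : public VideoCaptureDevice::Client {
+  //    public:
+  //     void OnIncomingCapturedData(const uint8* data, int length,
+  //                                 const VideoCaptureFormat& frame_format,
+  //                                 int clockwise_rotation,
+  //                                 const base::TimeTicks& timestamp) override {
+  //       DVLOG(1) << "frame " << frame_format.frame_size.ToString();
+  //     }
+  //     void OnError(const std::string& reason) override {
+  //       LOG(ERROR) << reason;
+  //     }
+  //     ...  // Remaining pure virtual methods stubbed out analogously.
+  //   };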
+
+ virtual ~VideoCaptureDevice();
+
+  // Prepares the camera for use. After this function has been called, no
+  // other applications can use the camera. StopAndDeAllocate() must be called
+  // before the object is deleted.
+ virtual void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<Client> client) = 0;
+
+ // Deallocates the camera, possibly asynchronously.
+ //
+ // This call requires the device to do the following things, eventually: put
+ // camera hardware into a state where other applications could use it, free
+ // the memory associated with capture, and delete the |client| pointer passed
+ // into AllocateAndStart.
+ //
+ // If deallocation is done asynchronously, then the device implementation must
+ // ensure that a subsequent AllocateAndStart() operation targeting the same ID
+ // would be sequenced through the same task runner, so that deallocation
+ // happens first.
+ virtual void StopAndDeAllocate() = 0;
+
+ // Gets the power line frequency from the current system time zone if this
+ // is defined; otherwise returns 0.
+ int GetPowerLineFrequencyForLocation() const;
+
+ protected:
+ static const int kPowerLine50Hz = 50;
+ static const int kPowerLine60Hz = 60;
+};
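+
+// Illustrative lifecycle sketch (not part of this CL; |factory|, |params| and
+// |client| are assumed to exist):
+//
+//   scoped_ptr<VideoCaptureDevice> device = factory->Create(device_name);
+//   device->AllocateAndStart(params, client.Pass());
+//   ...  // Frames arrive via VideoCaptureDevice::Client callbacks.
+//   device->StopAndDeAllocate();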
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_H_
diff --git a/media/capture/video/video_capture_device_factory.cc b/media/capture/video/video_capture_device_factory.cc
new file mode 100644
index 0000000..aa6be6f
--- /dev/null
+++ b/media/capture/video/video_capture_device_factory.cc
@@ -0,0 +1,65 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+#include "base/command_line.h"
+#include "media/base/media_switches.h"
+#include "media/capture/video/fake_video_capture_device_factory.h"
+#include "media/capture/video/file_video_capture_device_factory.h"
+
+namespace media {
+
+// static
+scoped_ptr<VideoCaptureDeviceFactory> VideoCaptureDeviceFactory::CreateFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ const base::CommandLine* command_line =
+ base::CommandLine::ForCurrentProcess();
+ // Use a Fake or File Video Device Factory if the command line flags are
+ // present; otherwise use the normal, platform-dependent device factory.
+ if (command_line->HasSwitch(switches::kUseFakeDeviceForMediaStream)) {
+ if (command_line->HasSwitch(switches::kUseFileForFakeVideoCapture)) {
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ new media::FileVideoCaptureDeviceFactory());
+ } else {
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ new media::FakeVideoCaptureDeviceFactory());
+ }
+ } else {
+ // |ui_task_runner| is needed for the Linux ChromeOS factory to retrieve
+ // screen rotations and for the Mac factory to run QTKit device enumeration.
+ return scoped_ptr<VideoCaptureDeviceFactory>(
+ CreateVideoCaptureDeviceFactory(ui_task_runner));
+ }
+}
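+
+// For reference, the switches checked above map to command-line flags such as
+// (illustrative; the file path is a placeholder):
+//   --use-fake-device-for-media-stream
+//   --use-fake-device-for-media-stream
+//       --use-file-for-fake-video-capture=/tmp/clip.y4m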
+
+VideoCaptureDeviceFactory::VideoCaptureDeviceFactory() {
+ thread_checker_.DetachFromThread();
+}
+
+VideoCaptureDeviceFactory::~VideoCaptureDeviceFactory() {
+}
+
+void VideoCaptureDeviceFactory::EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!callback.is_null());
+ scoped_ptr<VideoCaptureDevice::Names> device_names(
+ new VideoCaptureDevice::Names());
+ GetDeviceNames(device_names.get());
+ callback.Run(device_names.Pass());
+}
+
+#if !defined(OS_MACOSX) && !defined(OS_LINUX) && !defined(OS_ANDROID) && \
+ !defined(OS_WIN)
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ NOTIMPLEMENTED();
+ return NULL;
+}
+#endif
+
+} // namespace media
diff --git a/media/capture/video/video_capture_device_factory.h b/media/capture/video/video_capture_device_factory.h
new file mode 100644
index 0000000..1cd5d17
--- /dev/null
+++ b/media/capture/video/video_capture_device_factory.h
@@ -0,0 +1,57 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
+
+#include "base/threading/thread_checker.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// VideoCaptureDeviceFactory is the base class for the creation of video
+// capture devices on the different platforms. VCDFs are created by
+// MediaStreamManager on the IO thread and plugged into VideoCaptureManager,
+// which owns and operates them on the Device Thread (a.k.a. Audio Thread).
+class MEDIA_EXPORT VideoCaptureDeviceFactory {
+ public:
+ static scoped_ptr<VideoCaptureDeviceFactory> CreateFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+
+ VideoCaptureDeviceFactory();
+ virtual ~VideoCaptureDeviceFactory();
+
+ // Creates a VideoCaptureDevice object. Returns NULL if something goes wrong.
+ virtual scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) = 0;
+
+ // Asynchronous version of GetDeviceNames calling back to |callback|.
+ virtual void EnumerateDeviceNames(const base::Callback<
+ void(scoped_ptr<media::VideoCaptureDevice::Names>)>& callback);
+
+ // Gets the supported formats of a particular device attached to the system.
+ // This method should be called before allocating or starting a device. In
+ // case format enumeration is not supported, or there was a problem, the
+ // formats array will be empty.
+ virtual void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) = 0;
+
+ protected:
+ // Gets the names of all video capture devices connected to this computer.
+ // Used by the default implementation of EnumerateDeviceNames().
+ virtual void GetDeviceNames(VideoCaptureDevice::Names* device_names) = 0;
+
+ base::ThreadChecker thread_checker_;
+
+ private:
+ static VideoCaptureDeviceFactory* CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner);
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactory);
+};
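+
+// Typical use, as an illustrative sketch (OnNamesEnumerated is a hypothetical
+// callback, not part of this CL):
+//
+//   scoped_ptr<VideoCaptureDeviceFactory> factory =
+//       VideoCaptureDeviceFactory::CreateFactory(ui_task_runner);
+//   factory->EnumerateDeviceNames(base::Bind(&OnNamesEnumerated));
+//   scoped_ptr<VideoCaptureDevice> device = factory->Create(chosen_name);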
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_H_
diff --git a/media/capture/video/video_capture_device_info.cc b/media/capture/video/video_capture_device_info.cc
new file mode 100644
index 0000000..3bb4df2
--- /dev/null
+++ b/media/capture/video/video_capture_device_info.cc
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/video_capture_device_info.h"
+
+namespace media {
+
+VideoCaptureDeviceInfo::VideoCaptureDeviceInfo() {
+}
+
+VideoCaptureDeviceInfo::VideoCaptureDeviceInfo(
+ const VideoCaptureDevice::Name& name,
+ const VideoCaptureFormats& supported_formats)
+ : name(name), supported_formats(supported_formats) {
+}
+
+VideoCaptureDeviceInfo::~VideoCaptureDeviceInfo() {
+}
+
+} // namespace media
diff --git a/media/capture/video/video_capture_device_info.h b/media/capture/video/video_capture_device_info.h
new file mode 100644
index 0000000..3cdb1b5
--- /dev/null
+++ b/media/capture/video/video_capture_device_info.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
+
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+
+namespace media {
+
+// A convenience wrapper for a device's name and associated supported formats.
+struct MEDIA_EXPORT VideoCaptureDeviceInfo {
+ VideoCaptureDeviceInfo();
+ VideoCaptureDeviceInfo(const VideoCaptureDevice::Name& name,
+ const VideoCaptureFormats& supported_formats);
+ ~VideoCaptureDeviceInfo();
+
+ VideoCaptureDevice::Name name;
+ VideoCaptureFormats supported_formats;
+};
+
+typedef std::vector<VideoCaptureDeviceInfo> VideoCaptureDeviceInfos;
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_INFO_H_
diff --git a/media/capture/video/video_capture_device_unittest.cc b/media/capture/video/video_capture_device_unittest.cc
new file mode 100644
index 0000000..44bcaca
--- /dev/null
+++ b/media/capture/video/video_capture_device_unittest.cc
@@ -0,0 +1,485 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/video_capture_device_factory.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "media/capture/video/win/video_capture_device_factory_win.h"
+#endif
+
+#if defined(OS_MACOSX)
+#include "media/base/mac/avfoundation_glue.h"
+#include "media/capture/video/mac/video_capture_device_factory_mac.h"
+#endif
+
+#if defined(OS_ANDROID)
+#include "base/android/jni_android.h"
+#include "media/capture/video/android/video_capture_device_android.h"
+#endif
+
+#if defined(OS_MACOSX)
+// Mac/QTKit will always give you the size you ask for, so this case will fail.
+#define MAYBE_AllocateBadSize DISABLED_AllocateBadSize
+// We will always get YUYV from the Mac QTKit/AVFoundation implementations.
+#define MAYBE_CaptureMjpeg DISABLED_CaptureMjpeg
+#elif defined(OS_WIN)
+#define MAYBE_AllocateBadSize AllocateBadSize
+#define MAYBE_CaptureMjpeg CaptureMjpeg
+#elif defined(OS_ANDROID)
+// TODO(wjia): enable these tests on Android.
+// On Android, the native (Java) camera delivers frames on the UI thread,
+// which is the main thread for tests. This results in no frames being
+// received by VideoCaptureAndroid.
+#define MAYBE_AllocateBadSize DISABLED_AllocateBadSize
+#define ReAllocateCamera DISABLED_ReAllocateCamera
+#define DeAllocateCameraWhileRunning DISABLED_DeAllocateCameraWhileRunning
+#define MAYBE_CaptureMjpeg DISABLED_CaptureMjpeg
+#else
+#define MAYBE_AllocateBadSize AllocateBadSize
+#define MAYBE_CaptureMjpeg CaptureMjpeg
+#endif
+
+using ::testing::_;
+using ::testing::SaveArg;
+
+namespace media {
+namespace {
+
+static const gfx::Size kCaptureSizes[] = {gfx::Size(640, 480),
+ gfx::Size(1280, 720)};
+
+class MockClient : public VideoCaptureDevice::Client {
+ public:
+ MOCK_METHOD9(OnIncomingCapturedYuvData,
+ void(const uint8* y_data,
+ const uint8* u_data,
+ const uint8* v_data,
+ size_t y_stride,
+ size_t u_stride,
+ size_t v_stride,
+ const VideoCaptureFormat& frame_format,
+ int clockwise_rotation,
+ const base::TimeTicks& timestamp));
+ MOCK_METHOD0(DoReserveOutputBuffer, void(void));
+ MOCK_METHOD0(DoOnIncomingCapturedBuffer, void(void));
+ MOCK_METHOD0(DoOnIncomingCapturedVideoFrame, void(void));
+ MOCK_METHOD1(OnError, void(const std::string& reason));
+ MOCK_CONST_METHOD0(GetBufferPoolUtilization, double(void));
+
+ explicit MockClient(base::Callback<void(const VideoCaptureFormat&)> frame_cb)
+ : main_thread_(base::ThreadTaskRunnerHandle::Get()),
+ frame_cb_(frame_cb) {}
+
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ const VideoCaptureFormat& format,
+ int rotation,
+ const base::TimeTicks& timestamp) override {
+ ASSERT_GT(length, 0);
+ ASSERT_TRUE(data != NULL);
+ main_thread_->PostTask(FROM_HERE, base::Bind(frame_cb_, format));
+ }
+
+ // Trampoline methods to work around GMock's lack of support for move-only
+ // types such as scoped_ptr<>.
+ scoped_ptr<Buffer> ReserveOutputBuffer(
+ const gfx::Size& dimensions,
+ media::VideoCapturePixelFormat format,
+ media::VideoPixelStorage storage) override {
+ DoReserveOutputBuffer();
+ NOTREACHED() << "This should never be called";
+ return scoped_ptr<Buffer>();
+ }
+ void OnIncomingCapturedBuffer(scoped_ptr<Buffer> buffer,
+ const VideoCaptureFormat& frame_format,
+ const base::TimeTicks& timestamp) override {
+ DoOnIncomingCapturedBuffer();
+ }
+ void OnIncomingCapturedVideoFrame(scoped_ptr<Buffer> buffer,
+ const scoped_refptr<VideoFrame>& frame,
+ const base::TimeTicks& timestamp) override {
+ DoOnIncomingCapturedVideoFrame();
+ }
+
+ private:
+ scoped_refptr<base::SingleThreadTaskRunner> main_thread_;
+ base::Callback<void(const VideoCaptureFormat&)> frame_cb_;
+};
+
+class DeviceEnumerationListener
+ : public base::RefCounted<DeviceEnumerationListener> {
+ public:
+ MOCK_METHOD1(OnEnumeratedDevicesCallbackPtr,
+ void(VideoCaptureDevice::Names* names));
+ // GMock doesn't support move-only arguments, so we use this forwarding
+ // method.
+ void OnEnumeratedDevicesCallback(
+ scoped_ptr<VideoCaptureDevice::Names> names) {
+ OnEnumeratedDevicesCallbackPtr(names.release());
+ }
+
+ private:
+ friend class base::RefCounted<DeviceEnumerationListener>;
+ virtual ~DeviceEnumerationListener() {}
+};
+
+} // namespace
+
+class VideoCaptureDeviceTest : public testing::TestWithParam<gfx::Size> {
+ protected:
+ typedef VideoCaptureDevice::Client Client;
+
+ VideoCaptureDeviceTest()
+ : loop_(new base::MessageLoop()),
+ client_(
+ new MockClient(base::Bind(&VideoCaptureDeviceTest::OnFrameCaptured,
+ base::Unretained(this)))),
+ video_capture_device_factory_(VideoCaptureDeviceFactory::CreateFactory(
+ base::ThreadTaskRunnerHandle::Get())) {
+ device_enumeration_listener_ = new DeviceEnumerationListener();
+ }
+
+ void SetUp() override {
+#if defined(OS_ANDROID)
+ VideoCaptureDeviceAndroid::RegisterVideoCaptureDevice(
+ base::android::AttachCurrentThread());
+#endif
+#if defined(OS_MACOSX)
+ AVFoundationGlue::InitializeAVFoundation();
+#endif
+ EXPECT_CALL(*client_, OnIncomingCapturedYuvData(_, _, _, _, _, _, _, _, _))
+ .Times(0);
+ EXPECT_CALL(*client_, DoReserveOutputBuffer()).Times(0);
+ EXPECT_CALL(*client_, DoOnIncomingCapturedBuffer()).Times(0);
+ EXPECT_CALL(*client_, DoOnIncomingCapturedVideoFrame()).Times(0);
+ }
+
+ void ResetWithNewClient() {
+ client_.reset(new MockClient(base::Bind(
+ &VideoCaptureDeviceTest::OnFrameCaptured, base::Unretained(this))));
+ }
+
+ void OnFrameCaptured(const VideoCaptureFormat& format) {
+ last_format_ = format;
+ run_loop_->QuitClosure().Run();
+ }
+
+ void WaitForCapturedFrame() {
+ run_loop_.reset(new base::RunLoop());
+ run_loop_->Run();
+ }
+
+ scoped_ptr<VideoCaptureDevice::Names> EnumerateDevices() {
+ VideoCaptureDevice::Names* names;
+ EXPECT_CALL(*device_enumeration_listener_.get(),
+ OnEnumeratedDevicesCallbackPtr(_)).WillOnce(SaveArg<0>(&names));
+
+ video_capture_device_factory_->EnumerateDeviceNames(
+ base::Bind(&DeviceEnumerationListener::OnEnumeratedDevicesCallback,
+ device_enumeration_listener_));
+ base::MessageLoop::current()->RunUntilIdle();
+ return scoped_ptr<VideoCaptureDevice::Names>(names);
+ }
+
+ const VideoCaptureFormat& last_format() const { return last_format_; }
+
+ scoped_ptr<VideoCaptureDevice::Name> GetFirstDeviceNameSupportingPixelFormat(
+ const VideoCapturePixelFormat& pixel_format) {
+ names_ = EnumerateDevices();
+ if (names_->empty()) {
+ DVLOG(1) << "No camera available.";
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+ for (const auto& names_iterator : *names_) {
+ VideoCaptureFormats supported_formats;
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ names_iterator, &supported_formats);
+ for (const auto& formats_iterator : supported_formats) {
+ if (formats_iterator.pixel_format == pixel_format) {
+ return scoped_ptr<VideoCaptureDevice::Name>(
+ new VideoCaptureDevice::Name(names_iterator));
+ }
+ }
+ }
+ DVLOG_IF(1, pixel_format != VIDEO_CAPTURE_PIXEL_FORMAT_MAX)
+        << "No camera can capture the format: "
+        << VideoCaptureFormat::PixelFormatToString(pixel_format);
+ return scoped_ptr<VideoCaptureDevice::Name>();
+ }
+
+ bool IsCaptureSizeSupported(const VideoCaptureDevice::Name& device,
+ const gfx::Size& size) {
+ VideoCaptureFormats supported_formats;
+ video_capture_device_factory_->GetDeviceSupportedFormats(
+ device, &supported_formats);
+ const auto it = std::find_if(
+ supported_formats.begin(), supported_formats.end(),
+ [&size](VideoCaptureFormat const& f) { return f.frame_size == size; });
+ if (it == supported_formats.end()) {
+ DVLOG(1) << "Size " << size.ToString() << " is not supported.";
+ return false;
+ }
+ return true;
+ }
+
+#if defined(OS_WIN)
+ base::win::ScopedCOMInitializer initialize_com_;
+#endif
+ scoped_ptr<VideoCaptureDevice::Names> names_;
+ scoped_ptr<base::MessageLoop> loop_;
+ scoped_ptr<base::RunLoop> run_loop_;
+ scoped_ptr<MockClient> client_;
+ scoped_refptr<DeviceEnumerationListener> device_enumeration_listener_;
+ VideoCaptureFormat last_format_;
+ scoped_ptr<VideoCaptureDeviceFactory> video_capture_device_factory_;
+};
+
+// Causes hangs on Windows Debug. http://crbug.com/417824
+#if defined(OS_WIN) && !defined(NDEBUG)
+#define MAYBE_OpenInvalidDevice DISABLED_OpenInvalidDevice
+#else
+#define MAYBE_OpenInvalidDevice OpenInvalidDevice
+#endif
+
+TEST_F(VideoCaptureDeviceTest, MAYBE_OpenInvalidDevice) {
+#if defined(OS_WIN)
+ VideoCaptureDevice::Name::CaptureApiType api_type =
+ VideoCaptureDeviceFactoryWin::PlatformSupportsMediaFoundation()
+ ? VideoCaptureDevice::Name::MEDIA_FOUNDATION
+ : VideoCaptureDevice::Name::DIRECT_SHOW;
+ VideoCaptureDevice::Name device_name("jibberish", "jibberish", api_type);
+#elif defined(OS_MACOSX)
+ VideoCaptureDevice::Name device_name(
+ "jibberish", "jibberish",
+ VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation()
+ ? VideoCaptureDevice::Name::AVFOUNDATION
+ : VideoCaptureDevice::Name::QTKIT);
+#else
+ VideoCaptureDevice::Name device_name("jibberish", "jibberish");
+#endif
+ scoped_ptr<VideoCaptureDevice> device =
+ video_capture_device_factory_->Create(device_name);
+#if !defined(OS_MACOSX)
+ EXPECT_TRUE(device == NULL);
+#else
+ if (VideoCaptureDeviceFactoryMac::PlatformSupportsAVFoundation()) {
+ EXPECT_TRUE(device == NULL);
+ } else {
+    // The presence of the actual device is only checked on AllocateAndStart()
+    // and not at creation time for the QTKit API on Mac OS X.
+ EXPECT_CALL(*client_, OnError(_)).Times(1);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ device->StopAndDeAllocate();
+ }
+#endif
+}
+
+TEST_P(VideoCaptureDeviceTest, CaptureWithSize) {
+ names_ = EnumerateDevices();
+ if (names_->empty()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+
+ const gfx::Size& size = GetParam();
+ if (!IsCaptureSizeSupported(names_->front(), size))
+ return;
+ const int width = size.width();
+ const int height = size.height();
+
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
+ DVLOG(1) << names_->front().id();
+
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(width, height);
+ capture_params.requested_format.frame_rate = 30.0f;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ // Get captured video frames.
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), width);
+ EXPECT_EQ(last_format().frame_size.height(), height);
+ if (last_format().pixel_format != VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG)
+ EXPECT_EQ(size.GetArea(), last_format().frame_size.GetArea());
+ device->StopAndDeAllocate();
+}
+
+#if !defined(OS_ANDROID)
+INSTANTIATE_TEST_CASE_P(MAYBE_VideoCaptureDeviceTests,
+ VideoCaptureDeviceTest,
+ testing::ValuesIn(kCaptureSizes));
+#endif
+
+TEST_F(VideoCaptureDeviceTest, MAYBE_AllocateBadSize) {
+ names_ = EnumerateDevices();
+ if (names_->empty()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
+
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
+
+ const gfx::Size input_size(640, 480);
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(637, 472);
+ capture_params.requested_format.frame_rate = 35;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ WaitForCapturedFrame();
+ device->StopAndDeAllocate();
+ EXPECT_EQ(last_format().frame_size.width(), input_size.width());
+ EXPECT_EQ(last_format().frame_size.height(), input_size.height());
+ if (last_format().pixel_format != VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG)
+ EXPECT_EQ(input_size.GetArea(), last_format().frame_size.GetArea());
+}
+
+// Causes hangs on Windows Debug. http://crbug.com/417824
+#if defined(OS_WIN) && !defined(NDEBUG)
+#define MAYBE_ReAllocateCamera DISABLED_ReAllocateCamera
+#else
+#define MAYBE_ReAllocateCamera ReAllocateCamera
+#endif
+
+TEST_F(VideoCaptureDeviceTest, MAYBE_ReAllocateCamera) {
+ names_ = EnumerateDevices();
+ if (names_->empty()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+
+ // First, do a number of very fast device start/stops.
+ for (int i = 0; i <= 5; i++) {
+ ResetWithNewClient();
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names_->front()));
+ gfx::Size resolution;
+ if (i % 2) {
+ resolution = gfx::Size(640, 480);
+ } else {
+ resolution = gfx::Size(1280, 1024);
+ }
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size = resolution;
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ device->StopAndDeAllocate();
+ }
+
+ // Finally, do a device start and wait for it to finish.
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(320, 240);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+
+ ResetWithNewClient();
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names_->front()));
+
+ device->AllocateAndStart(capture_params, client_.Pass());
+ WaitForCapturedFrame();
+ device->StopAndDeAllocate();
+ device.reset();
+ EXPECT_EQ(last_format().frame_size.width(), 320);
+ EXPECT_EQ(last_format().frame_size.height(), 240);
+}
+
+TEST_F(VideoCaptureDeviceTest, DeAllocateCameraWhileRunning) {
+ names_ = EnumerateDevices();
+ if (names_->empty()) {
+ DVLOG(1) << "No camera available. Exiting test.";
+ return;
+ }
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(names_->front()));
+ ASSERT_TRUE(device);
+
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(640, 480);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ // Get captured video frames.
+ WaitForCapturedFrame();
+ EXPECT_EQ(last_format().frame_size.width(), 640);
+ EXPECT_EQ(last_format().frame_size.height(), 480);
+ EXPECT_EQ(last_format().frame_rate, 30);
+ device->StopAndDeAllocate();
+}
+
+// Start the camera in 720p to capture MJPEG instead of a raw format.
+TEST_F(VideoCaptureDeviceTest, MAYBE_CaptureMjpeg) {
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG);
+ if (!name) {
+ DVLOG(1) << "No camera supports MJPEG format. Exiting test.";
+ return;
+ }
+ scoped_ptr<VideoCaptureDevice> device(
+ video_capture_device_factory_->Create(*name));
+ ASSERT_TRUE(device);
+
+ EXPECT_CALL(*client_, OnError(_)).Times(0);
+
+ VideoCaptureParams capture_params;
+ capture_params.requested_format.frame_size.SetSize(1280, 720);
+ capture_params.requested_format.frame_rate = 30;
+ capture_params.requested_format.pixel_format =
+ VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG;
+ device->AllocateAndStart(capture_params, client_.Pass());
+ // Get captured video frames.
+ WaitForCapturedFrame();
+ // Verify we get MJPEG from the device. Not all devices can capture 1280x720
+ // @ 30 fps, so we don't care about the exact resolution we get.
+ EXPECT_EQ(last_format().pixel_format, VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG);
+ EXPECT_GE(static_cast<size_t>(1280 * 720),
+ last_format().ImageAllocationSize());
+ device->StopAndDeAllocate();
+}
+
+TEST_F(VideoCaptureDeviceTest, GetDeviceSupportedFormats) {
+ // Use VIDEO_CAPTURE_PIXEL_FORMAT_MAX to iterate all device names for testing
+ // GetDeviceSupportedFormats().
+ scoped_ptr<VideoCaptureDevice::Name> name =
+ GetFirstDeviceNameSupportingPixelFormat(VIDEO_CAPTURE_PIXEL_FORMAT_MAX);
+  // Verify that no camera is returned for VIDEO_CAPTURE_PIXEL_FORMAT_MAX.
+  // Nothing else to test here, since we cannot forecast the hardware
+  // capabilities.
+ ASSERT_FALSE(name);
+}
+
+}  // namespace media
diff --git a/media/capture/video/win/capability_list_win.cc b/media/capture/video/win/capability_list_win.cc
new file mode 100644
index 0000000..db6e986
--- /dev/null
+++ b/media/capture/video/win/capability_list_win.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/capability_list_win.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "base/logging.h"
+
+namespace media {
+
+static bool CompareCapability(const VideoCaptureFormat& requested,
+ const CapabilityWin& capability_lhs,
+ const CapabilityWin& capability_rhs) {
+ const VideoCaptureFormat& lhs = capability_lhs.supported_format;
+ const VideoCaptureFormat& rhs = capability_rhs.supported_format;
+
+ const int diff_height_lhs =
+ std::abs(lhs.frame_size.height() - requested.frame_size.height());
+ const int diff_height_rhs =
+ std::abs(rhs.frame_size.height() - requested.frame_size.height());
+ if (diff_height_lhs != diff_height_rhs)
+ return diff_height_lhs < diff_height_rhs;
+
+ const int diff_width_lhs =
+ std::abs(lhs.frame_size.width() - requested.frame_size.width());
+ const int diff_width_rhs =
+ std::abs(rhs.frame_size.width() - requested.frame_size.width());
+ if (diff_width_lhs != diff_width_rhs)
+ return diff_width_lhs < diff_width_rhs;
+
+ const float diff_fps_lhs = std::fabs(lhs.frame_rate - requested.frame_rate);
+ const float diff_fps_rhs = std::fabs(rhs.frame_rate - requested.frame_rate);
+ if (diff_fps_lhs != diff_fps_rhs)
+ return diff_fps_lhs < diff_fps_rhs;
+
+ return lhs.pixel_format < rhs.pixel_format;
+}
+
+const CapabilityWin& GetBestMatchedCapability(
+ const VideoCaptureFormat& requested,
+ const CapabilityList& capabilities) {
+ DCHECK(!capabilities.empty());
+ const CapabilityWin* best_match = &(*capabilities.begin());
+ for (const CapabilityWin& capability : capabilities) {
+ if (CompareCapability(requested, capability, *best_match))
+ best_match = &capability;
+ }
+ return *best_match;
+}
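+
+// Worked example (illustrative): for a 640x480@30fps request, a 640x480@25fps
+// candidate (height diff 0) beats a 1280x720@30fps one (height diff 240);
+// width, frame rate and pixel format only break the earlier ties, in that
+// order.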
+
+} // namespace media
diff --git a/media/capture/video/win/capability_list_win.h b/media/capture/video/win/capability_list_win.h
new file mode 100644
index 0000000..47166c1
--- /dev/null
+++ b/media/capture/video/win/capability_list_win.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows-specific implementation of VideoCaptureDevice.
+// DirectShow is used for capturing. DirectShow provides its own threads
+// for capturing.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_CAPABILITY_LIST_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_CAPABILITY_LIST_WIN_H_
+
+#include <list>
+#include <windows.h>
+
+#include "media/base/video_capture_types.h"
+
+namespace media {
+
+struct CapabilityWin {
+ CapabilityWin(int index, const VideoCaptureFormat& format)
+ : stream_index(index), supported_format(format), info_header() {}
+
+ // Used by VideoCaptureDeviceWin.
+ CapabilityWin(int index,
+ const VideoCaptureFormat& format,
+ const BITMAPINFOHEADER& info_header)
+ : stream_index(index),
+ supported_format(format),
+ info_header(info_header) {}
+
+ const int stream_index;
+ const VideoCaptureFormat supported_format;
+
+ // |info_header| is only valid if DirectShow is used.
+ const BITMAPINFOHEADER info_header;
+};
+
+typedef std::list<CapabilityWin> CapabilityList;
+
+const CapabilityWin& GetBestMatchedCapability(
+ const VideoCaptureFormat& requested,
+ const CapabilityList& capabilities);
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_CAPABILITY_LIST_WIN_H_
diff --git a/media/capture/video/win/filter_base_win.cc b/media/capture/video/win/filter_base_win.cc
new file mode 100644
index 0000000..166b860
--- /dev/null
+++ b/media/capture/video/win/filter_base_win.cc
@@ -0,0 +1,173 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/filter_base_win.h"
+
+#pragma comment(lib, "strmiids.lib")
+
+namespace media {
+
+// Implement IEnumPins.
+class PinEnumerator final : public IEnumPins,
+ public base::RefCounted<PinEnumerator> {
+ public:
+ explicit PinEnumerator(FilterBase* filter) : filter_(filter), index_(0) {}
+
+ // IUnknown implementation.
+ STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
+ if (iid == IID_IEnumPins || iid == IID_IUnknown) {
+ AddRef();
+ *object_ptr = static_cast<IEnumPins*>(this);
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+ }
+
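+ // Note: object lifetime is managed by base::RefCounted, so AddRef() and
+ // Release() below return a dummy count; COM allows callers to use the
+ // returned value for diagnostics only.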
+ STDMETHOD_(ULONG, AddRef)() override {
+ base::RefCounted<PinEnumerator>::AddRef();
+ return 1;
+ }
+
+ STDMETHOD_(ULONG, Release)() override {
+ base::RefCounted<PinEnumerator>::Release();
+ return 1;
+ }
+
+ // Implement IEnumPins.
+ STDMETHOD(Next)(ULONG count, IPin** pins, ULONG* fetched) override {
+ ULONG pins_fetched = 0;
+ while (pins_fetched < count && filter_->NoOfPins() > index_) {
+ IPin* pin = filter_->GetPin(index_++);
+ pin->AddRef();
+ pins[pins_fetched++] = pin;
+ }
+
+ if (fetched)
+ *fetched = pins_fetched;
+
+ return pins_fetched == count ? S_OK : S_FALSE;
+ }
+
+ STDMETHOD(Skip)(ULONG count) override {
+ if (filter_->NoOfPins() - index_ > count) {
+ index_ += count;
+ return S_OK;
+ }
+ index_ = 0;
+ return S_FALSE;
+ }
+
+ STDMETHOD(Reset)() override {
+ index_ = 0;
+ return S_OK;
+ }
+
+ STDMETHOD(Clone)(IEnumPins** clone) override {
+ PinEnumerator* pin_enum = new PinEnumerator(filter_.get());
+ pin_enum->AddRef();
+ pin_enum->index_ = index_;
+ *clone = pin_enum;
+ return S_OK;
+ }
+
+ private:
+ friend class base::RefCounted<PinEnumerator>;
+ ~PinEnumerator() {}
+
+ scoped_refptr<FilterBase> filter_;
+ size_t index_;
+};
+
+FilterBase::FilterBase() : state_(State_Stopped) {
+}
+
+STDMETHODIMP FilterBase::EnumPins(IEnumPins** enum_pins) {
+ *enum_pins = new PinEnumerator(this);
+ (*enum_pins)->AddRef();
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::FindPin(LPCWSTR id, IPin** pin) {
+ return E_NOTIMPL;
+}
+
+STDMETHODIMP FilterBase::QueryFilterInfo(FILTER_INFO* info) {
+ info->pGraph = owning_graph_.get();
+ info->achName[0] = L'\0';
+ if (info->pGraph)
+ info->pGraph->AddRef();
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::JoinFilterGraph(IFilterGraph* graph, LPCWSTR name) {
+ owning_graph_ = graph;
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::QueryVendorInfo(LPWSTR* pVendorInfo) {
+ return S_OK;
+}
+
+// Implement IMediaFilter.
+STDMETHODIMP FilterBase::Stop() {
+ state_ = State_Stopped;
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::Pause() {
+ state_ = State_Paused;
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::Run(REFERENCE_TIME start) {
+ state_ = State_Running;
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::GetState(DWORD msec_timeout, FILTER_STATE* state) {
+ *state = state_;
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::SetSyncSource(IReferenceClock* clock) {
+ return S_OK;
+}
+
+STDMETHODIMP FilterBase::GetSyncSource(IReferenceClock** clock) {
+ return E_NOTIMPL;
+}
+
+// Implement IPersist.
+STDMETHODIMP FilterBase::GetClassID(CLSID* class_id) {
+ NOTREACHED();
+ return E_NOTIMPL;
+}
+
+// Implement IUnknown.
+STDMETHODIMP FilterBase::QueryInterface(REFIID id, void** object_ptr) {
+ if (id == IID_IMediaFilter || id == IID_IUnknown) {
+ *object_ptr = static_cast<IMediaFilter*>(this);
+ } else if (id == IID_IPersist) {
+ *object_ptr = static_cast<IPersist*>(this);
+ } else {
+ return E_NOINTERFACE;
+ }
+ AddRef();
+ return S_OK;
+}
+
+ULONG STDMETHODCALLTYPE FilterBase::AddRef() {
+ base::RefCounted<FilterBase>::AddRef();
+ return 1;
+}
+
+ULONG STDMETHODCALLTYPE FilterBase::Release() {
+ base::RefCounted<FilterBase>::Release();
+ return 1;
+}
+
+FilterBase::~FilterBase() {
+}
+
+} // namespace media
diff --git a/media/capture/video/win/filter_base_win.h b/media/capture/video/win/filter_base_win.h
new file mode 100644
index 0000000..2294742
--- /dev/null
+++ b/media/capture/video/win/filter_base_win.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implement a simple base class for DirectShow filters. It may only be used
+// in a single-threaded apartment.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_FILTER_BASE_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_FILTER_BASE_WIN_H_
+
+// Avoid including strsafe.h via dshow as it will cause build warnings.
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+
+#include "base/memory/ref_counted.h"
+#include "base/win/scoped_comptr.h"
+
+namespace media {
+
+class FilterBase : public IBaseFilter, public base::RefCounted<FilterBase> {
+ public:
+ FilterBase();
+
+ // Number of pins connected to this filter.
+ virtual size_t NoOfPins() = 0;
+ // Returns the IPin interface of the pin with the given |index|.
+ virtual IPin* GetPin(int index) = 0;
+
+ // Inherited from IUnknown.
+ STDMETHOD(QueryInterface)(REFIID id, void** object_ptr) override;
+ STDMETHOD_(ULONG, AddRef)() override;
+ STDMETHOD_(ULONG, Release)() override;
+
+ // Inherited from IBaseFilter.
+ STDMETHOD(EnumPins)(IEnumPins** enum_pins) override;
+
+ STDMETHOD(FindPin)(LPCWSTR id, IPin** pin) override;
+
+ STDMETHOD(QueryFilterInfo)(FILTER_INFO* info) override;
+
+ STDMETHOD(JoinFilterGraph)(IFilterGraph* graph, LPCWSTR name) override;
+
+ STDMETHOD(QueryVendorInfo)(LPWSTR* vendor_info) override;
+
+ // Inherited from IMediaFilter.
+ STDMETHOD(Stop)() override;
+
+ STDMETHOD(Pause)() override;
+
+ STDMETHOD(Run)(REFERENCE_TIME start) override;
+
+ STDMETHOD(GetState)(DWORD msec_timeout, FILTER_STATE* state) override;
+
+ STDMETHOD(SetSyncSource)(IReferenceClock* clock) override;
+
+ STDMETHOD(GetSyncSource)(IReferenceClock** clock) override;
+
+ // Inherited from IPersist.
+ STDMETHOD(GetClassID)(CLSID* class_id) override = 0;
+
+ protected:
+ friend class base::RefCounted<FilterBase>;
+ virtual ~FilterBase();
+
+ private:
+ FILTER_STATE state_;
+ base::win::ScopedComPtr<IFilterGraph> owning_graph_;
+
+ DISALLOW_COPY_AND_ASSIGN(FilterBase);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_FILTER_BASE_WIN_H_
diff --git a/media/capture/video/win/pin_base_win.cc b/media/capture/video/win/pin_base_win.cc
new file mode 100644
index 0000000..3f52194
--- /dev/null
+++ b/media/capture/video/win/pin_base_win.cc
@@ -0,0 +1,285 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/pin_base_win.h"
+
+#include "base/logging.h"
+
+namespace media {
+
+// Implement IEnumMediaTypes.
+class TypeEnumerator final : public IEnumMediaTypes,
+ public base::RefCounted<TypeEnumerator> {
+ public:
+ explicit TypeEnumerator(PinBase* pin) : pin_(pin), index_(0) {}
+
+ // Implement IUnknown.
+ STDMETHOD(QueryInterface)(REFIID iid, void** object_ptr) override {
+ if (iid == IID_IEnumMediaTypes || iid == IID_IUnknown) {
+ AddRef();
+ *object_ptr = static_cast<IEnumMediaTypes*>(this);
+ return S_OK;
+ }
+ return E_NOINTERFACE;
+ }
+
+ STDMETHOD_(ULONG, AddRef)() override {
+ base::RefCounted<TypeEnumerator>::AddRef();
+ return 1;
+ }
+
+ STDMETHOD_(ULONG, Release)() override {
+ base::RefCounted<TypeEnumerator>::Release();
+ return 1;
+ }
+
+ // Implement IEnumMediaTypes.
+ STDMETHOD(Next)(ULONG count, AM_MEDIA_TYPE** types, ULONG* fetched) override {
+ ULONG types_fetched = 0;
+
+ while (types_fetched < count) {
+ // Allocate AM_MEDIA_TYPE that we will store the media type in.
+ AM_MEDIA_TYPE* type = reinterpret_cast<AM_MEDIA_TYPE*>(
+ CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)));
+ if (!type) {
+ FreeAllocatedMediaTypes(types_fetched, types);
+ return E_OUTOFMEMORY;
+ }
+ ZeroMemory(type, sizeof(AM_MEDIA_TYPE));
+
+ // Allocate a VIDEOINFOHEADER and connect it to the AM_MEDIA_TYPE.
+ type->cbFormat = sizeof(VIDEOINFOHEADER);
+ BYTE* format =
+ reinterpret_cast<BYTE*>(CoTaskMemAlloc(sizeof(VIDEOINFOHEADER)));
+ if (!format) {
+ CoTaskMemFree(type);
+ FreeAllocatedMediaTypes(types_fetched, types);
+ return E_OUTOFMEMORY;
+ }
+ type->pbFormat = format;
+ // Get the media type from the pin.
+ if (pin_->GetValidMediaType(index_++, type)) {
+ types[types_fetched++] = type;
+ } else {
+ CoTaskMemFree(format);
+ CoTaskMemFree(type);
+ break;
+ }
+ }
+
+ if (fetched)
+ *fetched = types_fetched;
+
+ return types_fetched == count ? S_OK : S_FALSE;
+ }
+
+ STDMETHOD(Skip)(ULONG count) override {
+ index_ += count;
+ return S_OK;
+ }
+
+ STDMETHOD(Reset)() override {
+ index_ = 0;
+ return S_OK;
+ }
+
+ STDMETHOD(Clone)(IEnumMediaTypes** clone) override {
+ TypeEnumerator* type_enum = new TypeEnumerator(pin_.get());
+ type_enum->AddRef();
+ type_enum->index_ = index_;
+ *clone = type_enum;
+ return S_OK;
+ }
+
+ private:
+ friend class base::RefCounted<TypeEnumerator>;
+ ~TypeEnumerator() {}
+
+ void FreeAllocatedMediaTypes(ULONG allocated, AM_MEDIA_TYPE** types) {
+ for (ULONG i = 0; i < allocated; ++i) {
+ CoTaskMemFree(types[i]->pbFormat);
+ CoTaskMemFree(types[i]);
+ }
+ }
+
+ scoped_refptr<PinBase> pin_;
+ int index_;
+};
+
+PinBase::PinBase(IBaseFilter* owner) : owner_(owner) {
+ memset(&current_media_type_, 0, sizeof(current_media_type_));
+}
+
+void PinBase::SetOwner(IBaseFilter* owner) {
+ owner_ = owner;
+}
+
+// Called on an output pin to establish a connection.
+STDMETHODIMP PinBase::Connect(IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) {
+ if (!receive_pin || !media_type)
+ return E_POINTER;
+
+ current_media_type_ = *media_type;
+ receive_pin->AddRef();
+ connected_pin_.Attach(receive_pin);
+ HRESULT hr = receive_pin->ReceiveConnection(this, media_type);
+
+ return hr;
+}
+
+// Called from an output pin on an input pin to establish a connection.
+STDMETHODIMP PinBase::ReceiveConnection(IPin* connector,
+ const AM_MEDIA_TYPE* media_type) {
+ if (!IsMediaTypeValid(media_type))
+ return VFW_E_TYPE_NOT_ACCEPTED;
+
+ current_media_type_ = *media_type;
+ connector->AddRef();
+ connected_pin_.Attach(connector);
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::Disconnect() {
+ if (!connected_pin_.get())
+ return S_FALSE;
+
+ connected_pin_.Release();
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::ConnectedTo(IPin** pin) {
+ *pin = connected_pin_.get();
+ if (!connected_pin_.get())
+ return VFW_E_NOT_CONNECTED;
+
+ connected_pin_.get()->AddRef();
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::ConnectionMediaType(AM_MEDIA_TYPE* media_type) {
+ if (!connected_pin_.get())
+ return VFW_E_NOT_CONNECTED;
+ *media_type = current_media_type_;
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::QueryPinInfo(PIN_INFO* info) {
+ info->dir = PINDIR_INPUT;
+ info->pFilter = owner_;
+ if (owner_)
+ owner_->AddRef();
+ info->achName[0] = L'\0';
+
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::QueryDirection(PIN_DIRECTION* pin_dir) {
+ *pin_dir = PINDIR_INPUT;
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::QueryId(LPWSTR* id) {
+ NOTREACHED();
+ return E_OUTOFMEMORY;
+}
+
+STDMETHODIMP PinBase::QueryAccept(const AM_MEDIA_TYPE* media_type) {
+ return S_FALSE;
+}
+
+STDMETHODIMP PinBase::EnumMediaTypes(IEnumMediaTypes** types) {
+ *types = new TypeEnumerator(this);
+ (*types)->AddRef();
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::QueryInternalConnections(IPin** pins, ULONG* no_pins) {
+ return E_NOTIMPL;
+}
+
+STDMETHODIMP PinBase::EndOfStream() {
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::BeginFlush() {
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::EndFlush() {
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::NewSegment(REFERENCE_TIME start,
+ REFERENCE_TIME stop,
+ double rate) {
+ NOTREACHED();
+ return E_NOTIMPL;
+}
+
+// Inherited from IMemInputPin.
+STDMETHODIMP PinBase::GetAllocator(IMemAllocator** allocator) {
+ return VFW_E_NO_ALLOCATOR;
+}
+
+STDMETHODIMP PinBase::NotifyAllocator(IMemAllocator* allocator,
+ BOOL read_only) {
+ return S_OK;
+}
+
+STDMETHODIMP PinBase::GetAllocatorRequirements(
+ ALLOCATOR_PROPERTIES* properties) {
+ return E_NOTIMPL;
+}
+
+STDMETHODIMP PinBase::ReceiveMultiple(IMediaSample** samples,
+ long sample_count,
+ long* processed) {
+ DCHECK(samples);
+
+ HRESULT hr = S_OK;
+ *processed = 0;
+ while (sample_count--) {
+ hr = Receive(samples[*processed]);
+ // S_FALSE means don't send any more.
+ if (hr != S_OK)
+ break;
+ ++(*processed);
+ }
+ return hr;
+}
+
+STDMETHODIMP PinBase::ReceiveCanBlock() {
+ return S_FALSE;
+}
+
+// Inherited from IUnknown.
+STDMETHODIMP PinBase::QueryInterface(REFIID id, void** object_ptr) {
+ if (id == IID_IPin || id == IID_IUnknown) {
+ *object_ptr = static_cast<IPin*>(this);
+ } else if (id == IID_IMemInputPin) {
+ *object_ptr = static_cast<IMemInputPin*>(this);
+ } else {
+ return E_NOINTERFACE;
+ }
+ AddRef();
+ return S_OK;
+}
+
+STDMETHODIMP_(ULONG) PinBase::AddRef() {
+ base::RefCounted<PinBase>::AddRef();
+ return 1;
+}
+
+STDMETHODIMP_(ULONG) PinBase::Release() {
+ base::RefCounted<PinBase>::Release();
+ return 1;
+}
+
+PinBase::~PinBase() {
+}
+
+} // namespace media
diff --git a/media/capture/video/win/pin_base_win.h b/media/capture/video/win/pin_base_win.h
new file mode 100644
index 0000000..a3ac020
--- /dev/null
+++ b/media/capture/video/win/pin_base_win.h
@@ -0,0 +1,111 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implement a simple base class for a DirectShow input pin. It may only be
+// used in a single threaded apartment.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_PIN_BASE_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_PIN_BASE_WIN_H_
+
+// Avoid including strsafe.h via dshow as it will cause build warnings.
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+
+#include "base/memory/ref_counted.h"
+#include "base/win/scoped_comptr.h"
+
+namespace media {
+
+class PinBase : public IPin,
+ public IMemInputPin,
+ public base::RefCounted<PinBase> {
+ public:
+ explicit PinBase(IBaseFilter* owner);
+
+ // Function used for changing the owner.
+ // If the owner is deleted, it should first call this function with
+ // owner = NULL.
+ void SetOwner(IBaseFilter* owner);
+
+ // Checks if a media type is acceptable. This is called when this pin is
+ // connected to an output pin. Must return true if the media type is
+ // acceptable, false otherwise.
+ virtual bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) = 0;
+
+ // Enumerates valid media types.
+ virtual bool GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) = 0;
+
+ // Called when new media is received. Note that this is not called on the
+ // same thread as the one on which the pin was created.
+ STDMETHOD(Receive)(IMediaSample* sample) override = 0;
+
+ STDMETHOD(Connect)(IPin* receive_pin,
+ const AM_MEDIA_TYPE* media_type) override;
+
+ STDMETHOD(ReceiveConnection)(IPin* connector,
+ const AM_MEDIA_TYPE* media_type) override;
+
+ STDMETHOD(Disconnect)() override;
+
+ STDMETHOD(ConnectedTo)(IPin** pin) override;
+
+ STDMETHOD(ConnectionMediaType)(AM_MEDIA_TYPE* media_type) override;
+
+ STDMETHOD(QueryPinInfo)(PIN_INFO* info) override;
+
+ STDMETHOD(QueryDirection)(PIN_DIRECTION* pin_dir) override;
+
+ STDMETHOD(QueryId)(LPWSTR* id) override;
+
+ STDMETHOD(QueryAccept)(const AM_MEDIA_TYPE* media_type) override;
+
+ STDMETHOD(EnumMediaTypes)(IEnumMediaTypes** types) override;
+
+ STDMETHOD(QueryInternalConnections)(IPin** pins, ULONG* no_pins) override;
+
+ STDMETHOD(EndOfStream)() override;
+
+ STDMETHOD(BeginFlush)() override;
+
+ STDMETHOD(EndFlush)() override;
+
+ STDMETHOD(NewSegment)(REFERENCE_TIME start,
+ REFERENCE_TIME stop,
+ double dRate) override;
+
+ // Inherited from IMemInputPin.
+ STDMETHOD(GetAllocator)(IMemAllocator** allocator) override;
+
+ STDMETHOD(NotifyAllocator)(IMemAllocator* allocator, BOOL read_only) override;
+
+ STDMETHOD(GetAllocatorRequirements)(
+ ALLOCATOR_PROPERTIES* properties) override;
+
+ STDMETHOD(ReceiveMultiple)(IMediaSample** samples,
+ long sample_count,
+ long* processed) override;
+ STDMETHOD(ReceiveCanBlock)() override;
+
+ // Inherited from IUnknown.
+ STDMETHOD(QueryInterface)(REFIID id, void** object_ptr) override;
+
+ STDMETHOD_(ULONG, AddRef)() override;
+
+ STDMETHOD_(ULONG, Release)() override;
+
+ protected:
+ friend class base::RefCounted<PinBase>;
+ virtual ~PinBase();
+
+ private:
+ AM_MEDIA_TYPE current_media_type_;
+ base::win::ScopedComPtr<IPin> connected_pin_;
+ // owner_ is the filter owning this pin. We don't reference count it, since
+ // that would create a circular reference.
+ IBaseFilter* owner_;
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_PIN_BASE_WIN_H_
diff --git a/media/capture/video/win/sink_filter_observer_win.h b/media/capture/video/win/sink_filter_observer_win.h
new file mode 100644
index 0000000..9dfd08b
--- /dev/null
+++ b/media/capture/video/win/sink_filter_observer_win.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Observer class of SinkFilter. The implementor of this class receives video
+// frames from the SinkFilter DirectShow filter.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_OBSERVER_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_OBSERVER_WIN_H_
+
+namespace media {
+
+class SinkFilterObserver {
+ public:
+ // SinkFilter will call this function with all frames delivered to it.
+ // |buffer| is only valid during this function call.
+ virtual void FrameReceived(const uint8* buffer, int length) = 0;
+
+ protected:
+ virtual ~SinkFilterObserver();
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_OBSERVER_WIN_H_
diff --git a/media/capture/video/win/sink_filter_win.cc b/media/capture/video/win/sink_filter_win.cc
new file mode 100644
index 0000000..2a36746
--- /dev/null
+++ b/media/capture/video/win/sink_filter_win.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/sink_filter_win.h"
+
+#include "base/logging.h"
+#include "media/capture/video/win/sink_input_pin_win.h"
+
+namespace media {
+
+// Define GUID for I420. This is the color format we would like to support but
+// it is not defined in the DirectShow SDK.
+// http://msdn.microsoft.com/en-us/library/dd757532.aspx
+// 30323449-0000-0010-8000-00AA00389B71.
+GUID kMediaSubTypeI420 = {0x30323449,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71}};
+
+// UYVY synonym with BT709 color components, used in HD video. This variation
+// might appear in non-USB capture cards and it's implemented as a normal YUV
+// pixel format with the characters HDYC encoded in the first array word.
+GUID kMediaSubTypeHDYC = {0x43594448,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
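+
+// In both GUIDs above, the first DWORD encodes the FOURCC in little-endian
+// byte order: 0x30323449 is 'I420' ('I'=0x49, '4'=0x34, '2'=0x32, '0'=0x30)
+// and 0x43594448 is 'HDYC'.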
+
+SinkFilterObserver::~SinkFilterObserver() {
+}
+
+SinkFilter::SinkFilter(SinkFilterObserver* observer) : input_pin_(NULL) {
+ input_pin_ = new SinkInputPin(this, observer);
+}
+
+void SinkFilter::SetRequestedMediaFormat(VideoCapturePixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header) {
+ input_pin_->SetRequestedMediaFormat(pixel_format, frame_rate, info_header);
+}
+
+const VideoCaptureFormat& SinkFilter::ResultingFormat() {
+ return input_pin_->ResultingFormat();
+}
+
+size_t SinkFilter::NoOfPins() {
+ return 1;
+}
+
+IPin* SinkFilter::GetPin(int index) {
+ return index == 0 ? input_pin_.get() : NULL;
+}
+
+STDMETHODIMP SinkFilter::GetClassID(CLSID* clsid) {
+ *clsid = __uuidof(SinkFilter);
+ return S_OK;
+}
+
+SinkFilter::~SinkFilter() {
+ input_pin_->SetOwner(NULL);
+}
+
+} // namespace media
diff --git a/media/capture/video/win/sink_filter_win.h b/media/capture/video/win/sink_filter_win.h
new file mode 100644
index 0000000..d3fa99a
--- /dev/null
+++ b/media/capture/video/win/sink_filter_win.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implement a DirectShow sink filter used for receiving captured frames from
+// a DirectShow Capture filter.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_WIN_H_
+
+#include <windows.h>
+
+#include "base/memory/ref_counted.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/filter_base_win.h"
+#include "media/capture/video/win/sink_filter_observer_win.h"
+
+namespace media {
+
+// Define GUID for I420. This is the color format we would like to support but
+// it is not defined in the DirectShow SDK.
+// http://msdn.microsoft.com/en-us/library/dd757532.aspx
+// 30323449-0000-0010-8000-00AA00389B71.
+extern GUID kMediaSubTypeI420;
+
+// UYVY synonym with BT709 color components, used in HD video. This variation
+// might appear in non-USB capture cards and it's implemented as a normal YUV
+// pixel format with the characters HDYC encoded in the first array word.
+extern GUID kMediaSubTypeHDYC;
+
+class SinkInputPin;
+
+class __declspec(uuid("88cdbbdc-a73b-4afa-acbf-15d5e2ce12c3")) SinkFilter
+ : public FilterBase {
+ public:
+ explicit SinkFilter(SinkFilterObserver* observer);
+
+ void SetRequestedMediaFormat(VideoCapturePixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header);
+ // Returns the format that is negotiated when this
+ // filter is connected to a media filter.
+ const VideoCaptureFormat& ResultingFormat();
+
+ // Implement FilterBase.
+ size_t NoOfPins() override;
+ IPin* GetPin(int index) override;
+
+ STDMETHOD(GetClassID)(CLSID* clsid) override;
+
+ private:
+ ~SinkFilter() override;
+
+ scoped_refptr<SinkInputPin> input_pin_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SinkFilter);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_SINK_FILTER_WIN_H_
diff --git a/media/capture/video/win/sink_input_pin_win.cc b/media/capture/video/win/sink_input_pin_win.cc
new file mode 100644
index 0000000..240ed9c
--- /dev/null
+++ b/media/capture/video/win/sink_input_pin_win.cc
@@ -0,0 +1,192 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/sink_input_pin_win.h"
+
+#include <cstring>
+
+// Avoid including strsafe.h via dshow as it will cause build warnings.
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+
+#include "base/logging.h"
+
+namespace media {
+
+const REFERENCE_TIME kSecondsToReferenceTime = 10000000;
+
+static DWORD GetArea(const BITMAPINFOHEADER& info_header) {
+ return info_header.biWidth * info_header.biHeight;
+}
+
+SinkInputPin::SinkInputPin(IBaseFilter* filter, SinkFilterObserver* observer)
+ : PinBase(filter), requested_frame_rate_(0), observer_(observer) {
+}
+
+void SinkInputPin::SetRequestedMediaFormat(
+ VideoCapturePixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header) {
+ requested_pixel_format_ = pixel_format;
+ requested_frame_rate_ = frame_rate;
+ requested_info_header_ = info_header;
+ resulting_format_.frame_size.SetSize(0, 0);
+ resulting_format_.frame_rate = 0;
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+}
+
+const VideoCaptureFormat& SinkInputPin::ResultingFormat() {
+ return resulting_format_;
+}
+
+bool SinkInputPin::IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) {
+ GUID type = media_type->majortype;
+ if (type != MEDIATYPE_Video)
+ return false;
+
+ GUID format_type = media_type->formattype;
+ if (format_type != FORMAT_VideoInfo)
+ return false;
+
+ // Check for the sub types we support.
+ GUID sub_type = media_type->subtype;
+ VIDEOINFOHEADER* pvi =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ if (pvi == NULL)
+ return false;
+
+ // Store the incoming width and height.
+ resulting_format_.frame_size.SetSize(pvi->bmiHeader.biWidth,
+ abs(pvi->bmiHeader.biHeight));
+ if (pvi->AvgTimePerFrame > 0) {
+ resulting_format_.frame_rate =
+ static_cast<int>(kSecondsToReferenceTime / pvi->AvgTimePerFrame);
+ } else {
+ resulting_format_.frame_rate = requested_frame_rate_;
+ }
+ if (sub_type == kMediaSubTypeI420 &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('I', '4', '2', '0')) {
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_I420;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_YUY2 &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('Y', 'U', 'Y', '2')) {
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_YUY2;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_MJPG &&
+ pvi->bmiHeader.biCompression == MAKEFOURCC('M', 'J', 'P', 'G')) {
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_RGB24 &&
+ pvi->bmiHeader.biCompression == BI_RGB) {
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_RGB24;
+ return true;
+ }
+ if (sub_type == MEDIASUBTYPE_RGB32 &&
+ pvi->bmiHeader.biCompression == BI_RGB) {
+ resulting_format_.pixel_format = VIDEO_CAPTURE_PIXEL_FORMAT_RGB32;
+ return true;
+ }
+ return false;
+}
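+
+// Worked example (illustrative): REFERENCE_TIME counts 100 ns units, hence
+// kSecondsToReferenceTime = 10^7; a device reporting AvgTimePerFrame = 333333
+// yields 10000000 / 333333 = 30 fps after the integer truncation above.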
+
+bool SinkInputPin::GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) {
+ if (media_type->cbFormat < sizeof(VIDEOINFOHEADER))
+ return false;
+
+ VIDEOINFOHEADER* pvi =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+
+ ZeroMemory(pvi, sizeof(VIDEOINFOHEADER));
+ pvi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
+ pvi->bmiHeader.biPlanes = 1;
+ pvi->bmiHeader.biClrImportant = 0;
+ pvi->bmiHeader.biClrUsed = 0;
+ if (requested_frame_rate_ > 0) {
+ pvi->AvgTimePerFrame = kSecondsToReferenceTime / requested_frame_rate_;
+ }
+
+ media_type->majortype = MEDIATYPE_Video;
+ media_type->formattype = FORMAT_VideoInfo;
+ media_type->bTemporalCompression = FALSE;
+
+ if (requested_pixel_format_ == VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG) {
+ // If the requested pixel format is MJPEG, accept only MJPEG.
+ // This is ok since the capabilities of the capturer have been
+ // enumerated and we know that it is supported.
+ if (index != 0)
+ return false;
+
+ pvi->bmiHeader = requested_info_header_;
+ return true;
+ }
+
+ switch (index) {
+ case 0: {
+ pvi->bmiHeader.biCompression = MAKEFOURCC('I', '4', '2', '0');
+      pvi->bmiHeader.biBitCount = 12;  // bits per pixel
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 3 / 2;
+ media_type->subtype = kMediaSubTypeI420;
+ break;
+ }
+ case 1: {
+ pvi->bmiHeader.biCompression = MAKEFOURCC('Y', 'U', 'Y', '2');
+ pvi->bmiHeader.biBitCount = 16;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 2;
+ media_type->subtype = MEDIASUBTYPE_YUY2;
+ break;
+ }
+ case 2: {
+ pvi->bmiHeader.biCompression = BI_RGB;
+ pvi->bmiHeader.biBitCount = 24;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 3;
+ media_type->subtype = MEDIASUBTYPE_RGB24;
+ break;
+ }
+ case 3: {
+ pvi->bmiHeader.biCompression = BI_RGB;
+ pvi->bmiHeader.biBitCount = 32;
+ pvi->bmiHeader.biWidth = requested_info_header_.biWidth;
+ pvi->bmiHeader.biHeight = requested_info_header_.biHeight;
+ pvi->bmiHeader.biSizeImage = GetArea(requested_info_header_) * 4;
+ media_type->subtype = MEDIASUBTYPE_RGB32;
+ break;
+ }
+ default:
+ return false;
+ }
+
+ media_type->bFixedSizeSamples = TRUE;
+ media_type->lSampleSize = pvi->bmiHeader.biSizeImage;
+ return true;
+}
+
+HRESULT SinkInputPin::Receive(IMediaSample* sample) {
+ const int length = sample->GetActualDataLength();
+ uint8* buffer = NULL;
+
+ if (length <= 0) {
+ DLOG(WARNING) << "Media sample length is 0 or less.";
+ return S_FALSE;
+ }
+
+ if (FAILED(sample->GetPointer(&buffer)))
+ return S_FALSE;
+
+ observer_->FrameReceived(buffer, length);
+ return S_OK;
+}
+
+SinkInputPin::~SinkInputPin() {
+}
+
+} // namespace media
diff --git a/media/capture/video/win/sink_input_pin_win.h b/media/capture/video/win/sink_input_pin_win.h
new file mode 100644
index 0000000..b0d2f16
--- /dev/null
+++ b/media/capture/video/win/sink_input_pin_win.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implements a DirectShow input pin used for receiving captured frames from
+// a DirectShow capture filter.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
+
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/pin_base_win.h"
+#include "media/capture/video/win/sink_filter_win.h"
+
+namespace media {
+
+// Constant used for converting seconds to REFERENCE_TIME.
+extern const REFERENCE_TIME kSecondsToReferenceTime;
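+// REFERENCE_TIME is expressed in 100-nanosecond units, so one second
+// corresponds to 10,000,000 units (the value the .cc file is assumed to give
+// this constant); e.g. 30 fps maps to an AvgTimePerFrame of ~333,333 units.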
+
+// Input pin of the SinkFilter.
+class SinkInputPin : public PinBase {
+ public:
+ SinkInputPin(IBaseFilter* filter, SinkFilterObserver* observer);
+
+ void SetRequestedMediaFormat(VideoCapturePixelFormat pixel_format,
+ float frame_rate,
+ const BITMAPINFOHEADER& info_header);
+ // Returns the capability that is negotiated when this
+ // pin is connected to a media filter.
+ const VideoCaptureFormat& ResultingFormat();
+
+ // Implement PinBase.
+ bool IsMediaTypeValid(const AM_MEDIA_TYPE* media_type) override;
+ bool GetValidMediaType(int index, AM_MEDIA_TYPE* media_type) override;
+
+ STDMETHOD(Receive)(IMediaSample* media_sample) override;
+
+ private:
+ ~SinkInputPin() override;
+
+ VideoCapturePixelFormat requested_pixel_format_;
+ float requested_frame_rate_;
+ BITMAPINFOHEADER requested_info_header_;
+ VideoCaptureFormat resulting_format_;
+ SinkFilterObserver* observer_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SinkInputPin);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_SINK_INPUT_PIN_WIN_H_
diff --git a/media/capture/video/win/video_capture_device_factory_win.cc b/media/capture/video/win/video_capture_device_factory_win.cc
new file mode 100644
index 0000000..8f1ea11
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_factory_win.cc
@@ -0,0 +1,438 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/video_capture_device_factory_win.h"
+
+#include <mfapi.h>
+#include <mferror.h>
+
+#include "base/command_line.h"
+#include "base/macros.h"
+#include "base/metrics/histogram.h"
+#include "base/strings/string_util.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/win/metro.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_variant.h"
+#include "base/win/windows_version.h"
+#include "media/base/media_switches.h"
+#include "media/base/win/mf_initializer.h"
+#include "media/capture/video/win/video_capture_device_mf_win.h"
+#include "media/capture/video/win/video_capture_device_win.h"
+
+using base::win::ScopedCoMem;
+using base::win::ScopedComPtr;
+using base::win::ScopedVariant;
+using Name = media::VideoCaptureDevice::Name;
+using Names = media::VideoCaptureDevice::Names;
+
+namespace media {
+
+// Avoid enumerating and/or using certain devices because they provoke crashes
+// or are otherwise problematic (http://crbug.com/378494). This enum is
+// defined for the purposes of UMA collection. Existing entries cannot be
+// removed.
+enum BlacklistedCameraNames {
+ BLACKLISTED_CAMERA_GOOGLE_CAMERA_ADAPTER = 0,
+ BLACKLISTED_CAMERA_IP_CAMERA = 1,
+ BLACKLISTED_CAMERA_CYBERLINK_WEBCAM_SPLITTER = 2,
+ BLACKLISTED_CAMERA_EPOCCAM = 3,
+ // This one must be last, and equal to the previous enumerated value.
+ BLACKLISTED_CAMERA_MAX = BLACKLISTED_CAMERA_EPOCCAM,
+};
+
+// Blacklisted devices are identified by a characteristic prefix of the name.
+// This prefix is used case-insensitively. This list must be kept in sync with
+// |BlacklistedCameraNames|.
+static const char* const kBlacklistedCameraNames[] = {
+ // Name of a fake DirectShow filter on computers with GTalk installed.
+ "Google Camera Adapter",
+ // The following software WebCams cause crashes.
+ "IP Camera [JPEG/MJPEG]",
+ "CyberLink Webcam Splitter",
+ "EpocCam",
+};
+static_assert(arraysize(kBlacklistedCameraNames) == BLACKLISTED_CAMERA_MAX + 1,
+ "kBlacklistedCameraNames should be same size as "
+ "BlacklistedCameraNames enum");
+
+static bool LoadMediaFoundationDlls() {
+ static const wchar_t* const kMfDLLs[] = {
+ L"%WINDIR%\\system32\\mf.dll",
+ L"%WINDIR%\\system32\\mfplat.dll",
+ L"%WINDIR%\\system32\\mfreadwrite.dll",
+ };
+
+ for (int i = 0; i < arraysize(kMfDLLs); ++i) {
+ wchar_t path[MAX_PATH] = {0};
+ ExpandEnvironmentStringsW(kMfDLLs[i], path, arraysize(path));
+ if (!LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH))
+ return false;
+ }
+ return true;
+}
+
+static bool PrepareVideoCaptureAttributesMediaFoundation(
+ IMFAttributes** attributes,
+ int count) {
+ InitializeMediaFoundation();
+
+ if (FAILED(MFCreateAttributes(attributes, count)))
+ return false;
+
+ return SUCCEEDED(
+ (*attributes)
+ ->SetGUID(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID));
+}
+
+static bool CreateVideoCaptureDeviceMediaFoundation(const char* sym_link,
+ IMFMediaSource** source) {
+ ScopedComPtr<IMFAttributes> attributes;
+ if (!PrepareVideoCaptureAttributesMediaFoundation(attributes.Receive(), 2))
+ return false;
+
+ attributes->SetString(MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK,
+ base::SysUTF8ToWide(sym_link).c_str());
+
+ return SUCCEEDED(MFCreateDeviceSource(attributes.get(), source));
+}
+
+static bool EnumerateVideoDevicesMediaFoundation(IMFActivate*** devices,
+ UINT32* count) {
+ ScopedComPtr<IMFAttributes> attributes;
+ if (!PrepareVideoCaptureAttributesMediaFoundation(attributes.Receive(), 1))
+ return false;
+
+ return SUCCEEDED(MFEnumDeviceSources(attributes.get(), devices, count));
+}
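+
+// Note on ownership: on success, MFEnumDeviceSources() hands the caller both
+// the |devices| array (freed via CoTaskMemFree, here through ScopedCoMem) and
+// a reference on each IMFActivate in it, which must be Release()d
+// individually, as GetDeviceNamesMediaFoundation() below does.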
+
+static bool IsDeviceBlackListed(const std::string& name) {
+ DCHECK_EQ(BLACKLISTED_CAMERA_MAX + 1,
+ static_cast<int>(arraysize(kBlacklistedCameraNames)));
+ for (size_t i = 0; i < arraysize(kBlacklistedCameraNames); ++i) {
+ if (base::StartsWith(name, kBlacklistedCameraNames[i],
+ base::CompareCase::INSENSITIVE_ASCII)) {
+ DVLOG(1) << "Enumerated blacklisted device: " << name;
+ UMA_HISTOGRAM_ENUMERATION("Media.VideoCapture.BlacklistedDevice", i,
+ BLACKLISTED_CAMERA_MAX + 1);
+ return true;
+ }
+ }
+ return false;
+}
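+
+// Illustrative example: a device enumerated as "Google Camera Adapter 0"
+// matches the "Google Camera Adapter" prefix case-insensitively, so
+// IsDeviceBlackListed() returns true and the device is skipped.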
+
+static void GetDeviceNamesDirectShow(Names* device_names) {
+ DCHECK(device_names);
+ DVLOG(1) << " GetDeviceNamesDirectShow";
+
+ ScopedComPtr<ICreateDevEnum> dev_enum;
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
+ if (FAILED(hr))
+ return;
+
+ ScopedComPtr<IEnumMoniker> enum_moniker;
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
+  // CreateClassEnumerator returns S_FALSE on some Windows OS versions when no
+  // camera exists, so the FAILED macro can't be used.
+ if (hr != S_OK)
+ return;
+
+ // Enumerate all video capture devices.
+ for (ScopedComPtr<IMoniker> moniker;
+ enum_moniker->Next(1, moniker.Receive(), NULL) == S_OK;
+ moniker.Release()) {
+ ScopedComPtr<IPropertyBag> prop_bag;
+ hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
+ if (FAILED(hr))
+ continue;
+
+ // Find the description or friendly name.
+ ScopedVariant name;
+ hr = prop_bag->Read(L"Description", name.Receive(), 0);
+ if (FAILED(hr))
+ hr = prop_bag->Read(L"FriendlyName", name.Receive(), 0);
+
+ if (FAILED(hr) || name.type() != VT_BSTR)
+ continue;
+
+ const std::string device_name(base::SysWideToUTF8(V_BSTR(name.ptr())));
+ if (IsDeviceBlackListed(device_name))
+ continue;
+
+ name.Reset();
+ hr = prop_bag->Read(L"DevicePath", name.Receive(), 0);
+ std::string id;
+ if (FAILED(hr) || name.type() != VT_BSTR) {
+ id = device_name;
+ } else {
+ DCHECK_EQ(name.type(), VT_BSTR);
+ id = base::SysWideToUTF8(V_BSTR(name.ptr()));
+ }
+ device_names->push_back(Name(device_name, id, Name::DIRECT_SHOW));
+ }
+}
+
+static void GetDeviceNamesMediaFoundation(Names* device_names) {
+ DVLOG(1) << " GetDeviceNamesMediaFoundation";
+ ScopedCoMem<IMFActivate*> devices;
+ UINT32 count;
+ if (!EnumerateVideoDevicesMediaFoundation(&devices, &count))
+ return;
+
+ for (UINT32 i = 0; i < count; ++i) {
+ ScopedCoMem<wchar_t> name;
+ UINT32 name_size;
+ HRESULT hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME, &name, &name_size);
+ if (SUCCEEDED(hr)) {
+ ScopedCoMem<wchar_t> id;
+ UINT32 id_size;
+ hr = devices[i]->GetAllocatedString(
+ MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_SYMBOLIC_LINK, &id,
+ &id_size);
+ if (SUCCEEDED(hr)) {
+ device_names->push_back(
+ Name(base::SysWideToUTF8(std::wstring(name, name_size)),
+ base::SysWideToUTF8(std::wstring(id, id_size)),
+ Name::MEDIA_FOUNDATION));
+ }
+ }
+ DLOG_IF(ERROR, FAILED(hr)) << "GetAllocatedString failed: "
+ << logging::SystemErrorCodeToString(hr);
+ devices[i]->Release();
+ }
+}
+
+static void GetDeviceSupportedFormatsDirectShow(const Name& device,
+ VideoCaptureFormats* formats) {
+ DVLOG(1) << "GetDeviceSupportedFormatsDirectShow for " << device.name();
+ ScopedComPtr<ICreateDevEnum> dev_enum;
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
+ if (FAILED(hr))
+ return;
+
+ ScopedComPtr<IEnumMoniker> enum_moniker;
+ hr = dev_enum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
+ enum_moniker.Receive(), 0);
+  // CreateClassEnumerator returns S_FALSE on some Windows OS versions when no
+  // camera exists, so the FAILED macro can't be used.
+ if (hr != S_OK)
+ return;
+
+  // Walk the capture devices. There is no need to check for device presence
+  // again, since that is done inside GetDeviceFilter(). "Google Camera
+  // Adapter" and old VFW devices were already skipped during the
+  // GetDeviceNames() enumeration.
+ base::win::ScopedComPtr<IBaseFilter> capture_filter;
+ hr = VideoCaptureDeviceWin::GetDeviceFilter(device.capabilities_id(),
+ CLSID_VideoInputDeviceCategory,
+ capture_filter.Receive());
+ if (!capture_filter.get()) {
+ DLOG(ERROR) << "Failed to create capture filter: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+
+ base::win::ScopedComPtr<IPin> output_capture_pin(
+ VideoCaptureDeviceWin::GetPin(capture_filter.get(), PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE, GUID_NULL));
+ if (!output_capture_pin.get()) {
+ DLOG(ERROR) << "Failed to get capture output pin";
+ return;
+ }
+
+ ScopedComPtr<IAMStreamConfig> stream_config;
+ hr = output_capture_pin.QueryInterface(stream_config.Receive());
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to get IAMStreamConfig interface from "
+ "capture device: " << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "GetNumberOfCapabilities failed: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ for (int i = 0; i < count; ++i) {
+ VideoCaptureDeviceWin::ScopedMediaType media_type;
+ hr = stream_config->GetStreamCaps(i, media_type.Receive(), caps.get());
+    // GetStreamCaps() may return S_FALSE, so don't use the FAILED() or
+    // SUCCEEDED() macros here since they'll trigger incorrectly.
+ if (hr != S_OK || !media_type.get()) {
+ DLOG(ERROR) << "GetStreamCaps failed: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+
+ if (media_type->majortype == MEDIATYPE_Video &&
+ media_type->formattype == FORMAT_VideoInfo) {
+ VideoCaptureFormat format;
+ format.pixel_format =
+ VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+ media_type->subtype);
+ if (format.pixel_format == VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN)
+ continue;
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ format.frame_size.SetSize(h->bmiHeader.biWidth, h->bmiHeader.biHeight);
+ // Trust the frame rate from the VIDEOINFOHEADER.
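+      // (REFERENCE_TIME is in 100 ns units: e.g. an AvgTimePerFrame of
+      // 333,333 corresponds to ~30 fps.)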
+ format.frame_rate =
+ (h->AvgTimePerFrame > 0)
+ ? kSecondsToReferenceTime / static_cast<float>(h->AvgTimePerFrame)
+ : 0.0f;
+ formats->push_back(format);
+ DVLOG(1) << device.name() << " " << VideoCaptureFormat::ToString(format);
+ }
+ }
+}
+
+static void GetDeviceSupportedFormatsMediaFoundation(
+ const Name& device,
+ VideoCaptureFormats* formats) {
+ DVLOG(1) << "GetDeviceSupportedFormatsMediaFoundation for " << device.name();
+ ScopedComPtr<IMFMediaSource> source;
+ if (!CreateVideoCaptureDeviceMediaFoundation(device.id().c_str(),
+ source.Receive())) {
+ return;
+ }
+
+ base::win::ScopedComPtr<IMFSourceReader> reader;
+ HRESULT hr =
+ MFCreateSourceReaderFromMediaSource(source.get(), NULL, reader.Receive());
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "MFCreateSourceReaderFromMediaSource failed: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+
+ DWORD stream_index = 0;
+ ScopedComPtr<IMFMediaType> type;
+ while (SUCCEEDED(reader->GetNativeMediaType(kFirstVideoStream, stream_index,
+ type.Receive()))) {
+ UINT32 width, height;
+ hr = MFGetAttributeSize(type.get(), MF_MT_FRAME_SIZE, &width, &height);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "MFGetAttributeSize failed: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+ VideoCaptureFormat capture_format;
+ capture_format.frame_size.SetSize(width, height);
+
+ UINT32 numerator, denominator;
+ hr = MFGetAttributeRatio(type.get(), MF_MT_FRAME_RATE, &numerator,
+ &denominator);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "MFGetAttributeSize failed: "
+ << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+ capture_format.frame_rate =
+ denominator ? static_cast<float>(numerator) / denominator : 0.0f;
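+    // E.g. a camera advertising an MF_MT_FRAME_RATE of 30000/1001 yields the
+    // NTSC rate of ~29.97 fps.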
+
+ GUID type_guid;
+ hr = type->GetGUID(MF_MT_SUBTYPE, &type_guid);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "GetGUID failed: " << logging::SystemErrorCodeToString(hr);
+ return;
+ }
+ VideoCaptureDeviceMFWin::FormatFromGuid(type_guid,
+ &capture_format.pixel_format);
+ type.Release();
+ formats->push_back(capture_format);
+ ++stream_index;
+
+ DVLOG(1) << device.name() << " "
+ << VideoCaptureFormat::ToString(capture_format);
+ }
+}
+
+// Returns true iff the current platform supports the Media Foundation API
+// and the required DLLs are available. On Vista this API is an optional
+// download, and although it is advertised as part of Windows 7 and onwards,
+// the required DLLs are missing from some Windows 7 distributions such as
+// Windows 7 N and Windows 7 KN.
+// static
+bool VideoCaptureDeviceFactoryWin::PlatformSupportsMediaFoundation() {
+ // Even though the DLLs might be available on Vista, we get crashes
+ // when running our tests on the build bots.
+ if (base::win::GetVersion() < base::win::VERSION_WIN7)
+ return false;
+
+ static bool g_dlls_available = LoadMediaFoundationDlls();
+ return g_dlls_available;
+}
+
+VideoCaptureDeviceFactoryWin::VideoCaptureDeviceFactoryWin() {
+  // Use Media Foundation for Metro processes (Windows 8 and later) and
+  // DirectShow for anything else, unless forced via flag. Media Foundation
+  // can also be forced if the appropriate flag is set and we are on Windows 7
+  // or 8 in non-Metro mode.
+ const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
+ use_media_foundation_ =
+ (base::win::IsMetroProcess() &&
+ !cmd_line->HasSwitch(switches::kForceDirectShowVideoCapture)) ||
+ (base::win::GetVersion() >= base::win::VERSION_WIN7 &&
+ cmd_line->HasSwitch(switches::kForceMediaFoundationVideoCapture));
+}
+
+scoped_ptr<VideoCaptureDevice> VideoCaptureDeviceFactoryWin::Create(
+ const Name& device_name) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ scoped_ptr<VideoCaptureDevice> device;
+ if (device_name.capture_api_type() == Name::MEDIA_FOUNDATION) {
+ DCHECK(PlatformSupportsMediaFoundation());
+ device.reset(new VideoCaptureDeviceMFWin(device_name));
+ DVLOG(1) << " MediaFoundation Device: " << device_name.name();
+ ScopedComPtr<IMFMediaSource> source;
+ if (!CreateVideoCaptureDeviceMediaFoundation(device_name.id().c_str(),
+ source.Receive())) {
+ return scoped_ptr<VideoCaptureDevice>();
+ }
+ if (!static_cast<VideoCaptureDeviceMFWin*>(device.get())->Init(source))
+ device.reset();
+ } else {
+ DCHECK(device_name.capture_api_type() == Name::DIRECT_SHOW);
+ device.reset(new VideoCaptureDeviceWin(device_name));
+ DVLOG(1) << " DirectShow Device: " << device_name.name();
+ if (!static_cast<VideoCaptureDeviceWin*>(device.get())->Init())
+ device.reset();
+ }
+ return device.Pass();
+}
+
+void VideoCaptureDeviceFactoryWin::GetDeviceNames(Names* device_names) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (use_media_foundation_) {
+ GetDeviceNamesMediaFoundation(device_names);
+ } else {
+ GetDeviceNamesDirectShow(device_names);
+ }
+}
+
+void VideoCaptureDeviceFactoryWin::GetDeviceSupportedFormats(
+ const Name& device,
+ VideoCaptureFormats* formats) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (use_media_foundation_)
+ GetDeviceSupportedFormatsMediaFoundation(device, formats);
+ else
+ GetDeviceSupportedFormatsDirectShow(device, formats);
+}
+
+// static
+VideoCaptureDeviceFactory*
+VideoCaptureDeviceFactory::CreateVideoCaptureDeviceFactory(
+ scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner) {
+ return new VideoCaptureDeviceFactoryWin();
+}
+
+} // namespace media
diff --git a/media/capture/video/win/video_capture_device_factory_win.h b/media/capture/video/win/video_capture_device_factory_win.h
new file mode 100644
index 0000000..1512925
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_factory_win.h
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implementation of a VideoCaptureDeviceFactory class for Windows platforms.
+
+#ifndef MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
+
+#include "media/capture/video/video_capture_device_factory.h"
+
+namespace media {
+
+// Extension of VideoCaptureDeviceFactory to create and manipulate Windows
+// devices, via either DirectShow or MediaFoundation APIs.
+class MEDIA_EXPORT VideoCaptureDeviceFactoryWin
+ : public VideoCaptureDeviceFactory {
+ public:
+ static bool PlatformSupportsMediaFoundation();
+
+ VideoCaptureDeviceFactoryWin();
+ ~VideoCaptureDeviceFactoryWin() override {}
+
+ scoped_ptr<VideoCaptureDevice> Create(
+ const VideoCaptureDevice::Name& device_name) override;
+ void GetDeviceNames(VideoCaptureDevice::Names* device_names) override;
+ void GetDeviceSupportedFormats(
+ const VideoCaptureDevice::Name& device,
+ VideoCaptureFormats* supported_formats) override;
+
+ private:
+ bool use_media_foundation_;
+
+ DISALLOW_COPY_AND_ASSIGN(VideoCaptureDeviceFactoryWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_VIDEO_CAPTURE_DEVICE_FACTORY_WIN_H_
diff --git a/media/capture/video/win/video_capture_device_mf_win.cc b/media/capture/video/win/video_capture_device_mf_win.cc
new file mode 100644
index 0000000..8d3b104
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_mf_win.cc
@@ -0,0 +1,327 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/video_capture_device_mf_win.h"
+
+#include <mfapi.h>
+#include <mferror.h>
+
+#include "base/memory/ref_counted.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/windows_version.h"
+#include "media/capture/video/win/capability_list_win.h"
+
+using base::win::ScopedCoMem;
+using base::win::ScopedComPtr;
+
+namespace media {
+
+// In Windows device identifiers, the USB VID and PID are preceded by the
+// string "vid_" or "pid_". Each identifier is 4 characters long.
+const char kVidPrefix[] = "vid_"; // Also contains '\0'.
+const char kPidPrefix[] = "pid_"; // Also contains '\0'.
+const size_t kVidPidSize = 4;
+
+static bool GetFrameSize(IMFMediaType* type, gfx::Size* frame_size) {
+ UINT32 width32, height32;
+ if (FAILED(MFGetAttributeSize(type, MF_MT_FRAME_SIZE, &width32, &height32)))
+ return false;
+ frame_size->SetSize(width32, height32);
+ return true;
+}
+
+static bool GetFrameRate(IMFMediaType* type, float* frame_rate) {
+ UINT32 numerator, denominator;
+ if (FAILED(MFGetAttributeRatio(type, MF_MT_FRAME_RATE, &numerator,
+ &denominator)) ||
+ !denominator) {
+ return false;
+ }
+ *frame_rate = static_cast<float>(numerator) / denominator;
+ return true;
+}
+
+static bool FillFormat(IMFMediaType* type, VideoCaptureFormat* format) {
+ GUID type_guid;
+ if (FAILED(type->GetGUID(MF_MT_SUBTYPE, &type_guid)) ||
+ !GetFrameSize(type, &format->frame_size) ||
+ !GetFrameRate(type, &format->frame_rate) ||
+ !VideoCaptureDeviceMFWin::FormatFromGuid(type_guid,
+ &format->pixel_format)) {
+ return false;
+ }
+
+ return true;
+}
+
+HRESULT FillCapabilities(IMFSourceReader* source,
+ CapabilityList* capabilities) {
+ DWORD stream_index = 0;
+ ScopedComPtr<IMFMediaType> type;
+ HRESULT hr;
+ while (SUCCEEDED(hr = source->GetNativeMediaType(
+ kFirstVideoStream, stream_index, type.Receive()))) {
+ VideoCaptureFormat format;
+ if (FillFormat(type.get(), &format))
+ capabilities->emplace_back(stream_index, format);
+ type.Release();
+ ++stream_index;
+ }
+
+ if (capabilities->empty() && (SUCCEEDED(hr) || hr == MF_E_NO_MORE_TYPES))
+ hr = HRESULT_FROM_WIN32(ERROR_EMPTY);
+
+ return (hr == MF_E_NO_MORE_TYPES) ? S_OK : hr;
+}
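+
+// Note: GetNativeMediaType() returns MF_E_NO_MORE_TYPES once |stream_index|
+// runs past the last native type; that is the normal loop exit and is mapped
+// to S_OK above.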
+
+class MFReaderCallback final
+ : public base::RefCountedThreadSafe<MFReaderCallback>,
+ public IMFSourceReaderCallback {
+ public:
+ MFReaderCallback(VideoCaptureDeviceMFWin* observer)
+ : observer_(observer), wait_event_(NULL) {}
+
+ void SetSignalOnFlush(base::WaitableEvent* event) { wait_event_ = event; }
+
+ STDMETHOD(QueryInterface)(REFIID riid, void** object) override {
+ if (riid != IID_IUnknown && riid != IID_IMFSourceReaderCallback)
+ return E_NOINTERFACE;
+ *object = static_cast<IMFSourceReaderCallback*>(this);
+ AddRef();
+ return S_OK;
+ }
+
+ STDMETHOD_(ULONG, AddRef)() override {
+ base::RefCountedThreadSafe<MFReaderCallback>::AddRef();
+ return 1U;
+ }
+
+ STDMETHOD_(ULONG, Release)() override {
+ base::RefCountedThreadSafe<MFReaderCallback>::Release();
+ return 1U;
+ }
+
+ STDMETHOD(OnReadSample)(HRESULT status,
+ DWORD stream_index,
+ DWORD stream_flags,
+ LONGLONG time_stamp,
+ IMFSample* sample) override {
+ base::TimeTicks stamp(base::TimeTicks::Now());
+ if (!sample) {
+ observer_->OnIncomingCapturedData(NULL, 0, 0, stamp);
+ return S_OK;
+ }
+
+ DWORD count = 0;
+ sample->GetBufferCount(&count);
+
+ for (DWORD i = 0; i < count; ++i) {
+ ScopedComPtr<IMFMediaBuffer> buffer;
+ sample->GetBufferByIndex(i, buffer.Receive());
+ if (buffer.get()) {
+ DWORD length = 0, max_length = 0;
+ BYTE* data = NULL;
+ buffer->Lock(&data, &max_length, &length);
+ observer_->OnIncomingCapturedData(data, length, 0, stamp);
+ buffer->Unlock();
+ }
+ }
+ return S_OK;
+ }
+
+ STDMETHOD(OnFlush)(DWORD stream_index) override {
+ if (wait_event_) {
+ wait_event_->Signal();
+ wait_event_ = NULL;
+ }
+ return S_OK;
+ }
+
+ STDMETHOD(OnEvent)(DWORD stream_index, IMFMediaEvent* event) override {
+ NOTIMPLEMENTED();
+ return S_OK;
+ }
+
+ private:
+ friend class base::RefCountedThreadSafe<MFReaderCallback>;
+ ~MFReaderCallback() {}
+
+ VideoCaptureDeviceMFWin* observer_;
+ base::WaitableEvent* wait_event_;
+};
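+
+// Note: AddRef()/Release() above return a dummy count on purpose; the
+// object's lifetime is managed by base::RefCountedThreadSafe (held via
+// scoped_refptr in VideoCaptureDeviceMFWin), not by COM reference counting.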
+
+// static
+bool VideoCaptureDeviceMFWin::FormatFromGuid(const GUID& guid,
+ VideoCapturePixelFormat* format) {
+ struct {
+ const GUID& guid;
+ const VideoCapturePixelFormat format;
+ } static const kFormatMap[] = {
+ {MFVideoFormat_I420, VIDEO_CAPTURE_PIXEL_FORMAT_I420},
+ {MFVideoFormat_YUY2, VIDEO_CAPTURE_PIXEL_FORMAT_YUY2},
+ {MFVideoFormat_UYVY, VIDEO_CAPTURE_PIXEL_FORMAT_UYVY},
+ {MFVideoFormat_RGB24, VIDEO_CAPTURE_PIXEL_FORMAT_RGB24},
+ {MFVideoFormat_ARGB32, VIDEO_CAPTURE_PIXEL_FORMAT_ARGB},
+ {MFVideoFormat_MJPG, VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG},
+ {MFVideoFormat_YV12, VIDEO_CAPTURE_PIXEL_FORMAT_YV12},
+ };
+
+  for (size_t i = 0; i < arraysize(kFormatMap); ++i) {
+ if (kFormatMap[i].guid == guid) {
+ *format = kFormatMap[i].format;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const std::string VideoCaptureDevice::Name::GetModel() const {
+ const size_t vid_prefix_size = sizeof(kVidPrefix) - 1;
+ const size_t pid_prefix_size = sizeof(kPidPrefix) - 1;
+ const size_t vid_location = unique_id_.find(kVidPrefix);
+ if (vid_location == std::string::npos ||
+ vid_location + vid_prefix_size + kVidPidSize > unique_id_.size()) {
+ return std::string();
+ }
+ const size_t pid_location = unique_id_.find(kPidPrefix);
+ if (pid_location == std::string::npos ||
+ pid_location + pid_prefix_size + kVidPidSize > unique_id_.size()) {
+ return std::string();
+ }
+ std::string id_vendor =
+ unique_id_.substr(vid_location + vid_prefix_size, kVidPidSize);
+ std::string id_product =
+ unique_id_.substr(pid_location + pid_prefix_size, kVidPidSize);
+ return id_vendor + ":" + id_product;
+}
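+
+// Illustrative example (hypothetical device path): for a |unique_id_| such as
+// "\\?\usb#vid_046d&pid_0825#...", GetModel() extracts the four hex digits
+// following "vid_" and "pid_" and returns "046d:0825".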
+
+VideoCaptureDeviceMFWin::VideoCaptureDeviceMFWin(const Name& device_name)
+    : name_(device_name), capture_(false) {
+ DetachFromThread();
+}
+
+VideoCaptureDeviceMFWin::~VideoCaptureDeviceMFWin() {
+ DCHECK(CalledOnValidThread());
+}
+
+bool VideoCaptureDeviceMFWin::Init(
+ const base::win::ScopedComPtr<IMFMediaSource>& source) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(!reader_.get());
+
+ ScopedComPtr<IMFAttributes> attributes;
+ MFCreateAttributes(attributes.Receive(), 1);
+ DCHECK(attributes.get());
+
+ callback_ = new MFReaderCallback(this);
+ attributes->SetUnknown(MF_SOURCE_READER_ASYNC_CALLBACK, callback_.get());
+
+ return SUCCEEDED(MFCreateSourceReaderFromMediaSource(
+ source.get(), attributes.get(), reader_.Receive()));
+}
+
+void VideoCaptureDeviceMFWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(CalledOnValidThread());
+
+ base::AutoLock lock(lock_);
+
+ client_ = client.Pass();
+ DCHECK_EQ(capture_, false);
+
+ CapabilityList capabilities;
+ HRESULT hr = S_OK;
+ if (reader_.get()) {
+ hr = FillCapabilities(reader_.get(), &capabilities);
+ if (SUCCEEDED(hr)) {
+ const CapabilityWin found_capability =
+ GetBestMatchedCapability(params.requested_format, capabilities);
+ ScopedComPtr<IMFMediaType> type;
+ hr = reader_->GetNativeMediaType(
+ kFirstVideoStream, found_capability.stream_index, type.Receive());
+ if (SUCCEEDED(hr)) {
+ hr = reader_->SetCurrentMediaType(kFirstVideoStream, NULL, type.get());
+ if (SUCCEEDED(hr)) {
+ hr =
+ reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL, NULL);
+ if (SUCCEEDED(hr)) {
+ capture_format_ = found_capability.supported_format;
+ capture_ = true;
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ OnError(hr);
+}
+
+void VideoCaptureDeviceMFWin::StopAndDeAllocate() {
+ DCHECK(CalledOnValidThread());
+ base::WaitableEvent flushed(false, false);
+ const int kFlushTimeOutInMs = 1000;
+ bool wait = false;
+ {
+ base::AutoLock lock(lock_);
+ if (capture_) {
+ capture_ = false;
+ callback_->SetSignalOnFlush(&flushed);
+ wait = SUCCEEDED(
+ reader_->Flush(static_cast<DWORD>(MF_SOURCE_READER_ALL_STREAMS)));
+ if (!wait) {
+ callback_->SetSignalOnFlush(NULL);
+ }
+ }
+ client_.reset();
+ }
+
+ // If the device has been unplugged, the Flush() won't trigger the event
+ // and a timeout will happen.
+ // TODO(tommi): Hook up the IMFMediaEventGenerator notifications API and
+ // do not wait at all after getting MEVideoCaptureDeviceRemoved event.
+ // See issue/226396.
+ if (wait)
+ flushed.TimedWait(base::TimeDelta::FromMilliseconds(kFlushTimeOutInMs));
+}
+
+void VideoCaptureDeviceMFWin::OnIncomingCapturedData(
+ const uint8* data,
+ int length,
+ int rotation,
+ const base::TimeTicks& time_stamp) {
+ base::AutoLock lock(lock_);
+ if (data && client_.get()) {
+ client_->OnIncomingCapturedData(data, length, capture_format_, rotation,
+ time_stamp);
+ }
+
+ if (capture_) {
+ HRESULT hr =
+ reader_->ReadSample(kFirstVideoStream, 0, NULL, NULL, NULL, NULL);
+ if (FAILED(hr)) {
+      // If running the *VideoCap* unit tests on repeat, this can sometimes
+      // fail with HRESULT_FROM_WIN32(ERROR_INVALID_FUNCTION).
+ // It's not clear to me why this is, but it is possible that it has
+ // something to do with this bug:
+ // http://support.microsoft.com/kb/979567
+ OnError(hr);
+ }
+ }
+}
+
+void VideoCaptureDeviceMFWin::OnError(HRESULT hr) {
+ if (client_.get()) {
+ client_->OnError(
+ base::StringPrintf("VideoCaptureDeviceMFWin: %s",
+ logging::SystemErrorCodeToString(hr).c_str()));
+ }
+}
+
+} // namespace media
diff --git a/media/capture/video/win/video_capture_device_mf_win.h b/media/capture/video/win/video_capture_device_mf_win.h
new file mode 100644
index 0000000..baff97f
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_mf_win.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows specific implementation of VideoCaptureDevice.
+// Media Foundation is used for capturing. Media Foundation provides its own
+// threads for capturing.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_MF_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_MF_WIN_H_
+
+#include <mfidl.h>
+#include <mfreadwrite.h>
+
+#include <vector>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/win/scoped_comptr.h"
+#include "media/base/media_export.h"
+#include "media/capture/video/video_capture_device.h"
+
+interface IMFSourceReader;
+
+namespace media {
+
+class MFReaderCallback;
+
+const DWORD kFirstVideoStream =
+ static_cast<DWORD>(MF_SOURCE_READER_FIRST_VIDEO_STREAM);
+
+class MEDIA_EXPORT VideoCaptureDeviceMFWin : public base::NonThreadSafe,
+ public VideoCaptureDevice {
+ public:
+ static bool FormatFromGuid(const GUID& guid, VideoCapturePixelFormat* format);
+
+ explicit VideoCaptureDeviceMFWin(const Name& device_name);
+ ~VideoCaptureDeviceMFWin() override;
+
+ // Opens the device driver for this device.
+ bool Init(const base::win::ScopedComPtr<IMFMediaSource>& source);
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
+
+ // Captured new video data.
+ void OnIncomingCapturedData(const uint8* data,
+ int length,
+ int rotation,
+ const base::TimeTicks& time_stamp);
+
+ private:
+ void OnError(HRESULT hr);
+
+ Name name_;
+ base::win::ScopedComPtr<IMFActivate> device_;
+ scoped_refptr<MFReaderCallback> callback_;
+
+ base::Lock lock_; // Used to guard the below variables.
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+ base::win::ScopedComPtr<IMFSourceReader> reader_;
+ VideoCaptureFormat capture_format_;
+ bool capture_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceMFWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_MF_WIN_H_
diff --git a/media/capture/video/win/video_capture_device_win.cc b/media/capture/video/win/video_capture_device_win.cc
new file mode 100644
index 0000000..0ca81f1
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_win.cc
@@ -0,0 +1,581 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/win/video_capture_device_win.h"
+
+#include <ks.h>
+#include <ksmedia.h>
+
+#include <algorithm>
+#include <list>
+
+#include "base/strings/sys_string_conversions.h"
+#include "base/win/scoped_co_mem.h"
+#include "base/win/scoped_variant.h"
+#include "media/capture/video/win/video_capture_device_mf_win.h"
+
+using base::win::ScopedCoMem;
+using base::win::ScopedComPtr;
+using base::win::ScopedVariant;
+
+namespace media {
+
+// Check if a Pin matches a category.
+bool PinMatchesCategory(IPin* pin, REFGUID category) {
+ DCHECK(pin);
+ bool found = false;
+ ScopedComPtr<IKsPropertySet> ks_property;
+ HRESULT hr = ks_property.QueryFrom(pin);
+ if (SUCCEEDED(hr)) {
+ GUID pin_category;
+ DWORD return_value;
+ hr = ks_property->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY, NULL, 0,
+ &pin_category, sizeof(pin_category), &return_value);
+ if (SUCCEEDED(hr) && (return_value == sizeof(pin_category))) {
+ found = (pin_category == category);
+ }
+ }
+ return found;
+}
+
+// Check if a Pin's MediaType matches a given |major_type|.
+bool PinMatchesMajorType(IPin* pin, REFGUID major_type) {
+ DCHECK(pin);
+ AM_MEDIA_TYPE connection_media_type;
+ HRESULT hr = pin->ConnectionMediaType(&connection_media_type);
+ return SUCCEEDED(hr) && connection_media_type.majortype == major_type;
+}
+
+// Finds and creates a DirectShow Video Capture filter matching the |device_id|.
+// |class_id| is usually CLSID_VideoInputDeviceCategory for standard DirectShow
+// devices but might also be AM_KSCATEGORY_CAPTURE or AM_KSCATEGORY_CROSSBAR, to
+// enumerate WDM capture devices or WDM crossbars, respectively.
+// static
+HRESULT VideoCaptureDeviceWin::GetDeviceFilter(const std::string& device_id,
+ const CLSID device_class_id,
+ IBaseFilter** filter) {
+ DCHECK(filter);
+
+ ScopedComPtr<ICreateDevEnum> dev_enum;
+ HRESULT hr =
+ dev_enum.CreateInstance(CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC);
+ if (FAILED(hr))
+ return hr;
+
+ ScopedComPtr<IEnumMoniker> enum_moniker;
+ hr = dev_enum->CreateClassEnumerator(device_class_id, enum_moniker.Receive(),
+ 0);
+  // CreateClassEnumerator returns S_FALSE on some Windows OS versions when no
+  // camera exists, so the FAILED macro can't be used.
+  if (hr != S_OK)
+    return hr;
+
+ ScopedComPtr<IMoniker> moniker;
+ ScopedComPtr<IBaseFilter> capture_filter;
+ DWORD fetched = 0;
+ while (enum_moniker->Next(1, moniker.Receive(), &fetched) == S_OK) {
+ ScopedComPtr<IPropertyBag> prop_bag;
+ hr = moniker->BindToStorage(0, 0, IID_IPropertyBag, prop_bag.ReceiveVoid());
+ if (FAILED(hr)) {
+ moniker.Release();
+ continue;
+ }
+
+ // Find the device via DevicePath, Description or FriendlyName, whichever is
+ // available first.
+ static const wchar_t* kPropertyNames[] = {
+ L"DevicePath", L"Description", L"FriendlyName"};
+
+ ScopedVariant name;
+ for (const auto* property_name : kPropertyNames) {
+ if (name.type() != VT_BSTR)
+ prop_bag->Read(property_name, name.Receive(), 0);
+ }
+
+ if (name.type() == VT_BSTR) {
+ std::string device_path(base::SysWideToUTF8(V_BSTR(name.ptr())));
+ if (device_path.compare(device_id) == 0) {
+        // We have found the requested device.
+ hr = moniker->BindToObject(0, 0, IID_IBaseFilter,
+ capture_filter.ReceiveVoid());
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to bind camera filter: "
+ << logging::SystemErrorCodeToString(hr);
+ break;
+ }
+ }
+ moniker.Release();
+ }
+
+ *filter = capture_filter.Detach();
+ if (!*filter && SUCCEEDED(hr))
+ hr = HRESULT_FROM_WIN32(ERROR_NOT_FOUND);
+
+ return hr;
+}
+
+// Finds an IPin on an IBaseFilter given the direction, Category and/or Major
+// Type. If either |category| or |major_type| are GUID_NULL, they are ignored.
+// static
+ScopedComPtr<IPin> VideoCaptureDeviceWin::GetPin(IBaseFilter* filter,
+ PIN_DIRECTION pin_dir,
+ REFGUID category,
+ REFGUID major_type) {
+ ScopedComPtr<IPin> pin;
+ ScopedComPtr<IEnumPins> pin_enum;
+ HRESULT hr = filter->EnumPins(pin_enum.Receive());
+ if (pin_enum.get() == NULL)
+ return pin;
+
+  // Iterate over the pins, returning the first one that matches the
+  // requested direction, category and major type.
+  hr = pin_enum->Reset();  // Set to first pin.
+ while ((hr = pin_enum->Next(1, pin.Receive(), NULL)) == S_OK) {
+ PIN_DIRECTION this_pin_dir = static_cast<PIN_DIRECTION>(-1);
+ hr = pin->QueryDirection(&this_pin_dir);
+ if (pin_dir == this_pin_dir) {
+ if ((category == GUID_NULL || PinMatchesCategory(pin.get(), category)) &&
+ (major_type == GUID_NULL ||
+ PinMatchesMajorType(pin.get(), major_type))) {
+ return pin;
+ }
+ }
+ pin.Release();
+ }
+
+ DCHECK(!pin.get());
+ return pin;
+}
+
+// static
+VideoCapturePixelFormat
+VideoCaptureDeviceWin::TranslateMediaSubtypeToPixelFormat(
+ const GUID& sub_type) {
+ static struct {
+ const GUID& sub_type;
+ VideoCapturePixelFormat format;
+ } pixel_formats[] = {
+ {kMediaSubTypeI420, VIDEO_CAPTURE_PIXEL_FORMAT_I420},
+ {MEDIASUBTYPE_IYUV, VIDEO_CAPTURE_PIXEL_FORMAT_I420},
+ {MEDIASUBTYPE_RGB24, VIDEO_CAPTURE_PIXEL_FORMAT_RGB24},
+ {MEDIASUBTYPE_YUY2, VIDEO_CAPTURE_PIXEL_FORMAT_YUY2},
+ {MEDIASUBTYPE_MJPG, VIDEO_CAPTURE_PIXEL_FORMAT_MJPEG},
+ {MEDIASUBTYPE_UYVY, VIDEO_CAPTURE_PIXEL_FORMAT_UYVY},
+ {MEDIASUBTYPE_ARGB32, VIDEO_CAPTURE_PIXEL_FORMAT_ARGB},
+ {kMediaSubTypeHDYC, VIDEO_CAPTURE_PIXEL_FORMAT_UYVY},
+ };
+ for (size_t i = 0; i < arraysize(pixel_formats); ++i) {
+ if (sub_type == pixel_formats[i].sub_type)
+ return pixel_formats[i].format;
+ }
+#ifndef NDEBUG
+ WCHAR guid_str[128];
+ StringFromGUID2(sub_type, guid_str, arraysize(guid_str));
+ DVLOG(2) << "Device (also) supports an unknown media type " << guid_str;
+#endif
+ return VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN;
+}
+
+void VideoCaptureDeviceWin::ScopedMediaType::Free() {
+ if (!media_type_)
+ return;
+
+ DeleteMediaType(media_type_);
+ media_type_ = NULL;
+}
+
+AM_MEDIA_TYPE** VideoCaptureDeviceWin::ScopedMediaType::Receive() {
+ DCHECK(!media_type_);
+ return &media_type_;
+}
+
+// Release the format block for a media type.
+// http://msdn.microsoft.com/en-us/library/dd375432(VS.85).aspx
+void VideoCaptureDeviceWin::ScopedMediaType::FreeMediaType(AM_MEDIA_TYPE* mt) {
+ if (mt->cbFormat != 0) {
+ CoTaskMemFree(mt->pbFormat);
+ mt->cbFormat = 0;
+ mt->pbFormat = NULL;
+ }
+ if (mt->pUnk != NULL) {
+ NOTREACHED();
+ // pUnk should not be used.
+ mt->pUnk->Release();
+ mt->pUnk = NULL;
+ }
+}
+
+// Delete a media type structure that was allocated on the heap.
+// http://msdn.microsoft.com/en-us/library/dd375432(VS.85).aspx
+void VideoCaptureDeviceWin::ScopedMediaType::DeleteMediaType(
+ AM_MEDIA_TYPE* mt) {
+ if (mt != NULL) {
+ FreeMediaType(mt);
+ CoTaskMemFree(mt);
+ }
+}
+
+VideoCaptureDeviceWin::VideoCaptureDeviceWin(const Name& device_name)
+ : device_name_(device_name), state_(kIdle) {
+ DetachFromThread();
+}
+
+VideoCaptureDeviceWin::~VideoCaptureDeviceWin() {
+ DCHECK(CalledOnValidThread());
+ if (media_control_.get())
+ media_control_->Stop();
+
+ if (graph_builder_.get()) {
+ if (sink_filter_.get()) {
+ graph_builder_->RemoveFilter(sink_filter_.get());
+ sink_filter_ = NULL;
+ }
+
+ if (capture_filter_.get())
+ graph_builder_->RemoveFilter(capture_filter_.get());
+ }
+
+ if (capture_graph_builder_.get())
+ capture_graph_builder_.Release();
+}
+
+bool VideoCaptureDeviceWin::Init() {
+ DCHECK(CalledOnValidThread());
+ HRESULT hr;
+
+ hr = GetDeviceFilter(device_name_.id(), CLSID_VideoInputDeviceCategory,
+ capture_filter_.Receive());
+
+ if (!capture_filter_.get()) {
+ DLOG(ERROR) << "Failed to create capture filter: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ output_capture_pin_ = GetPin(capture_filter_.get(), PINDIR_OUTPUT,
+ PIN_CATEGORY_CAPTURE, GUID_NULL);
+ if (!output_capture_pin_.get()) {
+ DLOG(ERROR) << "Failed to get capture output pin";
+ return false;
+ }
+
+  // Create the sink filter used for receiving captured frames.
+  sink_filter_ = new SinkFilter(this);
+  if (sink_filter_.get() == NULL) {
+    DLOG(ERROR) << "Failed to create sink filter";
+ return false;
+ }
+
+ input_sink_pin_ = sink_filter_->GetPin(0);
+
+ hr = graph_builder_.CreateInstance(CLSID_FilterGraph, NULL,
+ CLSCTX_INPROC_SERVER);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to create graph builder: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = capture_graph_builder_.CreateInstance(CLSID_CaptureGraphBuilder2, NULL,
+ CLSCTX_INPROC);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to create the Capture Graph Builder: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = capture_graph_builder_->SetFiltergraph(graph_builder_.get());
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to give graph to capture graph builder: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = graph_builder_.QueryInterface(media_control_.Receive());
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to create media control builder: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = graph_builder_->AddFilter(capture_filter_.get(), NULL);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to add the capture device to the graph: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ hr = graph_builder_->AddFilter(sink_filter_.get(), NULL);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to add the send filter to the graph: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ // The following code builds the upstream portions of the graph,
+ // for example if a capture device uses a Windows Driver Model (WDM)
+ // driver, the graph may require certain filters upstream from the
+ // WDM Video Capture filter, such as a TV Tuner filter or an Analog
+ // Video Crossbar filter. We try using the more prevalent
+ // MEDIATYPE_Interleaved first.
+ base::win::ScopedComPtr<IAMStreamConfig> stream_config;
+
+ hr = capture_graph_builder_->FindInterface(
+ &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Interleaved, capture_filter_.get(),
+ IID_IAMStreamConfig, (void**)stream_config.Receive());
+ if (FAILED(hr)) {
+ hr = capture_graph_builder_->FindInterface(
+ &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, capture_filter_.get(),
+ IID_IAMStreamConfig, (void**)stream_config.Receive());
+ DLOG_IF(ERROR, FAILED(hr)) << "Failed to find CapFilter:IAMStreamConfig: "
+ << logging::SystemErrorCodeToString(hr);
+ }
+
+ return CreateCapabilityMap();
+}
+
+void VideoCaptureDeviceWin::AllocateAndStart(
+ const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) {
+ DCHECK(CalledOnValidThread());
+ if (state_ != kIdle)
+ return;
+
+ client_ = client.Pass();
+
+ // Get the camera capability that best match the requested format.
+ const CapabilityWin found_capability =
+ GetBestMatchedCapability(params.requested_format, capabilities_);
+
+ // Reduce the frame rate if the requested frame rate is lower
+ // than the capability.
+ float frame_rate = std::min(found_capability.supported_format.frame_rate,
+ params.requested_format.frame_rate);
+
+ ScopedComPtr<IAMStreamConfig> stream_config;
+ HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
+ if (FAILED(hr)) {
+ SetErrorState("Can't get the Capture format settings");
+ return;
+ }
+
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ SetErrorState("Failed to GetNumberOfCapabilities");
+ return;
+ }
+
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ ScopedMediaType media_type;
+
+ // Get the windows capability from the capture device.
+ // GetStreamCaps can return S_FALSE which we consider an error. Therefore the
+ // FAILED macro can't be used.
+ hr = stream_config->GetStreamCaps(found_capability.stream_index,
+ media_type.Receive(), caps.get());
+ if (hr != S_OK) {
+ SetErrorState("Failed to get capture device capabilities");
+ return;
+ }
+ if (media_type->formattype == FORMAT_VideoInfo) {
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ if (frame_rate > 0)
+ h->AvgTimePerFrame = kSecondsToReferenceTime / frame_rate;
+ }
+ // Set the sink filter to request this format.
+ sink_filter_->SetRequestedMediaFormat(
+ found_capability.supported_format.pixel_format, frame_rate,
+ found_capability.info_header);
+ // Order the capture device to use this format.
+ hr = stream_config->SetFormat(media_type.get());
+ if (FAILED(hr)) {
+ // TODO(grunell): Log the error. http://crbug.com/405016.
+ SetErrorState("Failed to set capture device output format");
+ return;
+ }
+
+ SetAntiFlickerInCaptureFilter();
+
+ if (media_type->subtype == kMediaSubTypeHDYC) {
+ // HDYC pixel format, used by the DeckLink capture card, needs an AVI
+ // decompressor filter after source, let |graph_builder_| add it.
+ hr = graph_builder_->Connect(output_capture_pin_.get(),
+ input_sink_pin_.get());
+ } else {
+ hr = graph_builder_->ConnectDirect(output_capture_pin_.get(),
+ input_sink_pin_.get(), NULL);
+ }
+
+ if (FAILED(hr)) {
+ SetErrorState("Failed to connect the Capture graph.");
+ return;
+ }
+
+ hr = media_control_->Pause();
+ if (FAILED(hr)) {
+ SetErrorState(
+ "Failed to Pause the Capture device. "
+ "Is it already occupied?");
+ return;
+ }
+
+  // Get the format back from the sink filter after the filter has been
+  // connected.
+ capture_format_ = sink_filter_->ResultingFormat();
+
+ // Start capturing.
+ hr = media_control_->Run();
+ if (FAILED(hr)) {
+ SetErrorState("Failed to start the Capture device.");
+ return;
+ }
+
+ state_ = kCapturing;
+}
+
+void VideoCaptureDeviceWin::StopAndDeAllocate() {
+ DCHECK(CalledOnValidThread());
+ if (state_ != kCapturing)
+ return;
+
+ HRESULT hr = media_control_->Stop();
+ if (FAILED(hr)) {
+ SetErrorState("Failed to stop the capture graph.");
+ return;
+ }
+
+ graph_builder_->Disconnect(output_capture_pin_.get());
+ graph_builder_->Disconnect(input_sink_pin_.get());
+
+ client_.reset();
+ state_ = kIdle;
+}
+
+// Implements SinkFilterObserver.
+void VideoCaptureDeviceWin::FrameReceived(const uint8* buffer, int length) {
+ client_->OnIncomingCapturedData(buffer, length, capture_format_, 0,
+ base::TimeTicks::Now());
+}
+
+bool VideoCaptureDeviceWin::CreateCapabilityMap() {
+ DCHECK(CalledOnValidThread());
+ ScopedComPtr<IAMStreamConfig> stream_config;
+ HRESULT hr = output_capture_pin_.QueryInterface(stream_config.Receive());
+ if (FAILED(hr)) {
+ DPLOG(ERROR) << "Failed to get IAMStreamConfig interface from "
+ "capture device: " << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ // Get interface used for getting the frame rate.
+ ScopedComPtr<IAMVideoControl> video_control;
+ hr = capture_filter_.QueryInterface(video_control.Receive());
+ DLOG_IF(WARNING, FAILED(hr)) << "IAMVideoControl Interface NOT SUPPORTED: "
+ << logging::SystemErrorCodeToString(hr);
+
+ int count = 0, size = 0;
+ hr = stream_config->GetNumberOfCapabilities(&count, &size);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Failed to GetNumberOfCapabilities: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ scoped_ptr<BYTE[]> caps(new BYTE[size]);
+ for (int stream_index = 0; stream_index < count; ++stream_index) {
+ ScopedMediaType media_type;
+ hr = stream_config->GetStreamCaps(stream_index, media_type.Receive(),
+ caps.get());
+    // GetStreamCaps() may return S_FALSE, so don't use the FAILED() or
+    // SUCCEEDED() macros here since they'll trigger incorrectly.
+ if (hr != S_OK) {
+ DLOG(ERROR) << "Failed to GetStreamCaps: "
+ << logging::SystemErrorCodeToString(hr);
+ return false;
+ }
+
+ if (media_type->majortype == MEDIATYPE_Video &&
+ media_type->formattype == FORMAT_VideoInfo) {
+ VideoCaptureFormat format;
+ format.pixel_format =
+ TranslateMediaSubtypeToPixelFormat(media_type->subtype);
+ if (format.pixel_format == VIDEO_CAPTURE_PIXEL_FORMAT_UNKNOWN)
+ continue;
+
+ VIDEOINFOHEADER* h =
+ reinterpret_cast<VIDEOINFOHEADER*>(media_type->pbFormat);
+ format.frame_size.SetSize(h->bmiHeader.biWidth, h->bmiHeader.biHeight);
+
+ // Try to get a better |time_per_frame| from IAMVideoControl. If not, use
+ // the value from VIDEOINFOHEADER.
+ REFERENCE_TIME time_per_frame = h->AvgTimePerFrame;
+ if (video_control.get()) {
+ ScopedCoMem<LONGLONG> max_fps;
+ LONG list_size = 0;
+ const SIZE size = {format.frame_size.width(),
+ format.frame_size.height()};
+ hr = video_control->GetFrameRateList(output_capture_pin_.get(),
+ stream_index, size, &list_size,
+ &max_fps);
+ // Can't assume the first value will return the max fps.
+ // Sometimes |list_size| will be > 0, but max_fps will be NULL. Some
+ // drivers may return an HRESULT of S_FALSE which SUCCEEDED() translates
+ // into success, so explicitly check S_OK. See http://crbug.com/306237.
+ if (hr == S_OK && list_size > 0 && max_fps) {
+ time_per_frame =
+ *std::min_element(max_fps.get(), max_fps.get() + list_size);
+ }
+ }
+
+ format.frame_rate =
+ (time_per_frame > 0)
+ ? (kSecondsToReferenceTime / static_cast<float>(time_per_frame))
+              : 0.0f;
+
+ capabilities_.emplace_back(stream_index, format, h->bmiHeader);
+ }
+ }
+
+ return !capabilities_.empty();
+}
+
+// Set the power line frequency removal in |capture_filter_| if available.
+void VideoCaptureDeviceWin::SetAntiFlickerInCaptureFilter() {
+ const int power_line_frequency = GetPowerLineFrequencyForLocation();
+ if (power_line_frequency != kPowerLine50Hz &&
+ power_line_frequency != kPowerLine60Hz) {
+ return;
+ }
+ ScopedComPtr<IKsPropertySet> ks_propset;
+ DWORD type_support = 0;
+ HRESULT hr;
+ if (SUCCEEDED(hr = ks_propset.QueryFrom(capture_filter_.get())) &&
+ SUCCEEDED(hr = ks_propset->QuerySupported(
+ PROPSETID_VIDCAP_VIDEOPROCAMP,
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY,
+ &type_support)) &&
+ (type_support & KSPROPERTY_SUPPORT_SET)) {
+ KSPROPERTY_VIDEOPROCAMP_S data = {};
+ data.Property.Set = PROPSETID_VIDCAP_VIDEOPROCAMP;
+ data.Property.Id = KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY;
+ data.Property.Flags = KSPROPERTY_TYPE_SET;
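+    // Per the KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY convention, a value
+    // of 1 selects 50 Hz filtering and 2 selects 60 Hz (0 would disable it).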
+ data.Value = (power_line_frequency == kPowerLine50Hz) ? 1 : 2;
+ data.Flags = KSPROPERTY_VIDEOPROCAMP_FLAGS_MANUAL;
+ hr = ks_propset->Set(PROPSETID_VIDCAP_VIDEOPROCAMP,
+ KSPROPERTY_VIDEOPROCAMP_POWERLINE_FREQUENCY, &data,
+ sizeof(data), &data, sizeof(data));
+ DLOG_IF(ERROR, FAILED(hr)) << "Anti-flicker setting failed: "
+ << logging::SystemErrorCodeToString(hr);
+ DVLOG_IF(2, SUCCEEDED(hr)) << "Anti-flicker set correctly.";
+ } else {
+ DVLOG(2) << "Anti-flicker setting not supported.";
+ }
+}
+
+void VideoCaptureDeviceWin::SetErrorState(const std::string& reason) {
+ DCHECK(CalledOnValidThread());
+ state_ = kError;
+ client_->OnError(reason);
+}
+} // namespace media
diff --git a/media/capture/video/win/video_capture_device_win.h b/media/capture/video/win/video_capture_device_win.h
new file mode 100644
index 0000000..dd7485e
--- /dev/null
+++ b/media/capture/video/win/video_capture_device_win.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Windows specific implementation of VideoCaptureDevice.
+// DirectShow is used for capturing. DirectShow provides its own threads
+// for capturing.
+
+#ifndef MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_WIN_H_
+#define MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_WIN_H_
+
+// Avoid including strsafe.h via dshow as it will cause build warnings.
+#define NO_DSHOW_STRSAFE
+#include <dshow.h>
+
+#include <map>
+#include <string>
+
+#include "base/threading/non_thread_safe.h"
+#include "base/threading/thread.h"
+#include "base/win/scoped_comptr.h"
+#include "media/base/video_capture_types.h"
+#include "media/capture/video/video_capture_device.h"
+#include "media/capture/video/win/capability_list_win.h"
+#include "media/capture/video/win/sink_filter_win.h"
+#include "media/capture/video/win/sink_input_pin_win.h"
+
+namespace media {
+
+// All the methods in the class can only be run on a COM-initialized thread.
+class VideoCaptureDeviceWin : public base::NonThreadSafe,
+ public VideoCaptureDevice,
+ public SinkFilterObserver {
+ public:
+ // A utility class that wraps the AM_MEDIA_TYPE type and guarantees that
+ // we free the structure when exiting the scope. DCHECKing is also done to
+ // avoid memory leaks.
+ class ScopedMediaType {
+ public:
+ ScopedMediaType() : media_type_(NULL) {}
+ ~ScopedMediaType() { Free(); }
+
+ AM_MEDIA_TYPE* operator->() { return media_type_; }
+ AM_MEDIA_TYPE* get() { return media_type_; }
+ void Free();
+ AM_MEDIA_TYPE** Receive();
+
+ private:
+ void FreeMediaType(AM_MEDIA_TYPE* mt);
+ void DeleteMediaType(AM_MEDIA_TYPE* mt);
+
+ AM_MEDIA_TYPE* media_type_;
+ };
+
+ static HRESULT GetDeviceFilter(const std::string& device_id,
+ const CLSID device_class_id,
+ IBaseFilter** filter);
+ static base::win::ScopedComPtr<IPin> GetPin(IBaseFilter* filter,
+ PIN_DIRECTION pin_dir,
+ REFGUID category,
+ REFGUID major_type);
+ static VideoCapturePixelFormat TranslateMediaSubtypeToPixelFormat(
+ const GUID& sub_type);
+
+ explicit VideoCaptureDeviceWin(const Name& device_name);
+ ~VideoCaptureDeviceWin() override;
+ // Opens the device driver for this device.
+ bool Init();
+
+ // VideoCaptureDevice implementation.
+ void AllocateAndStart(const VideoCaptureParams& params,
+ scoped_ptr<VideoCaptureDevice::Client> client) override;
+ void StopAndDeAllocate() override;
+
+ private:
+ enum InternalState {
+ kIdle, // The device driver is opened but camera is not in use.
+ kCapturing, // Video is being captured.
+ kError // Error accessing HW functions.
+ // User needs to recover by destroying the object.
+ };
+
+ // Implements SinkFilterObserver.
+ void FrameReceived(const uint8* buffer, int length) override;
+
+ bool CreateCapabilityMap();
+ void SetAntiFlickerInCaptureFilter();
+ void SetErrorState(const std::string& reason);
+
+ Name device_name_;
+ InternalState state_;
+ scoped_ptr<VideoCaptureDevice::Client> client_;
+
+ base::win::ScopedComPtr<IBaseFilter> capture_filter_;
+
+ base::win::ScopedComPtr<IGraphBuilder> graph_builder_;
+ base::win::ScopedComPtr<ICaptureGraphBuilder2> capture_graph_builder_;
+
+ base::win::ScopedComPtr<IMediaControl> media_control_;
+ base::win::ScopedComPtr<IPin> input_sink_pin_;
+ base::win::ScopedComPtr<IPin> output_capture_pin_;
+
+ scoped_refptr<SinkFilter> sink_filter_;
+
+  // Map of all capabilities this device supports.
+ CapabilityList capabilities_;
+ VideoCaptureFormat capture_format_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(VideoCaptureDeviceWin);
+};
+
+} // namespace media
+
+#endif // MEDIA_VIDEO_CAPTURE_WIN_VIDEO_CAPTURE_DEVICE_WIN_H_