author    scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-07-20 23:16:21 +0000
committer scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-07-20 23:16:21 +0000
commit  8b788e3e23b32d6bc49256d42314a2f6c6ea78f1 (patch)
tree    ca696268c8ea9a4f50e3029f3f5286a7a249b004 /media
parent  3e1f40e50c6bbbadffa4268beaf214df31f60123 (diff)
Replace VideoDecoder::media_format() with significantly simpler width()/height() methods.
Clients don't really care about the surface type during initialization, hence its removal. Also removed a few methods from VideoRendererBase that didn't need to be there.

Technically it's possible to do without width()/height() methods entirely, forcing clients to inspect VideoFrame objects for changes in dimensions and react accordingly, but that'll be a change for a different day.

BUG=28206
TEST=media_unittests, unit_tests, layout_tests

Review URL: http://codereview.chromium.org/7461016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93278 0039d316-1c4b-4281-b951-d872f2087c98
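For clients migrating off media_format(), initialization now reduces to two direct calls on the decoder. The following is a minimal illustrative sketch only (MyRenderer and window_ are hypothetical names, not part of this change); it mirrors what WtlVideoRenderer::OnInitialize() does in the diff below:

    // Hypothetical VideoRendererBase subclass using the new accessors.
    bool MyRenderer::OnInitialize(media::VideoDecoder* decoder) {
      // width()/height() are only valid once the decoder has initialized
      // successfully (FFmpegVideoDecoder DCHECKs info_.success).
      window_->SetSize(decoder->width(), decoder->height());
      return true;
    }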
Diffstat (limited to 'media')
-rw-r--r--  media/base/filters.h                             11
-rw-r--r--  media/base/mock_filters.h                         3
-rw-r--r--  media/base/video_decoder_config.h                 5
-rw-r--r--  media/filters/ffmpeg_video_decoder.cc            22
-rw-r--r--  media/filters/ffmpeg_video_decoder.h              4
-rw-r--r--  media/filters/ffmpeg_video_decoder_unittest.cc   13
-rw-r--r--  media/filters/video_renderer_base.cc             38
-rw-r--r--  media/filters/video_renderer_base.h              21
-rw-r--r--  media/filters/video_renderer_base_unittest.cc    34
-rw-r--r--  media/tools/player_wtl/wtl_renderer.cc           12
-rw-r--r--  media/tools/player_wtl/wtl_renderer.h            11
-rw-r--r--  media/tools/player_x11/gl_video_renderer.cc       2
-rw-r--r--  media/tools/player_x11/x11_video_renderer.cc     69
13 files changed, 91 insertions, 154 deletions
diff --git a/media/base/filters.h b/media/base/filters.h
index e541c78..ffd7201 100644
--- a/media/base/filters.h
+++ b/media/base/filters.h
@@ -225,8 +225,15 @@ class VideoDecoder : public Filter {
// Indicate whether decoder provides its own output buffers
virtual bool ProvidesBuffer() = 0;
- // Returns the media format produced by this decoder.
- virtual const MediaFormat& media_format() = 0;
+ // Returns the width and height of decoded video in pixels.
+ //
+ // Clients should NOT rely on these values to remain constant. Instead, use
+ // the width/height from decoded video frames themselves.
+ //
+ // TODO(scherkus): why not rely on prerolling and decoding a single frame to
+ // get dimensions?
+ virtual int width() = 0;
+ virtual int height() = 0;
protected:
// Executes the permanent callback to pass off decoded video.
diff --git a/media/base/mock_filters.h b/media/base/mock_filters.h
index cdc04c7..417ae90 100644
--- a/media/base/mock_filters.h
+++ b/media/base/mock_filters.h
@@ -185,9 +185,10 @@ class MockVideoDecoder : public VideoDecoder {
MOCK_METHOD3(Initialize, void(DemuxerStream* stream,
FilterCallback* callback,
StatisticsCallback* stats_callback));
- MOCK_METHOD0(media_format, const MediaFormat&());
MOCK_METHOD1(ProduceVideoFrame, void(scoped_refptr<VideoFrame>));
MOCK_METHOD0(ProvidesBuffer, bool());
+ MOCK_METHOD0(width, int());
+ MOCK_METHOD0(height, int());
void VideoFrameReadyForTest(scoped_refptr<VideoFrame> frame) {
VideoDecoder::VideoFrameReady(frame);
diff --git a/media/base/video_decoder_config.h b/media/base/video_decoder_config.h
index 011efbf..a077312 100644
--- a/media/base/video_decoder_config.h
+++ b/media/base/video_decoder_config.h
@@ -18,6 +18,11 @@ enum VideoCodec {
kCodecMPEG4,
kCodecTheora,
kCodecVP8,
+
+ // DO NOT ADD RANDOM VIDEO CODECS!
+ //
+ // The only acceptable time to add a new codec is if there is production code
+ // that uses said codec in the same CL.
};
class VideoDecoderConfig {
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 6fcbdff..c6409ba 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -12,7 +12,6 @@
#include "media/base/filters.h"
#include "media/base/filter_host.h"
#include "media/base/limits.h"
-#include "media/base/media_format.h"
#include "media/base/video_frame.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/video/ffmpeg_video_decode_engine.h"
@@ -95,13 +94,6 @@ void FFmpegVideoDecoder::OnInitializeComplete(const VideoCodecInfo& info) {
AutoCallbackRunner done_runner(initialize_callback_.release());
if (info.success) {
- media_format_.SetAsInteger(MediaFormat::kWidth,
- info.stream_info.surface_width);
- media_format_.SetAsInteger(MediaFormat::kHeight,
- info.stream_info.surface_height);
- media_format_.SetAsInteger(
- MediaFormat::kSurfaceFormat,
- static_cast<int>(info.stream_info.surface_format));
state_ = kNormal;
} else {
host()->SetError(PIPELINE_ERROR_DECODE);
@@ -272,10 +264,6 @@ void FFmpegVideoDecoder::OnReadCompleteTask(scoped_refptr<Buffer> buffer) {
decode_engine_->ConsumeVideoSample(buffer);
}
-const MediaFormat& FFmpegVideoDecoder::media_format() {
- return media_format_;
-}
-
void FFmpegVideoDecoder::ProduceVideoFrame(
scoped_refptr<VideoFrame> video_frame) {
if (MessageLoop::current() != message_loop_) {
@@ -352,6 +340,16 @@ bool FFmpegVideoDecoder::ProvidesBuffer() {
return info_.provides_buffers;
}
+int FFmpegVideoDecoder::width() {
+ DCHECK(info_.success);
+ return info_.stream_info.surface_width;
+}
+
+int FFmpegVideoDecoder::height() {
+ DCHECK(info_.success);
+ return info_.stream_info.surface_height;
+}
+
void FFmpegVideoDecoder::FlushBuffers() {
while (!frame_queue_flushed_.empty()) {
scoped_refptr<VideoFrame> video_frame;
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index 38a3bcc..5324bf2 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -37,9 +37,10 @@ class FFmpegVideoDecoder
virtual void Initialize(DemuxerStream* demuxer_stream,
FilterCallback* callback,
StatisticsCallback* stats_callback);
- virtual const MediaFormat& media_format();
virtual void ProduceVideoFrame(scoped_refptr<VideoFrame> video_frame);
virtual bool ProvidesBuffer();
+ virtual int width();
+ virtual int height();
private:
// VideoDecodeEngine::EventHandler interface.
@@ -91,7 +92,6 @@ class FFmpegVideoDecoder
virtual void SetVideoDecodeEngineForTest(VideoDecodeEngine* engine);
MessageLoop* message_loop_;
- MediaFormat media_format_;
PtsStream pts_stream_; // Stream of presentation timestamps.
DecoderState state_;
diff --git a/media/filters/ffmpeg_video_decoder_unittest.cc b/media/filters/ffmpeg_video_decoder_unittest.cc
index a45a42f..7aafaac 100644
--- a/media/filters/ffmpeg_video_decoder_unittest.cc
+++ b/media/filters/ffmpeg_video_decoder_unittest.cc
@@ -257,15 +257,10 @@ TEST_F(FFmpegVideoDecoderTest, Initialize_EngineFails) {
TEST_F(FFmpegVideoDecoderTest, Initialize_Successful) {
InitializeDecoderSuccessfully();
- // Test that the output media format is an uncompressed video surface that
- // matches the dimensions specified by FFmpeg.
- const MediaFormat& media_format = decoder_->media_format();
- int width = 0;
- int height = 0;
- EXPECT_TRUE(media_format.GetAsInteger(MediaFormat::kWidth, &width));
- EXPECT_EQ(kWidth, width);
- EXPECT_TRUE(media_format.GetAsInteger(MediaFormat::kHeight, &height));
- EXPECT_EQ(kHeight, height);
+ // Test that the uncompressed video surface matches the dimensions
+ // specified by FFmpeg.
+ EXPECT_EQ(kWidth, decoder_->width());
+ EXPECT_EQ(kHeight, decoder_->height());
}
TEST_F(FFmpegVideoDecoderTest, OnError) {
diff --git a/media/filters/video_renderer_base.cc b/media/filters/video_renderer_base.cc
index 038a1a8..94e1bdf 100644
--- a/media/filters/video_renderer_base.cc
+++ b/media/filters/video_renderer_base.cc
@@ -28,10 +28,7 @@ static const int64 kMaxSleepMilliseconds = 60;
static const int kIdleMilliseconds = 10;
VideoRendererBase::VideoRendererBase()
- : width_(0),
- height_(0),
- surface_format_(VideoFrame::INVALID),
- frame_available_(&lock_),
+ : frame_available_(&lock_),
state_(kUninitialized),
thread_(base::kNullThreadHandle),
pending_reads_(0),
@@ -45,27 +42,6 @@ VideoRendererBase::~VideoRendererBase() {
DCHECK(state_ == kUninitialized || state_ == kStopped);
}
-// static
-bool VideoRendererBase::ParseMediaFormat(
- const MediaFormat& media_format,
- VideoFrame::Format* surface_format_out,
- int* width_out, int* height_out) {
- int surface_format;
- if (!media_format.GetAsInteger(MediaFormat::kSurfaceFormat, &surface_format))
- return false;
- if (surface_format_out)
- *surface_format_out = static_cast<VideoFrame::Format>(surface_format);
-
- int width, height;
- if (!media_format.GetAsInteger(MediaFormat::kWidth, &width))
- return false;
- if (!media_format.GetAsInteger(MediaFormat::kHeight, &height))
- return false;
- if (width_out) *width_out = width;
- if (height_out) *height_out = height;
- return true;
-}
-
void VideoRendererBase::Play(FilterCallback* callback) {
base::AutoLock auto_lock(lock_);
DCHECK_EQ(kPrerolled, state_);
@@ -173,13 +149,7 @@ void VideoRendererBase::Initialize(VideoDecoder* decoder,
base::Unretained(this)));
// Notify the pipeline of the video dimensions.
- if (!ParseMediaFormat(decoder->media_format(),
- &surface_format_,
- &width_, &height_)) {
- EnterErrorState_Locked(PIPELINE_ERROR_INITIALIZATION_FAILED);
- return;
- }
- host()->SetVideoSize(width_, height_);
+ host()->SetVideoSize(decoder_->width(), decoder_->height());
// Initialize the subclass.
// TODO(scherkus): do we trust subclasses not to do something silly while
@@ -479,10 +449,6 @@ void VideoRendererBase::ConsumeVideoFrame(scoped_refptr<VideoFrame> frame) {
}
}
-VideoDecoder* VideoRendererBase::GetDecoder() {
- return decoder_.get();
-}
-
void VideoRendererBase::ReadInput(scoped_refptr<VideoFrame> frame) {
// We should never return empty frames or EOS frame.
DCHECK(frame.get() && !frame->IsEndOfStream());
diff --git a/media/filters/video_renderer_base.h b/media/filters/video_renderer_base.h
index d48c97c..d4a064c 100644
--- a/media/filters/video_renderer_base.h
+++ b/media/filters/video_renderer_base.h
@@ -36,15 +36,6 @@ class VideoRendererBase
VideoRendererBase();
virtual ~VideoRendererBase();
- // Helper method to parse out video-related information from a MediaFormat.
- // Returns true all the required parameters are existent in |media_format|.
- // |surface_format_out|, |width_out|, |height_out| can be NULL where the
- // result is not needed.
- static bool ParseMediaFormat(
- const MediaFormat& media_format,
- VideoFrame::Format* surface_format_out,
- int* width_out, int* height_out);
-
// Filter implementation.
virtual void Play(FilterCallback* callback);
virtual void Pause(FilterCallback* callback);
@@ -76,7 +67,7 @@ class VideoRendererBase
// class takes place.
//
// Implementors typically use the media format of |decoder| to create their
- // output surfaces. Implementors should NOT call InitializationComplete().
+ // output surfaces.
virtual bool OnInitialize(VideoDecoder* decoder) = 0;
// Subclass interface. Called after all other stopping actions take place.
@@ -97,12 +88,6 @@ class VideoRendererBase
// class executes on.
virtual void OnFrameAvailable() = 0;
- virtual VideoDecoder* GetDecoder();
-
- int width() { return width_; }
- int height() { return height_; }
- VideoFrame::Format surface_format() { return surface_format_; }
-
void ReadInput(scoped_refptr<VideoFrame> frame);
private:
@@ -141,10 +126,6 @@ class VideoRendererBase
scoped_refptr<VideoDecoder> decoder_;
- int width_;
- int height_;
- VideoFrame::Format surface_format_;
-
// Queue of incoming frames as well as the current frame since the last time
// OnFrameAvailable() was called.
typedef std::deque< scoped_refptr<VideoFrame> > VideoFrameQueue;
diff --git a/media/filters/video_renderer_base_unittest.cc b/media/filters/video_renderer_base_unittest.cc
index 8fdf1e3..6ed126f 100644
--- a/media/filters/video_renderer_base_unittest.cc
+++ b/media/filters/video_renderer_base_unittest.cc
@@ -58,15 +58,12 @@ class VideoRendererBaseTest : public ::testing::Test {
EXPECT_CALL(*decoder_, ProduceVideoFrame(_))
.WillRepeatedly(Invoke(this, &VideoRendererBaseTest::EnqueueCallback));
- // Sets the essential media format keys for this decoder.
- decoder_media_format_.SetAsInteger(MediaFormat::kSurfaceFormat,
- VideoFrame::YV12);
- decoder_media_format_.SetAsInteger(MediaFormat::kWidth, kWidth);
- decoder_media_format_.SetAsInteger(MediaFormat::kHeight, kHeight);
- EXPECT_CALL(*decoder_, media_format())
- .WillRepeatedly(ReturnRef(decoder_media_format_));
EXPECT_CALL(*decoder_, ProvidesBuffer())
.WillRepeatedly(Return(true));
+
+ EXPECT_CALL(*decoder_, width()).WillRepeatedly(Return(kWidth));
+ EXPECT_CALL(*decoder_, height()).WillRepeatedly(Return(kHeight));
+
EXPECT_CALL(stats_callback_object_, OnStatistics(_))
.Times(AnyNumber());
}
@@ -181,7 +178,6 @@ class VideoRendererBaseTest : public ::testing::Test {
scoped_refptr<MockVideoRendererBase> renderer_;
scoped_refptr<MockVideoDecoder> decoder_;
StrictMock<MockFilterHost> host_;
- MediaFormat decoder_media_format_;
MockStatisticsCallback stats_callback_object_;
// Receives all the buffers that renderer had provided to |decoder_|.
@@ -207,28 +203,6 @@ const size_t VideoRendererBaseTest::kWidth = 16u;
const size_t VideoRendererBaseTest::kHeight = 16u;
const int64 VideoRendererBaseTest::kDuration = 10;
-// Test initialization where the decoder's media format is malformed.
-TEST_F(VideoRendererBaseTest, Initialize_BadMediaFormat) {
- // Don't set a media format.
- MediaFormat media_format;
- scoped_refptr<MockVideoDecoder> bad_decoder(new MockVideoDecoder());
- EXPECT_CALL(*bad_decoder, ProvidesBuffer())
- .WillRepeatedly(Return(true));
-
- InSequence s;
-
- EXPECT_CALL(*bad_decoder, media_format())
- .WillRepeatedly(ReturnRef(media_format));
-
- // We expect to receive an error.
- EXPECT_CALL(host_, SetError(PIPELINE_ERROR_INITIALIZATION_FAILED));
-
- // Initialize, we expect to have no reads.
- renderer_->Initialize(bad_decoder,
- NewExpectedCallback(), NewStatisticsCallback());
- EXPECT_EQ(0u, read_queue_.size());
-}
-
// Test initialization where the subclass failed for some reason.
TEST_F(VideoRendererBaseTest, Initialize_Failed) {
InSequence s;
diff --git a/media/tools/player_wtl/wtl_renderer.cc b/media/tools/player_wtl/wtl_renderer.cc
index 04a932c..baec88e 100644
--- a/media/tools/player_wtl/wtl_renderer.cc
+++ b/media/tools/player_wtl/wtl_renderer.cc
@@ -12,10 +12,9 @@ WtlVideoRenderer::WtlVideoRenderer(WtlVideoWindow* window)
WtlVideoRenderer::~WtlVideoRenderer() {}
-// static
-bool WtlVideoRenderer::IsMediaFormatSupported(
- const media::MediaFormat& media_format) {
- return ParseMediaFormat(media_format, NULL, NULL, NULL);
+bool WtlVideoRenderer::OnInitialize(media::VideoDecoder* decoder) {
+ window_->SetSize(decoder->width(), decoder->height());
+ return true;
}
void WtlVideoRenderer::OnStop(media::FilterCallback* callback) {
@@ -25,11 +24,6 @@ void WtlVideoRenderer::OnStop(media::FilterCallback* callback) {
}
}
-bool WtlVideoRenderer::OnInitialize(media::VideoDecoder* decoder) {
- window_->SetSize(width(), height());
- return true;
-}
-
void WtlVideoRenderer::OnFrameAvailable() {
window_->Invalidate();
}
diff --git a/media/tools/player_wtl/wtl_renderer.h b/media/tools/player_wtl/wtl_renderer.h
index 70fbe0e..e929d8e 100644
--- a/media/tools/player_wtl/wtl_renderer.h
+++ b/media/tools/player_wtl/wtl_renderer.h
@@ -1,8 +1,6 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved. Use of this
-// source code is governed by a BSD-style license that can be found in the
-// LICENSE file.
-
-// Video renderer for media player.
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef MEDIA_TOOLS_PLAYER_WTL_WTL_RENDERER_H_
#define MEDIA_TOOLS_PLAYER_WTL_WTL_RENDERER_H_
@@ -11,12 +9,11 @@
class WtlVideoWindow;
+// Video renderer for media player.
class WtlVideoRenderer : public media::VideoRendererBase {
public:
explicit WtlVideoRenderer(WtlVideoWindow* window);
- static bool IsMediaFormatSupported(const media::MediaFormat& media_format);
-
protected:
// VideoRendererBase implementation.
virtual bool OnInitialize(media::VideoDecoder* decoder);
diff --git a/media/tools/player_x11/gl_video_renderer.cc b/media/tools/player_x11/gl_video_renderer.cc
index e39d1dd..b58b4df 100644
--- a/media/tools/player_x11/gl_video_renderer.cc
+++ b/media/tools/player_x11/gl_video_renderer.cc
@@ -128,7 +128,7 @@ bool GlVideoRenderer::OnInitialize(media::VideoDecoder* decoder) {
LOG(INFO) << "Initializing GL Renderer...";
// Resize the window to fit that of the video.
- XResizeWindow(display_, window_, width(), height());
+ XResizeWindow(display_, window_, decoder->width(), decoder->height());
gl_context_ = InitGLContext(display_, window_);
if (!gl_context_)
diff --git a/media/tools/player_x11/x11_video_renderer.cc b/media/tools/player_x11/x11_video_renderer.cc
index 9633303..055e54b 100644
--- a/media/tools/player_x11/x11_video_renderer.cc
+++ b/media/tools/player_x11/x11_video_renderer.cc
@@ -13,6 +13,21 @@
#include "media/base/video_frame.h"
#include "media/base/yuv_convert.h"
+// Creates a 32-bit XImage.
+static XImage* CreateImage(Display* display, int width, int height) {
+ LOG(INFO) << "Allocating XImage " << width << "x" << height;
+ return XCreateImage(display,
+ DefaultVisual(display, DefaultScreen(display)),
+ DefaultDepth(display, DefaultScreen(display)),
+ ZPixmap,
+ 0,
+ static_cast<char*>(malloc(width * height * 4)),
+ width,
+ height,
+ 32,
+ width * 4);
+}
+
// Returns the picture format for ARGB.
// This method is originally from chrome/common/x11_util.cc.
static XRenderPictFormat* GetRenderARGB32Format(Display* dpy) {
@@ -42,11 +57,11 @@ static XRenderPictFormat* GetRenderARGB32Format(Display* dpy) {
pictformat = XRenderFindFormat(dpy, kMask, &templ, 0 /* first result */);
if (!pictformat) {
- // Not all X servers support xRGB32 formats. However, the XRENDER spec
+ // Not all X servers support xRGB32 formats. However, the XRender spec
// says that they must support an ARGB32 format, so we can always return
// that.
pictformat = XRenderFindStandardFormat(dpy, PictStandardARGB32);
- CHECK(pictformat) << "XRENDER ARGB32 not supported.";
+ CHECK(pictformat) << "XRender ARGB32 not supported.";
}
return pictformat;
@@ -79,7 +94,12 @@ bool X11VideoRenderer::OnInitialize(media::VideoDecoder* decoder) {
LOG(INFO) << "Initializing X11 Renderer...";
// Resize the window to fit that of the video.
- XResizeWindow(display_, window_, width(), height());
+ int width = decoder->width();
+ int height = decoder->height();
+ XResizeWindow(display_, window_, width, height);
+
+ // Allocate an XImage for caching RGB result.
+ image_ = CreateImage(display_, width, height);
// Testing XRender support. We'll use the very basic of XRender
// so if it presents it is already good enough. We don't need
@@ -98,25 +118,12 @@ bool X11VideoRenderer::OnInitialize(media::VideoDecoder* decoder) {
XRenderPictFormat* pictformat = XRenderFindVisualFormat(
display_,
attr.visual);
- CHECK(pictformat) << "XRENDER does not support default visual";
+ CHECK(pictformat) << "XRender does not support default visual";
picture_ = XRenderCreatePicture(display_, window_, pictformat, 0, NULL);
CHECK(picture_) << "Backing picture not created";
}
- // Initialize the XImage to store the output of YUV -> RGB conversion.
- image_ = XCreateImage(display_,
- DefaultVisual(display_, DefaultScreen(display_)),
- DefaultDepth(display_, DefaultScreen(display_)),
- ZPixmap,
- 0,
- static_cast<char*>(malloc(width() * height() * 4)),
- width(),
- height(),
- 32,
- width() * 4);
- DCHECK(image_);
-
return true;
}
@@ -130,12 +137,24 @@ void X11VideoRenderer::PaintOnMainThread() {
scoped_refptr<media::VideoFrame> video_frame;
GetCurrentFrame(&video_frame);
- if (!image_ || !video_frame) {
+ if (!video_frame) {
// TODO(jiesun): Use color fill rather than create black frame then scale.
PutCurrentFrame(video_frame);
return;
}
+ int width = video_frame->width();
+ int height = video_frame->height();
+
+ // Check if we need to re-allocate our XImage.
+ if (image_->width != width || image_->height != height) {
LOG(INFO) << "Detected resolution change: "
+ << image_->width << "x" << image_->height << " -> "
+ << width << "x" << height;
+ XDestroyImage(image_);
+ image_ = CreateImage(display_, width, height);
+ }
+
// Convert YUV frame to RGB.
DCHECK(video_frame->format() == media::VideoFrame::YV12 ||
video_frame->format() == media::VideoFrame::YV16);
@@ -167,8 +186,8 @@ void X11VideoRenderer::PaintOnMainThread() {
// Creates a XImage.
XImage image;
memset(&image, 0, sizeof(image));
- image.width = width();
- image.height = height();
+ image.width = width;
+ image.height = height;
image.depth = 32;
image.bits_per_pixel = 32;
image.format = ZPixmap;
@@ -184,13 +203,13 @@ void X11VideoRenderer::PaintOnMainThread() {
// Creates a pixmap and uploads from the XImage.
unsigned long pixmap = XCreatePixmap(display_,
window_,
- width(),
- height(),
+ width,
+ height,
32);
GC gc = XCreateGC(display_, pixmap, 0, NULL);
XPutImage(display_, pixmap, gc, &image,
0, 0, 0, 0,
- width(), height());
+ width, height);
XFreeGC(display_, gc);
// Creates the picture representing the pixmap.
@@ -200,7 +219,7 @@ void X11VideoRenderer::PaintOnMainThread() {
// Composite the picture over the picture representing the window.
XRenderComposite(display_, PictOpSrc, picture, 0,
picture_, 0, 0, 0, 0, 0, 0,
- width(), height());
+ width, height);
XRenderFreePicture(display_, picture);
XFreePixmap(display_, pixmap);
@@ -213,7 +232,7 @@ void X11VideoRenderer::PaintOnMainThread() {
// to the window.
GC gc = XCreateGC(display_, window_, 0, NULL);
XPutImage(display_, window_, gc, image_,
- 0, 0, 0, 0, width(), height());
+ 0, 0, 0, 0, width, height);
XFlush(display_);
XFreeGC(display_, gc);
}
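As the commit message notes, a follow-up could drop width()/height() entirely and have clients read dimensions from each VideoFrame, reacting to changes as x11_video_renderer.cc now does when it re-allocates its XImage. A minimal sketch of that per-frame pattern, assuming a hypothetical renderer with the same image_/display_ members and CreateImage() helper as above:

    // Per-frame handling: trust the frame's dimensions, not cached decoder state.
    void MyRenderer::PaintFrame(media::VideoFrame* frame) {
      int width = frame->width();
      int height = frame->height();
      // Re-allocate the output image if the video resolution changed mid-stream.
      if (image_->width != width || image_->height != height) {
        XDestroyImage(image_);
        image_ = CreateImage(display_, width, height);
      }
      // ... convert and paint |frame| into |image_| as before ...
    }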