summaryrefslogtreecommitdiffstats
path: root/media
diff options
context:
space:
mode:
authorhbono@chromium.org <hbono@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-09-09 05:36:13 +0000
committerhbono@chromium.org <hbono@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-09-09 05:36:13 +0000
commit215c1a35edef4bb7938358626b17b4ddcd15033b (patch)
treef7ee261c61ab8656fa14a513c429c71b8da20d4b /media
parenta1be81a89355c279d8c05bfb3dc70e67642326d2 (diff)
downloadchromium_src-215c1a35edef4bb7938358626b17b4ddcd15033b.zip
chromium_src-215c1a35edef4bb7938358626b17b4ddcd15033b.tar.gz
chromium_src-215c1a35edef4bb7938358626b17b4ddcd15033b.tar.bz2
Implements RGB to YV12 conversions in YASM.
This change implements two conversions (RGB32-to-YV12 and RGB24-to-YV12) in YASM. This change uses 8-bit fixed-point calculation and converts up to four pixels at once. BUG=none TEST=YUVConvertTest.SideBySideRGB Review URL: http://codereview.chromium.org/7003082 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100352 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media')
-rw-r--r--media/base/cpu_features.h3
-rw-r--r--media/base/cpu_features_arm.cc4
-rw-r--r--media/base/cpu_features_x86.cc29
-rw-r--r--media/base/simd/convert_rgb_to_yuv.cc101
-rw-r--r--media/base/simd/convert_rgb_to_yuv.h40
-rw-r--r--media/base/simd/convert_rgb_to_yuv_ssse3.asm317
-rw-r--r--media/base/simd/convert_rgb_to_yuv_ssse3.h57
-rw-r--r--media/base/simd/convert_rgb_to_yuv_ssse3.inc200
-rw-r--r--media/base/simd/convert_rgb_to_yuv_unittest.cc124
-rw-r--r--media/base/simd/x86inc.asm1001
-rw-r--r--media/base/yuv_convert.cc20
-rw-r--r--media/media.gyp55
12 files changed, 1942 insertions, 9 deletions
diff --git a/media/base/cpu_features.h b/media/base/cpu_features.h
index c5bdcd7..c2762d8 100644
--- a/media/base/cpu_features.h
+++ b/media/base/cpu_features.h
@@ -13,6 +13,9 @@ namespace media {
// Returns true if CPU has SSE2 support.
bool hasSSE2();
+// Returns true if CPU supports SSE2, SSE3, and SSSE3.
+bool hasSSSE3();
+
} // namespace media
#endif // MEDIA_BASE_CPU_FEATURES_H_
diff --git a/media/base/cpu_features_arm.cc b/media/base/cpu_features_arm.cc
index e86fc97..a0d5c68 100644
--- a/media/base/cpu_features_arm.cc
+++ b/media/base/cpu_features_arm.cc
@@ -10,4 +10,8 @@ bool hasSSE2() {
return false;
}
+bool hasSSSE3() {
+ return false;
+}
+
} // namespace media
diff --git a/media/base/cpu_features_x86.cc b/media/base/cpu_features_x86.cc
index 98dfba1..bf7d05d 100644
--- a/media/base/cpu_features_x86.cc
+++ b/media/base/cpu_features_x86.cc
@@ -12,12 +12,6 @@
namespace media {
-#if defined(ARCH_CPU_X86_64)
-/* All x86_64 machines have SSE2, so don't even bother checking. */
-bool hasSSE2() {
- return true;
-}
-#else
#ifdef _MSC_VER
static inline void getcpuid(int info_type, int info[4]) {
__asm {
@@ -33,6 +27,7 @@ static inline void getcpuid(int info_type, int info[4]) {
#else
static inline void getcpuid(int info_type, int info[4]) {
// We save and restore ebx, so this code can be compatible with -fPIC
+#if defined(__i386__)
asm volatile (
"pushl %%ebx \n\t"
"cpuid \n\t"
@@ -41,14 +36,34 @@ static inline void getcpuid(int info_type, int info[4]) {
: "=a"(info[0]), "=r"(info[1]), "=c"(info[2]), "=d"(info[3])
: "a"(info_type)
);
+#else
+ // We can use cpuid instruction without saving ebx on gcc x86-64 because it
+ // does not use ebx (or rbx) as a GOT register.
+ asm volatile (
+ "cpuid \n\t"
+ : "=a"(info[0]), "=r"(info[1]), "=c"(info[2]), "=d"(info[3])
+ : "a"(info_type)
+ );
+#endif
}
#endif
bool hasSSE2() {
+#if defined(ARCH_CPU_X86_64)
+ /* All x86_64 machines have SSE2, so don't even bother checking. */
+ return true;
+#else
int cpu_info[4] = { 0 };
getcpuid(1, cpu_info);
return (cpu_info[3] & (1<<26)) != 0;
-}
#endif
+}
+
+bool hasSSSE3() {
+ int cpu_info[4] = { 0 };
+ getcpuid(1, cpu_info);
+ return (cpu_info[3] & 0x04000000) != 0 && (cpu_info[2] & 0x00000001) != 0 &&
+ (cpu_info[2] & 0x00000200) != 0;
+}
} // namespace media
diff --git a/media/base/simd/convert_rgb_to_yuv.cc b/media/base/simd/convert_rgb_to_yuv.cc
new file mode 100644
index 0000000..2bd6930
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/base/simd/convert_rgb_to_yuv.h"
+
+#include "build/build_config.h"
+#include "media/base/cpu_features.h"
+#include "media/base/simd/convert_rgb_to_yuv_ssse3.h"
+
+namespace media {
+
+void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+#ifdef ENABLE_SUBSAMPLING
+ for (; height >= 2; height -= 2) {
+ ConvertARGBToYUVEven_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertARGBToYUVOdd_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertARGBToYUVEven_SSSE3(rgbframe, yplane, uplane, vplane, width);
+#else
+ for (; height >= 2; height -= 2) {
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertARGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+#endif
+}
+
+void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride) {
+#ifdef ENABLE_SUBSAMPLING
+ for (; height >= 2; height -= 2) {
+ ConvertRGBToYUVEven_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertRGBToYUVOdd_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertRGBToYUVEven_SSSE3(rgbframe, yplane, uplane, vplane, width);
+#else
+ for (; height >= 2; height -= 2) {
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, NULL, NULL, width);
+ rgbframe += rgbstride;
+ yplane += ystride;
+
+ uplane += uvstride;
+ vplane += uvstride;
+ }
+
+ if (height)
+ ConvertRGBToYUVRow_SSSE3(rgbframe, yplane, uplane, vplane, width);
+#endif
+}
+
+} // namespace media
diff --git a/media/base/simd/convert_rgb_to_yuv.h b/media/base/simd/convert_rgb_to_yuv.h
new file mode 100644
index 0000000..e16fa51
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
+#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
+
+#include "base/basictypes.h"
+#include "media/base/yuv_convert.h"
+
+namespace media {
+
+// Converts an ARGB image to a YV12 image. This function calls ASM functions
+// implemented in "convert_rgb_to_yuv_ssse3.asm" to convert the specified ARGB
+// image to a YV12 image.
+void ConvertRGB32ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+// Converts an RGB image to a YV12 image. This function is almost the same as
+// ConvertRGB32ToYUV_SSSE3 except its first argument is a pointer to RGB pixels.
+void ConvertRGB24ToYUV_SSSE3(const uint8* rgbframe,
+ uint8* yplane,
+ uint8* uplane,
+ uint8* vplane,
+ int width,
+ int height,
+ int rgbstride,
+ int ystride,
+ int uvstride);
+
+} // namespace media
+
+#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_H_
diff --git a/media/base/simd/convert_rgb_to_yuv_ssse3.asm b/media/base/simd/convert_rgb_to_yuv_ssse3.asm
new file mode 100644
index 0000000..f445e98
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv_ssse3.asm
@@ -0,0 +1,317 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+%include "x86inc.asm"
+
+;
+; This file uses SSE, SSE2, SSE3, and SSSE3, which are supported by all ATOM
+; processors.
+;
+ SECTION_TEXT
+ CPU SSE, SSE3, SSE3, SSSE3
+
+;
+; XMM registers representing constants. We must not use these registers as
+; destination operands.
+; for (int i = 0; i < 16; i += 4) {
+; xmm7.b[i] = 25; xmm7.b[i+1] = 2; xmm7.b[i+2] = 66; xmm7.b[i+3] = 0;
+; xmm6.b[i] = 0; xmm6.b[i+1] = 127; xmm6.b[i+2] = 0; xmm6.b[i+3] = 0;
+; xmm5.b[i] = 112; xmm5.b[i+1] = -74; xmm5.b[i+2] = -38; xmm5.b[i+3] = 0;
+; xmm4.b[i] = -18; xmm4.b[i+1] = -94; xmm4.b[i+2] = 112; xmm4.b[i+3] = 0;
+; }
+;
+%define XMM_CONST_Y0 xmm7
+%define XMM_CONST_Y1 xmm6
+%define XMM_CONST_U xmm5
+%define XMM_CONST_V xmm4
+%define XMM_CONST_128 xmm3
+
+;
+; LOAD_XMM %1 (xmm), %2 (imm32)
+; Loads an immediate value to an XMM register.
+; %1.d[0] = %1.d[1] = %1.d[2] = %1.d[3] = %2;
+;
+%macro LOAD_XMM 2
+ mov TEMPd, %2
+ movd %1, TEMPd
+ pshufd %1, %1, 00000000B
+%endmacro
+
+;
+; UNPACKRGB %1 (xmm), %2 (imm8)
+; Unpacks one RGB pixel in the specified XMM register.
+; for (int i = 15; i > %2; --i) %1.b[i] = %1.b[i - 1];
+; %1.b[%2] = 0;
+; for (int i = %2 - 1; i >= 0; --i) %1.b[i] = %1.b[i];
+;
+%macro UNPACKRGB 2
+ movdqa xmm1, %1
+ psrldq xmm1, %2
+ pslldq xmm1, %2
+ pxor %1, xmm1
+ pslldq xmm1, 1
+ por %1, xmm1
+%endmacro
+
+;
+; READ_ARGB %1 (xmm), %2 (imm)
+; Read the specified number of ARGB (or RGB) pixels from the source and store
+; them to the destination xmm register. If the input format is RGB, we read RGB
+; pixels and convert them to ARGB pixels. (For this case, the alpha values of
+; the output pixels become 0.)
+;
+%macro READ_ARGB 2
+
+%if PIXELSIZE == 4
+
+ ; Read ARGB pixels from the source. (This macro assumes the input buffer may
+ ; not be aligned to a 16-byte boundary.)
+%if %2 == 1
+ movd %1, DWORD [ARGBq + WIDTHq * 4 * 2]
+%elif %2 == 2
+ movq %1, QWORD [ARGBq + WIDTHq * 4 * 2]
+%elif %2 == 4
+ movdqu %1, DQWORD [ARGBq + WIDTHq * 4 * 2]
+%else
+%error unsupported number of pixels.
+%endif
+
+%elif PIXELSIZE == 3
+
+ ; Read RGB pixels from the source and convert them to ARGB pixels.
+%if %2 == 1
+ ; Read one RGB pixel and convert it to one ARGB pixel.
+ ; Save the WIDTH register to xmm1. (This macro needs to break it.)
+ MOVq xmm1, WIDTHq
+
+ ; Once read three bytes from the source to TEMPd, and copy it to the
+ ; destination xmm register.
+ lea WIDTHq, [WIDTHq + WIDTHq * 2]
+ movzx TEMPd, BYTE [ARGBq + WIDTHq * 2 + 2]
+ shl TEMPd, 16
+ mov TEMPw, WORD [ARGBq + WIDTHq * 2]
+ movd %1, TEMPd
+
+ ; Restore the WIDTH register.
+ MOVq WIDTHq, xmm1
+%elif %2 == 2
+ ; Read two RGB pixels and convert them to two ARGB pixels.
+ ; Read six bytes from the source to the destination xmm register.
+ mov TEMPq, WIDTHq
+ lea TEMPq, [TEMPq + TEMPq * 2]
+ movd %1, DWORD [ARGBq + TEMPq * 2]
+ pinsrw %1, WORD [ARGBq + TEMPq * 2 + 4], 3
+
+ ; Fill the alpha values of these RGB pixels with 0 and convert them to two
+ ; ARGB pixels.
+ UNPACKRGB %1, 3
+%elif %2 == 4
+ ; Read four RGB pixels and convert them to four ARGB pixels.
+ ; Read twelve bytes from the source to the destination xmm register.
+ mov TEMPq, WIDTHq
+ lea TEMPq, [TEMPq + TEMPq * 2]
+ movq %1, QWORD [ARGBq + TEMPq * 2]
+ movd xmm1, DWORD [ARGBq + TEMPq * 2 + 8]
+ shufps %1, xmm1, 01000100B
+
+ ; Fill the alpha values of these RGB pixels with 0 and convert them to four
+ ; ARGB pixels.
+ UNPACKRGB %1, 3
+ UNPACKRGB %1, 4 + 3
+ UNPACKRGB %1, 4 + 4 + 3
+%else
+%error unsupported number of pixels.
+%endif
+
+%else
+%error unsupported PIXELSIZE value.
+%endif
+
+%endmacro
+
+;
+; CALC_Y %1 (xmm), %2 (xmm)
+; Calculates four Y values from four ARGB pixels stored in %2.
+; %1.b[0] = ToByte((25 * B(0) + 129 * G(0) + 66 * R(0) + 128) / 256 + 16);
+; %1.b[1] = ToByte((25 * B(1) + 129 * G(1) + 66 * R(1) + 128) / 256 + 16);
+; %1.b[2] = ToByte((25 * B(2) + 129 * G(2) + 66 * R(2) + 128) / 256 + 16);
+; %1.b[3] = ToByte((25 * B(3) + 129 * G(3) + 66 * R(3) + 128) / 256 + 16);
+;
+%macro CALC_Y 2
+ ; To avoid signed saturation, we divide this conversion formula into two
+ ; formulae and store their results into two XMM registers %1 and xmm2.
+ ; %1.w[0] = 25 * %2.b[0] + 2 * %2.b[1] + 66 * %2.b[2] + 0 * %2.b[3];
+ ; %1.w[1] = 25 * %2.b[4] + 2 * %2.b[5] + 66 * %2.b[6] + 0 * %2.b[7];
+ ; %1.w[2] = 25 * %2.b[8] + 2 * %2.b[9] + 66 * %2.b[10] + 0 * %2.b[11];
+ ; %1.w[3] = 25 * %2.b[12] + 2 * %2.b[13] + 66 * %2.b[14] + 0 * %2.b[15];
+ ; xmm2.w[0] = 0 * %2.b[0] + 127 * %2.b[1] + 0 * %2.b[2] + 0 * %2.b[3];
+ ; xmm2.w[1] = 0 * %2.b[4] + 127 * %2.b[5] + 0 * %2.b[6] + 0 * %2.b[7];
+ ; xmm2.w[2] = 0 * %2.b[8] + 127 * %2.b[9] + 0 * %2.b[10] + 0 * %2.b[11];
+ ; xmm2.w[3] = 0 * %2.b[12] + 127 * %2.b[13] + 0 * %2.b[14] + 0 * %2.b[15];
+ movdqa %1, %2
+ pmaddubsw %1, XMM_CONST_Y0
+ phaddsw %1, %1
+ movdqa xmm2, %2
+ pmaddubsw xmm2, XMM_CONST_Y1
+ phaddsw xmm2, xmm2
+
+ ; %1.b[0] = ToByte((%1.w[0] + xmm2.w[0] + 128) / 256 + 16);
+ ; %1.b[1] = ToByte((%1.w[1] + xmm2.w[1] + 128) / 256 + 16);
+ ; %1.b[2] = ToByte((%1.w[2] + xmm2.w[2] + 128) / 256 + 16);
+ ; %1.b[3] = ToByte((%1.w[3] + xmm2.w[3] + 128) / 256 + 16);
+ paddw %1, xmm2
+ movdqa xmm2, XMM_CONST_128
+ paddw %1, xmm2
+ psrlw %1, 8
+ psrlw xmm2, 3
+ paddw %1, xmm2
+ packuswb %1, %1
+%endmacro
+
+;
+; INIT_UV %1 (r32), %2 (reg) %3 (imm)
+;
+%macro INIT_UV 3
+
+%if SUBSAMPLING == 1 && LINE == 1
+%if %3 == 1 || %3 == 2
+ movzx %1, BYTE [%2 + WIDTHq]
+%elif %3 == 4
+ movzx %1, WORD [%2 + WIDTHq]
+%else
+%error unsupported number of pixels.
+%endif
+%endif
+
+%endmacro
+
+;
+; CALC_UV %1 (xmm), %2 (xmm), %3 (xmm), %4 (r32)
+; Calculates two U (or V) values from four ARGB pixels stored in %2.
+; if %3 == XMM_CONST_U
+; if (SUBSAMPLING) {
+; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
+; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
+; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
+; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
+; } else {
+; %1.b[0] = ToByte((112 * B(0) - 74 * G(0) - 38 * R(0) + 128) / 256 + 128);
+; %1.b[1] = ToByte((112 * B(2) - 74 * G(2) - 38 * R(2) + 128) / 256 + 128);
+; }
+; if %3 == XMM_CONST_V
+; %1.b[0] = ToByte((-18 * B(0) - 94 * G(0) + 112 * R(0) + 128) / 256 + 128);
+; %1.b[1] = ToByte((-18 * B(2) - 94 * G(2) + 112 * R(2) + 128) / 256 + 128);
+;
+%macro CALC_UV 4
+ ; for (int i = 0; i < 4; ++i) {
+ ; %1.w[i] = 0;
+ ; for (int j = 0; j < 4; ++j)
+ ; %1.w[i] += %3.b[i * 4 + j] + %2.b[i * 4 + j];
+ ; }
+ movdqa %1, %2
+ pmaddubsw %1, %3
+ phaddsw %1, %1
+
+%if SUBSAMPLING == 1
+ ; %1.w[0] = (%1.w[0] + %1.w[1] + 1) / 2;
+ ; %1.w[1] = (%1.w[1] + %1.w[0] + 1) / 2;
+ ; %1.w[2] = (%1.w[2] + %1.w[3] + 1) / 2;
+ ; %1.w[3] = (%1.w[3] + %1.w[2] + 1) / 2;
+ pshuflw xmm2, %1, 10110001B
+ pavgw %1, xmm2
+%endif
+
+ ; %1.b[0] = ToByte((%1.w[0] + 128) / 256 + 128);
+ ; %1.b[1] = ToByte((%1.w[2] + 128) / 256 + 128);
+ pshuflw %1, %1, 10001000B
+ paddw %1, XMM_CONST_128
+ psraw %1, 8
+ paddw %1, XMM_CONST_128
+ packuswb %1, %1
+
+%if SUBSAMPLING == 1 && LINE == 1
+ ; %1.b[0] = (%1.b[0] + %3.b[0] + 1) / 2;
+ ; %1.b[1] = (%1.b[1] + %3.b[1] + 1) / 2;
+ movd xmm2, %4
+ pavgb %1, xmm2
+%endif
+%endmacro
+
+;
+; extern "C" void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertARGBToYUVRow_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 0
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertRGBToYUVRow_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 0
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertARGBToYUVEven_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertARGBToYUVEven_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 1
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertARGBToYUVOdd_SSSE3(const uint8* argb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertARGBToYUVOdd_SSSE3
+%define PIXELSIZE 4
+%define SUBSAMPLING 1
+%define LINE 1
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVEven_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertRGBToYUVEven_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 1
+%define LINE 0
+%include "convert_rgb_to_yuv_ssse3.inc"
+
+;
+; extern "C" void ConvertRGBToYUVOdd_SSSE3(const uint8* rgb,
+; uint8* y,
+; uint8* u,
+; uint8* v,
+; int width);
+;
+%define SYMBOL ConvertRGBToYUVOdd_SSSE3
+%define PIXELSIZE 3
+%define SUBSAMPLING 1
+%define LINE 1
+%include "convert_rgb_to_yuv_ssse3.inc"
diff --git a/media/base/simd/convert_rgb_to_yuv_ssse3.h b/media/base/simd/convert_rgb_to_yuv_ssse3.h
new file mode 100644
index 0000000..9f7a372
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv_ssse3.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
+#define MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The header file for ASM functions that convert a row of RGB pixels with SSSE3
+// instructions so we can call them from C++ code. These functions are
+// implemented in "convert_rgb_to_yuv_ssse3.asm".
+
+// Convert a row of 24-bit RGB pixels to YV12 pixels.
+void ConvertRGBToYUVRow_SSSE3(const uint8* rgb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+
+// Convert a row of 32-bit RGB pixels to YV12 pixels.
+void ConvertARGBToYUVRow_SSSE3(const uint8* argb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+
+// Convert a row of 24-bit RGB pixels to YV12 pixels.
+void ConvertRGBToYUVEven_SSSE3(const uint8* rgb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+void ConvertRGBToYUVOdd_SSSE3(const uint8* rgb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+
+// Convert a row of 32-bit RGB pixels to YV12 pixels.
+void ConvertARGBToYUVEven_SSSE3(const uint8* argb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+void ConvertARGBToYUVOdd_SSSE3(const uint8* argb,
+ uint8* y,
+ uint8* u,
+ uint8* v,
+ int width);
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MEDIA_BASE_SIMD_CONVERT_RGB_TO_YUV_SSSE3_H_
diff --git a/media/base/simd/convert_rgb_to_yuv_ssse3.inc b/media/base/simd/convert_rgb_to_yuv_ssse3.inc
new file mode 100644
index 0000000..35c0db9
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv_ssse3.inc
@@ -0,0 +1,200 @@
+; Copyright (c) 2011 The Chromium Authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+;
+; void SYMBOL(const uint8* argb, uint8* y, uint8* u, uint8* v, int width);
+;
+; The main code that converts RGB pixels to YUV pixels. This function roughly
+; consists of three parts: converting one ARGB pixel to YUV pixels, converting
+; two ARGB pixels to YUV pixels, and converting four ARGB pixels to YUV pixels.
+; To write the structure of this function in C, it becomes the snippet listed
+; below.
+;
+; if (width & 1) {
+; --width;
+; // Convert one ARGB pixel to one Y pixel, one U pixel, and one V pixel.
+; }
+;
+; if (width & 2) {
+; width -= 2;
+; // Convert two ARGB pixels to two Y pixels, one U pixel, and one V pixel.
+; }
+;
+; while (width) {
+; width -= 4;
+; // Convert four ARGB pixels to four Y pixels, two U pixels, and two V
+; // pixels.
+; }
+;
+ global mangle(SYMBOL) PRIVATE
+ align function_align
+
+mangle(SYMBOL):
+ %assign stack_offset 0
+ PROLOGUE 5, 6, 8, ARGB, Y, U, V, WIDTH, TEMP
+
+ ; Initialize constants used in this function. (We use immediates to avoid
+ ; dependency onto GOT.)
+ LOAD_XMM XMM_CONST_Y0, 0x00420219
+ LOAD_XMM XMM_CONST_Y1, 0x00007F00
+ LOAD_XMM XMM_CONST_U, 0x00DAB670
+ LOAD_XMM XMM_CONST_V, 0x0070A2EE
+ LOAD_XMM XMM_CONST_128, 0x00800080
+
+.convert_one_pixel:
+ ; Divide the input width by two so it represents the offsets for u[] and v[].
+  ; When the width is odd, we read the rightmost ARGB pixel and convert its
+ ; colorspace to YUV. This code stores one Y pixel, one U pixel, and one V
+ ; pixel.
+ sar WIDTHq, 1
+ jnc .convert_two_pixels
+
+ ; Read one ARGB (or RGB) pixel.
+ READ_ARGB xmm0, 1
+
+ ; Calculate y[0] from one RGB pixel read above.
+ CALC_Y xmm1, xmm0
+ movd TEMPd, xmm1
+ mov BYTE [Yq + WIDTHq * 2], TEMPb
+
+ ; Calculate u[0] from one RGB pixel read above. If this is an odd line, the
+ ; output pixel contains the U value calculated in the previous call. We also
+ ; read this pixel and calculate their average.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Uq + WIDTHq], TEMPb
+
+ ; Calculate v[0] from one RGB pixel. Same as u[0], we read the result of the
+ ; previous call and get their average.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Vq + WIDTHq], TEMPb
+
+.convert_two_pixels:
+ ; If the input width is not a multiple of four, read the rightmost two ARGB
+ ; pixels and convert their colorspace to YUV. This code stores two Y pixels,
+ ; one U pixel, and one V pixel.
+ test WIDTHb, 2 / 2
+ jz .convert_four_pixels
+ sub WIDTHb, 2 / 2
+
+ ; Read two ARGB (or RGB) pixels.
+ READ_ARGB xmm0, 2
+
+  ; Calculate y[0] and y[1] from two RGB pixels read above.
+ CALC_Y xmm1, xmm0
+ movd TEMPd, xmm1
+ mov WORD [Yq + WIDTHq * 2], TEMPw
+
+ ; Skip calculating u and v if the output buffer is NULL.
+ test Uq, Uq
+ jz .convert_four_pixels
+
+ ; Calculate u[0] from two RGB pixels read above. (For details, read the above
+ ; comment in .convert_one_pixel).
+ INIT_UV TEMPd, Uq, 2
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Uq + WIDTHq], TEMPb
+
+ ; Calculate v[0] from two RGB pixels read above.
+ INIT_UV TEMPd, Vq, 2
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov BYTE [Vq + WIDTHq], TEMPb
+
+.convert_four_pixels:
+ ; Read four ARGB pixels and convert their colorspace to YUV. This code stores
+ ; four Y pixels, two U pixels, and two V pixels.
+ test WIDTHq, WIDTHq
+ jz .convert_finish
+
+%if PIXELSIZE == 4
+ ; Check if the input buffer is aligned to a 16-byte boundary and use movdqa
+ ; for reading the ARGB pixels.
+ test ARGBw, 15
+ jnz .convert_four_pixels_unaligned
+
+.convert_four_pixels_aligned:
+ sub WIDTHq, 4 / 2
+
+ ; Read four ARGB pixels. (We can use movdqa here since we have checked if the
+ ; source address is aligned.)
+ movdqa xmm0, DQWORD [ARGBq + WIDTHq * 4 * 2]
+
+  ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
+ CALC_Y xmm1, xmm0
+ movd DWORD [Yq + WIDTHq * 2], xmm1
+
+%if SUBSAMPLING == 0
+ ; Skip calculating u and v if the output buffer is NULL, which means we are
+ ; converting an odd line. (When we enable subsampling, these buffers must
+ ; contain the u and v values for the previous call, i.e. these variables must
+ ; not be NULL.)
+ test Uq, Uq
+ jz .convert_four_pixels_aligned_next
+%endif
+
+ ; Calculate u[0] and u[1] from four ARGB pixels read above.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Uq + WIDTHq], TEMPw
+
+ ; Calculate v[0] and v[1] from four ARGB pixels read above.
+ INIT_UV TEMPd, Vq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Vq + WIDTHq], TEMPw
+
+%if SUBSAMPLING == 0
+.convert_four_pixels_aligned_next:
+%endif
+
+ test WIDTHq, WIDTHq
+ jnz .convert_four_pixels_aligned
+
+ jmp .convert_finish
+%endif
+
+.convert_four_pixels_unaligned:
+ sub WIDTHq, 4 / 2
+
+ ; Read four ARGB (or RGB) pixels.
+ READ_ARGB xmm0, 4
+
+  ; Calculate y[0], y[1], y[2], and y[3] from the input ARGB pixels.
+ CALC_Y xmm1, xmm0
+ movd DWORD [Yq + WIDTHq * 2], xmm1
+
+%if SUBSAMPLING == 0
+ ; Skip calculating u and v if the output buffer is NULL.
+ test Uq, Uq
+ jz .convert_four_pixels_unaligned_next
+%endif
+
+ ; Calculate u[0] and u[1] from the input ARGB pixels.
+ INIT_UV TEMPd, Uq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_U, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Uq + WIDTHq], TEMPw
+
+ ; Calculate v[0] and v[1] from the input ARGB pixels.
+ INIT_UV TEMPd, Vq, 4
+ CALC_UV xmm1, xmm0, XMM_CONST_V, TEMPd
+ movd TEMPd, xmm1
+ mov WORD [Vq + WIDTHq], TEMPw
+
+%if SUBSAMPLING == 0
+.convert_four_pixels_unaligned_next:
+%endif
+
+ test WIDTHq, WIDTHq
+ jnz .convert_four_pixels_unaligned
+
+.convert_finish:
+ ; Just exit this function since this is a void function.
+ RET
diff --git a/media/base/simd/convert_rgb_to_yuv_unittest.cc b/media/base/simd/convert_rgb_to_yuv_unittest.cc
new file mode 100644
index 0000000..5dcd9e8
--- /dev/null
+++ b/media/base/simd/convert_rgb_to_yuv_unittest.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_ptr.h"
+#include "base/stringprintf.h"
+#include "media/base/cpu_features.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Reference code that converts RGB pixels to YUV pixels.
+int ConvertRGBToY(const uint8* rgb) {
+ int y = 25 * rgb[0] + 129 * rgb[1] + 66 * rgb[2];
+ y = ((y + 128) >> 8) + 16;
+ return std::max(0, std::min(255, y));
+}
+
+int ConvertRGBToU(const uint8* rgb, int size, bool subsampling) {
+ int u = 0;
+ if (!subsampling) {
+ u = 112 * rgb[0] - 74 * rgb[1] - 38 * rgb[2];
+ } else {
+ int u0 = 112 * rgb[0] - 74 * rgb[1] - 38 * rgb[2];
+ int u1 = 112 * rgb[size] - 74 * rgb[size + 1] - 38 * rgb[size + 2];
+ u = (u0 + u1 + 1) / 2;
+ }
+ u = ((u + 128) >> 8) + 128;
+ return std::max(0, std::min(255, u));
+}
+
+int ConvertRGBToV(const uint8* rgb, int size, bool subsampling) {
+ int v = 0;
+ if (!subsampling) {
+ v = -18 * rgb[0] - 94 * rgb[1] + 112 * rgb[2];
+ } else {
+ int v0 = -18 * rgb[0] - 94 * rgb[1] + 112 * rgb[2];
+ int v1 = -18 * rgb[size] - 94 * rgb[size + 1] + 112 * rgb[size + 2];
+ v = (v0 + v1 + 1) / 2;
+ }
+ v = ((v + 128) >> 8) + 128;
+ return std::max(0, std::min(255, v));
+}
+
+} // namespace
+
+// A side-by-side test that verifies our ASM functions that convert RGB pixels
+// to YUV pixels can output the expected results. This test converts RGB pixels
+// to YUV pixels with our ASM functions (which use SSE, SSE2, SSE3, and SSSE3)
+// and compares the output YUV pixels with the ones calculated with our reference
+// functions implemented in C++.
+TEST(YUVConvertTest, SideBySideRGB) {
+  // We skip this test on PCs which do not support SSSE3 because this test
+ // needs it.
+ if (!media::hasSSSE3())
+ return;
+
+  // This test checks a subset of all RGB values so that this test does not
+  // take too long.
+ const int kStep = 8;
+ const int kWidth = 256 / kStep;
+
+#ifdef ENABLE_SUBSAMPLING
+ const bool kSubsampling = true;
+#else
+ const bool kSubsampling = false;
+#endif
+
+ for (int size = 3; size <= 4; ++size) {
+ // Create the output buffers.
+ scoped_array<uint8> rgb(new uint8[kWidth * size]);
+ scoped_array<uint8> y(new uint8[kWidth]);
+ scoped_array<uint8> u(new uint8[kWidth / 2]);
+ scoped_array<uint8> v(new uint8[kWidth / 2]);
+
+ // Choose the function that converts from RGB pixels to YUV ones.
+ void (*convert)(const uint8*, uint8*, uint8*, uint8*,
+ int, int, int, int, int) = NULL;
+ if (size == 3)
+ convert = media::ConvertRGB24ToYUV_SSSE3;
+ else
+ convert = media::ConvertRGB32ToYUV_SSSE3;
+
+ for (int r = 0; r < kWidth; ++r) {
+ for (int g = 0; g < kWidth; ++g) {
+
+ // Fill the input pixels.
+ for (int b = 0; b < kWidth; ++b) {
+ rgb[b * size + 0] = b * kStep;
+ rgb[b * size + 1] = g * kStep;
+ rgb[b * size + 2] = r * kStep;
+ if (size == 4)
+ rgb[b * size + 3] = 255;
+ }
+
+ // Convert the input RGB pixels to YUV ones.
+ convert(rgb.get(), y.get(), u.get(), v.get(), kWidth, 1, kWidth * size,
+ kWidth, kWidth / 2);
+
+ // Check the output Y pixels.
+ for (int i = 0; i < kWidth; ++i) {
+ const uint8* p = &rgb[i * size];
+ SCOPED_TRACE(base::StringPrintf("r=%d,g=%d,b=%d", p[2], p[1], p[0]));
+ EXPECT_EQ(ConvertRGBToY(p), y[i]);
+ }
+
+ // Check the output U pixels.
+ for (int i = 0; i < kWidth / 2; ++i) {
+ const uint8* p = &rgb[i * 2 * size];
+ SCOPED_TRACE(base::StringPrintf("r=%d,g=%d,b=%d", p[2], p[1], p[0]));
+ EXPECT_EQ(ConvertRGBToU(p, size, kSubsampling), u[i]);
+ }
+
+ // Check the output V pixels.
+ for (int i = 0; i < kWidth / 2; ++i) {
+ const uint8* p = &rgb[i * 2 * size];
+ SCOPED_TRACE(base::StringPrintf("r=%d,g=%d,b=%d", p[2], p[1], p[0]));
+ EXPECT_EQ(ConvertRGBToV(p, size, kSubsampling), v[i]);
+ }
+ }
+ }
+ }
+}
diff --git a/media/base/simd/x86inc.asm b/media/base/simd/x86inc.asm
new file mode 100644
index 0000000..956b999
--- /dev/null
+++ b/media/base/simd/x86inc.asm
@@ -0,0 +1,1001 @@
+;*****************************************************************************
+;* x86inc.asm
+;*****************************************************************************
+;* Copyright (C) 2005-2011 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+%ifndef MEDIA_BASE_SIMD_X86INC_ASM_
+%define MEDIA_BASE_SIMD_X86INC_ASM_
+
+%define program_name ff
+
+%ifdef ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64
+ %else
+ %define UNIX64
+ %endif
+%endif
+
+%ifdef PREFIX
+ %define mangle(x) _ %+ x
+%else
+ %define mangle(x) x
+%endif
+
+; FIXME: All of the 64bit asm functions that take a stride as an argument
+; via register, assume that the high dword of that register is filled with 0.
+; This is true in practice (since we never do any 64bit arithmetic on strides,
+; and x264's strides are all positive), but is not guaranteed by the ABI.
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%ifdef CHROMIUM
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %elifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+%else
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+%endif
+
+; aout does not support align=
+%macro SECTION_TEXT 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %else
+ SECTION .text align=%1
+ %endif
+%endmacro
+
+%ifdef WIN64
+ %define PIC
+%elifndef ARCH_X86_64
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
+%endif
+%ifdef PIC
+ default rel
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 6
+ %define r%1q %2
+ %define r%1d %3
+ %define r%1w %4
+ %define r%1b %5
+ %define r%1m %6
+ %ifid %6 ; i.e. it's a register
+ %define r%1mp %2
+ %elifdef ARCH_X86_64 ; memory
+ %define r%1mp qword %6
+ %else
+ %define r%1mp dword %6
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 2
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1b %2
+ %define e%1b %2
+%ifndef ARCH_X86_64
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al
+DECLARE_REG_SIZE bx, bl
+DECLARE_REG_SIZE cx, cl
+DECLARE_REG_SIZE dx, dl
+DECLARE_REG_SIZE si, sil
+DECLARE_REG_SIZE di, dil
+DECLARE_REG_SIZE bp, bpl
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9
+
+%ifdef ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %assign n_arg_names %%i
+%endmacro
+
+%ifdef WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx, ecx, cx, cl, ecx
+DECLARE_REG 1, rdx, edx, dx, dl, edx
+DECLARE_REG 2, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 3, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 4, rdi, edi, di, dil, [rsp + stack_offset + 40]
+DECLARE_REG 5, rsi, esi, si, sil, [rsp + stack_offset + 48]
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 56]
+%define r7m [rsp + stack_offset + 64]
+%define r8m [rsp + stack_offset + 72]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp + stack_offset + 8 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+ %if regs_used > 4
+ push r4
+ push r5
+ %assign stack_offset stack_offset+16
+ %endif
+ WIN64_SPILL_XMM %3
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16
+ %if xmm_regs_used > 6
+ sub rsp, (xmm_regs_used-6)*16+16
+ %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+ WIN64_RESTORE_XMM_INTERNAL %1
+ %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
+ %assign xmm_regs_used 0
+%endmacro
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL rsp
+ %if regs_used > 4
+ pop r5
+ pop r4
+ %endif
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 4 || xmm_regs_used > 6
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi, edi, di, dil, edi
+DECLARE_REG 1, rsi, esi, si, sil, esi
+DECLARE_REG 2, rdx, edx, dx, dl, edx
+DECLARE_REG 3, rcx, ecx, cx, cl, ecx
+DECLARE_REG 4, r8, r8d, r8w, r8b, r8d
+DECLARE_REG 5, r9, r9d, r9w, r9b, r9d
+DECLARE_REG 6, rax, eax, ax, al, [rsp + stack_offset + 8]
+%define r7m [rsp + stack_offset + 16]
+%define r8m [rsp + stack_offset + 24]
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [rsp - 40 + %1*8]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ ASSERT %2 <= 7
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ ret
+%endmacro
+
+%macro REP_RET 0
+ rep ret
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+; Begin chromium edits
+%ifdef CHROMIUM
+; Change the order of registers so we can access the lower 8 bits of the 5th
+; and 6th arguments.
+DECLARE_REG 0, esi, esi, si, null, [esp + stack_offset + 4]
+DECLARE_REG 1, edi, edi, di, null, [esp + stack_offset + 8]
+DECLARE_REG 2, ecx, ecx, cx, cl, [esp + stack_offset + 12]
+DECLARE_REG 3, edx, edx, dx, dl, [esp + stack_offset + 16]
+DECLARE_REG 4, eax, eax, ax, al, [esp + stack_offset + 20]
+DECLARE_REG 5, ebx, ebx, bx, bl, [esp + stack_offset + 24]
+%else
+DECLARE_REG 0, eax, eax, ax, al, [esp + stack_offset + 4]
+DECLARE_REG 1, ecx, ecx, cx, cl, [esp + stack_offset + 8]
+DECLARE_REG 2, edx, edx, dx, dl, [esp + stack_offset + 12]
+DECLARE_REG 3, ebx, ebx, bx, bl, [esp + stack_offset + 16]
+DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
+DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
+%endif
+; End chromium edits
+DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
+%define r7m [esp + stack_offset + 32]
+%define r8m [esp + stack_offset + 36]
+%define rsp esp
+
+%macro PUSH_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ push r%1
+ %assign stack_offset stack_offset+4
+ %endif
+%endmacro
+
+%macro POP_IF_USED 1 ; reg_id
+ %if %1 < regs_used
+ pop r%1
+ %endif
+%endmacro
+
+%macro LOAD_IF_USED 2 ; reg_id, number_of_args
+ %if %1 < %2
+ mov r%1, [esp + stack_offset + 4 + %1*4]
+ %endif
+%endmacro
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ ASSERT %2 >= %1
+ %assign regs_used %2
+ ASSERT regs_used <= 7
+%ifdef CHROMIUM
+ PUSH_IF_USED 0
+ PUSH_IF_USED 1
+ PUSH_IF_USED 5
+%else
+ PUSH_IF_USED 3
+ PUSH_IF_USED 4
+ PUSH_IF_USED 5
+%endif
+ PUSH_IF_USED 6
+ LOAD_IF_USED 0, %1
+ LOAD_IF_USED 1, %1
+ LOAD_IF_USED 2, %1
+ LOAD_IF_USED 3, %1
+ LOAD_IF_USED 4, %1
+ LOAD_IF_USED 5, %1
+ LOAD_IF_USED 6, %1
+ DEFINE_ARGS %4
+%endmacro
+
+%macro RET 0
+ POP_IF_USED 6
+%ifdef CHROMIUM
+ POP_IF_USED 5
+ POP_IF_USED 1
+ POP_IF_USED 0
+%else
+ POP_IF_USED 5
+ POP_IF_USED 4
+ POP_IF_USED 3
+%endif
+ ret
+%endmacro
+
+%macro REP_RET 0
+ %if regs_used > 3
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%endif ;======================================================================
+
+%ifndef WIN64
+%macro WIN64_SPILL_XMM 1
+%endmacro
+%macro WIN64_RESTORE_XMM 1
+%endmacro
+%endif
+
+
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Symbol prefix for C linkage
+%macro cglobal 1-2+
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ extern %1
+%endmacro
+
+;like cextern, but without the prefix
+%macro cextern_naked 1
+ %xdefine %1 mangle(%1)
+ extern %1
+%endmacro
+
+%macro const 2+
+ %xdefine %1 mangle(program_name %+ _ %+ %1)
+ global %1
+ %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+; merge mmx and sse*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro INIT_XMM 0
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM
+ %define mmsize 16
+ %define num_mmregs 8
+ %ifdef ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro INIT_AVX 0
+ INIT_XMM
+ %assign avx_enabled 1
+ %define PALIGNR PALIGNR_SSSE3
+ %define RESET_MM_PERMUTATION INIT_AVX
+%endmacro
+
+%macro INIT_YMM 0
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM
+ %define mmsize 32
+ %define num_mmregs 8
+ %ifdef ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova vmovaps
+ %define movu vmovups
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nymm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+INIT_MMX
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+ ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function and given the
+; function name, then any later calls to that function will automatically
+; load the permutation, so values can be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 1 ; name to save as
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %1_m, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro call 1
+ call %1
+ %ifdef %1_m0
+ LOAD_MM_PERMUTATION %1
+ %endif
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+%assign i i+1
+%endrep
+%undef i
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
+;%4 == number of operands given
+;%5+: operands
+%macro RUN_AVX_INSTR 6-7+
+ %if sizeof%5==32
+ v%1 %5, %6, %7
+ %else
+ %if sizeof%5==8
+ %define %%regmov movq
+ %elif %2
+ %define %%regmov movaps
+ %else
+ %define %%regmov movdqa
+ %endif
+
+ %if %4>=3+%3
+ %ifnidn %5, %6
+ %if avx_enabled && sizeof%5==16
+ v%1 %5, %6, %7
+ %else
+ %%regmov %5, %6
+ %1 %5, %7
+ %endif
+ %else
+ %1 %5, %7
+ %endif
+ %elif %3
+ %1 %5, %6, %7
+ %else
+ %1 %5, %6
+ %endif
+ %endif
+%endmacro
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
+%macro AVX_INSTR 3
+ %macro %1 2-8 fnord, fnord, fnord, %1, %2, %3
+ %ifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR %6, %7, %8, 3, %1, %2, %3
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
+
+AVX_INSTR addpd, 1, 0
+AVX_INSTR addps, 1, 0
+AVX_INSTR addsd, 1, 0
+AVX_INSTR addss, 1, 0
+AVX_INSTR addsubpd, 1, 0
+AVX_INSTR addsubps, 1, 0
+AVX_INSTR andpd, 1, 0
+AVX_INSTR andps, 1, 0
+AVX_INSTR andnpd, 1, 0
+AVX_INSTR andnps, 1, 0
+AVX_INSTR blendpd, 1, 0
+AVX_INSTR blendps, 1, 0
+AVX_INSTR blendvpd, 1, 0
+AVX_INSTR blendvps, 1, 0
+AVX_INSTR cmppd, 1, 0
+AVX_INSTR cmpps, 1, 0
+AVX_INSTR cmpsd, 1, 0
+AVX_INSTR cmpss, 1, 0
+AVX_INSTR divpd, 1, 0
+AVX_INSTR divps, 1, 0
+AVX_INSTR divsd, 1, 0
+AVX_INSTR divss, 1, 0
+AVX_INSTR dppd, 1, 0
+AVX_INSTR dpps, 1, 0
+AVX_INSTR haddpd, 1, 0
+AVX_INSTR haddps, 1, 0
+AVX_INSTR hsubpd, 1, 0
+AVX_INSTR hsubps, 1, 0
+AVX_INSTR maxpd, 1, 0
+AVX_INSTR maxps, 1, 0
+AVX_INSTR maxsd, 1, 0
+AVX_INSTR maxss, 1, 0
+AVX_INSTR minpd, 1, 0
+AVX_INSTR minps, 1, 0
+AVX_INSTR minsd, 1, 0
+AVX_INSTR minss, 1, 0
+AVX_INSTR mpsadbw, 0, 1
+AVX_INSTR mulpd, 1, 0
+AVX_INSTR mulps, 1, 0
+AVX_INSTR mulsd, 1, 0
+AVX_INSTR mulss, 1, 0
+AVX_INSTR orpd, 1, 0
+AVX_INSTR orps, 1, 0
+AVX_INSTR packsswb, 0, 0
+AVX_INSTR packssdw, 0, 0
+AVX_INSTR packuswb, 0, 0
+AVX_INSTR packusdw, 0, 0
+AVX_INSTR paddb, 0, 0
+AVX_INSTR paddw, 0, 0
+AVX_INSTR paddd, 0, 0
+AVX_INSTR paddq, 0, 0
+AVX_INSTR paddsb, 0, 0
+AVX_INSTR paddsw, 0, 0
+AVX_INSTR paddusb, 0, 0
+AVX_INSTR paddusw, 0, 0
+AVX_INSTR palignr, 0, 1
+AVX_INSTR pand, 0, 0
+AVX_INSTR pandn, 0, 0
+AVX_INSTR pavgb, 0, 0
+AVX_INSTR pavgw, 0, 0
+AVX_INSTR pblendvb, 0, 0
+AVX_INSTR pblendw, 0, 1
+AVX_INSTR pcmpestri, 0, 0
+AVX_INSTR pcmpestrm, 0, 0
+AVX_INSTR pcmpistri, 0, 0
+AVX_INSTR pcmpistrm, 0, 0
+AVX_INSTR pcmpeqb, 0, 0
+AVX_INSTR pcmpeqw, 0, 0
+AVX_INSTR pcmpeqd, 0, 0
+AVX_INSTR pcmpeqq, 0, 0
+AVX_INSTR pcmpgtb, 0, 0
+AVX_INSTR pcmpgtw, 0, 0
+AVX_INSTR pcmpgtd, 0, 0
+AVX_INSTR pcmpgtq, 0, 0
+AVX_INSTR phaddw, 0, 0
+AVX_INSTR phaddd, 0, 0
+AVX_INSTR phaddsw, 0, 0
+AVX_INSTR phsubw, 0, 0
+AVX_INSTR phsubd, 0, 0
+AVX_INSTR phsubsw, 0, 0
+AVX_INSTR pmaddwd, 0, 0
+AVX_INSTR pmaddubsw, 0, 0
+AVX_INSTR pmaxsb, 0, 0
+AVX_INSTR pmaxsw, 0, 0
+AVX_INSTR pmaxsd, 0, 0
+AVX_INSTR pmaxub, 0, 0
+AVX_INSTR pmaxuw, 0, 0
+AVX_INSTR pmaxud, 0, 0
+AVX_INSTR pminsb, 0, 0
+AVX_INSTR pminsw, 0, 0
+AVX_INSTR pminsd, 0, 0
+AVX_INSTR pminub, 0, 0
+AVX_INSTR pminuw, 0, 0
+AVX_INSTR pminud, 0, 0
+AVX_INSTR pmulhuw, 0, 0
+AVX_INSTR pmulhrsw, 0, 0
+AVX_INSTR pmulhw, 0, 0
+AVX_INSTR pmullw, 0, 0
+AVX_INSTR pmulld, 0, 0
+AVX_INSTR pmuludq, 0, 0
+AVX_INSTR pmuldq, 0, 0
+AVX_INSTR por, 0, 0
+AVX_INSTR psadbw, 0, 0
+AVX_INSTR pshufb, 0, 0
+AVX_INSTR psignb, 0, 0
+AVX_INSTR psignw, 0, 0
+AVX_INSTR psignd, 0, 0
+AVX_INSTR psllw, 0, 0
+AVX_INSTR pslld, 0, 0
+AVX_INSTR psllq, 0, 0
+AVX_INSTR pslldq, 0, 0
+AVX_INSTR psraw, 0, 0
+AVX_INSTR psrad, 0, 0
+AVX_INSTR psrlw, 0, 0
+AVX_INSTR psrld, 0, 0
+AVX_INSTR psrlq, 0, 0
+AVX_INSTR psrldq, 0, 0
+AVX_INSTR psubb, 0, 0
+AVX_INSTR psubw, 0, 0
+AVX_INSTR psubd, 0, 0
+AVX_INSTR psubq, 0, 0
+AVX_INSTR psubsb, 0, 0
+AVX_INSTR psubsw, 0, 0
+AVX_INSTR psubusb, 0, 0
+AVX_INSTR psubusw, 0, 0
+AVX_INSTR punpckhbw, 0, 0
+AVX_INSTR punpckhwd, 0, 0
+AVX_INSTR punpckhdq, 0, 0
+AVX_INSTR punpckhqdq, 0, 0
+AVX_INSTR punpcklbw, 0, 0
+AVX_INSTR punpcklwd, 0, 0
+AVX_INSTR punpckldq, 0, 0
+AVX_INSTR punpcklqdq, 0, 0
+AVX_INSTR pxor, 0, 0
+AVX_INSTR shufps, 0, 1
+AVX_INSTR subpd, 1, 0
+AVX_INSTR subps, 1, 0
+AVX_INSTR subsd, 1, 0
+AVX_INSTR subss, 1, 0
+AVX_INSTR unpckhpd, 1, 0
+AVX_INSTR unpckhps, 1, 0
+AVX_INSTR unpcklpd, 1, 0
+AVX_INSTR unpcklps, 1, 0
+AVX_INSTR xorpd, 1, 0
+AVX_INSTR xorps, 1, 0
+
+; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
+AVX_INSTR pfadd, 1, 0
+AVX_INSTR pfsub, 1, 0
+AVX_INSTR pfmul, 1, 0
+
+;=============================================================================
+; Chromium extensions
+;=============================================================================
+
+%ifdef CHROMIUM
+;
+; LOAD_SYM %1 (reg), %2 (sym)
+; Copies the address of a local symbol to the specified register.
+;
+
+%macro LOAD_SYM 2
+
+%ifdef MACHO
+ call %%geteip
+ add %1, %2 - $
+ jmp %%end
+%%geteip:
+ mov %1, [rsp]
+ ret
+%%end:
+
+%else
+ lea %1, [%2]
+%endif
+
+%endmacro
+
+;
+; MOVq %1 (xmm), %2 (reg)
+; MOVq %1 (reg), %2 (xmm)
+; Copies a general-purpose register to an XMM register, and vice versa.
+;
+%macro MOVq 2
+%if gprsize == 8
+ movq %1, %2
+%else
+ movd %1, %2
+%endif
+%endmacro
+
+;
+; PRIVATE
+; A flag indicating that the specified symbol is a private symbol. This define
+; adds a hidden flag on Linux and a private_extern flag on Mac. (We can use
+; this private_extern flag only on the latest yasm.)
+;
+%ifdef MACHO
+%define PRIVATE :private_extern
+%elifdef ELF
+%define PRIVATE :hidden
+%else
+%define PRIVATE
+%endif
+
+%endif ; CHROMIUM
+
+%endif ; MEDIA_BASE_SIMD_X86INC_ASM_
diff --git a/media/base/yuv_convert.cc b/media/base/yuv_convert.cc
index d332065..3a27ca4 100644
--- a/media/base/yuv_convert.cc
+++ b/media/base/yuv_convert.cc
@@ -19,6 +19,7 @@
#include "build/build_config.h"
#include "media/base/cpu_features.h"
+#include "media/base/simd/convert_rgb_to_yuv.h"
#include "media/base/yuv_convert_internal.h"
#include "media/base/yuv_row.h"
@@ -369,8 +370,10 @@ void ConvertRGB32ToYUV(const uint8* rgbframe,
// TODO(hclam): Implement a NEON version.
convert_proc = &ConvertRGB32ToYUV_C;
#else
- // For x86 processors, check if SSE2 is supported.
- if (hasSSE2())
+ // For x86 processors, check if SSSE3 (or SSE2) is supported.
+ if (hasSSSE3())
+ convert_proc = &ConvertRGB32ToYUV_SSSE3;
+ else if (hasSSE2())
convert_proc = &ConvertRGB32ToYUV_SSE2;
else
convert_proc = &ConvertRGB32ToYUV_C;
@@ -390,8 +393,21 @@ void ConvertRGB24ToYUV(const uint8* rgbframe,
int rgbstride,
int ystride,
int uvstride) {
+#if defined(ARCH_CPU_ARM_FAMILY)
ConvertRGB24ToYUV_C(rgbframe, yplane, uplane, vplane, width, height,
rgbstride, ystride, uvstride);
+#else
+ static void (*convert_proc)(const uint8*, uint8*, uint8*, uint8*,
+ int, int, int, int, int) = NULL;
+ if (!convert_proc) {
+ if (hasSSSE3())
+ convert_proc = &ConvertRGB24ToYUV_SSSE3;
+ else
+ convert_proc = &ConvertRGB24ToYUV_C;
+ }
+ convert_proc(rgbframe, yplane, uplane, vplane, width, height,
+ rgbstride, ystride, uvstride);
+#endif
}
void ConvertYUY2ToYUV(const uint8* src,
diff --git a/media/media.gyp b/media/media.gyp
index fcc9b53..dfe4bbf 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -380,9 +380,63 @@
},
},
}],
+ [ 'target_arch=="ia32" or target_arch=="x64"', {
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_ssse3.asm',
+ ],
+ }],
+ [ 'OS=="win"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DWIN32',
+ '-DMSVC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'OS=="mac"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DPREFIX',
+ '-DMACHO',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'OS=="linux"', {
+ 'variables': {
+ 'conditions': [
+ [ 'target_arch=="ia32"', {
+ 'yasm_flags': [
+ '-DX86_32',
+ '-DELF',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }, {
+ 'yasm_flags': [
+ '-DARCH_X86_64',
+ '-DELF',
+ '-DPIC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }],
+ ],
+ },
+ }],
],
'sources': [
'base/yuv_convert_sse2.cc',
+ 'base/simd/convert_rgb_to_yuv.cc',
+ ],
+ 'variables': {
+ 'yasm_output_path': '<(SHARED_INTERMEDIATE_DIR)/media',
+ },
+ 'includes': [
+ '../third_party/yasm/yasm_compile.gypi',
],
},
{
@@ -460,6 +514,7 @@
'base/pts_stream_unittest.cc',
'base/run_all_unittests.cc',
'base/seekable_buffer_unittest.cc',
+ 'base/simd/convert_rgb_to_yuv_unittest.cc',
'base/state_matrix_unittest.cc',
'base/test_data_util.cc',
'base/test_data_util.h',