summaryrefslogtreecommitdiffstats
path: root/third_party
diff options
context:
space:
mode:
authorwaffles@chromium.org <waffles@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-06-17 20:33:11 +0000
committerwaffles@chromium.org <waffles@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-06-17 20:33:11 +0000
commitf6820ba92b1bd5f3f44cbedb116e8593dd6e5968 (patch)
treeb12dc213ca1e09abbfcfeeec2b407f3621957908 /third_party
parenta9bd323d70cfcea6b6f340893965fa3d21ec94ce (diff)
downloadchromium_src-f6820ba92b1bd5f3f44cbedb116e8593dd6e5968.zip
chromium_src-f6820ba92b1bd5f3f44cbedb116e8593dd6e5968.tar.gz
chromium_src-f6820ba92b1bd5f3f44cbedb116e8593dd6e5968.tar.bz2
Avoid register corruption in LZMA ASM in 64-bit architectures.
I tried this previously but missed that $ebx is used to track the global offset table in position-independent-code. I wish the .patch file was smaller: the actual change is much smaller, but I can't seem to make `diff` spit out a better patch, and I don't want to hand-edit it out of fear of breaking the patching. More detail on why this patch is necessary is at the bug. BUG=248385 Review URL: https://chromiumcodereview.appspot.com/16965016 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@206792 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'third_party')
-rw-r--r--third_party/lzma_sdk/CpuArch.c16
-rw-r--r--third_party/lzma_sdk/README.chromium5
-rw-r--r--third_party/lzma_sdk/chromium.patch354
3 files changed, 370 insertions, 5 deletions
diff --git a/third_party/lzma_sdk/CpuArch.c b/third_party/lzma_sdk/CpuArch.c
index f8df6b2..272220e 100644
--- a/third_party/lzma_sdk/CpuArch.c
+++ b/third_party/lzma_sdk/CpuArch.c
@@ -72,6 +72,20 @@ static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
#else
+ #if defined(MY_CPU_AMD64)
+
+ __asm__ __volatile__ (
+ "mov %%rbx, %%rdi\n"
+ "cpuid\n"
+ "xchg %%rdi, %%rbx\n"
+ : "=a" (*a) ,
+ "=D" (*b) ,
+ "=c" (*c) ,
+ "=d" (*d)
+ : "0" (function)) ;
+
+ #else
+
__asm__ __volatile__ (
"mov %%ebx, %%edi\n"
"cpuid\n"
@@ -83,6 +97,8 @@ static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
: "0" (function)) ;
#endif
+
+ #endif
#else
diff --git a/third_party/lzma_sdk/README.chromium b/third_party/lzma_sdk/README.chromium
index c9ea02f..a042cb9 100644
--- a/third_party/lzma_sdk/README.chromium
+++ b/third_party/lzma_sdk/README.chromium
@@ -13,7 +13,10 @@ compression has been included. The project files have been rewritten to use
proper file paths and generate a static lib.
The patch in chromium.patch was applied to CpuArch.c to fix compile error on
-32bit Linux.
+32bit Linux (when compiled with -fPIC).
+
+2013-06-14: The patch was updated to fix register corruption that can occur on
+64-bit platforms.
An #include <stdlib.h> needs to be added to CpuArch.h to avoid a warning on
Win32.
diff --git a/third_party/lzma_sdk/chromium.patch b/third_party/lzma_sdk/chromium.patch
index 319a840..7cd6149 100644
--- a/third_party/lzma_sdk/chromium.patch
+++ b/third_party/lzma_sdk/chromium.patch
@@ -1,10 +1,356 @@
-76,78c76
+1,186c1,168
+< /* CpuArch.c -- CPU specific code
+< 2010-10-26: Igor Pavlov : Public domain */
+<
+< #include "CpuArch.h"
+<
+< #ifdef MY_CPU_X86_OR_AMD64
+<
+< #if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
+< #define USE_ASM
+< #endif
+<
+< #if defined(USE_ASM) && !defined(MY_CPU_AMD64)
+< static UInt32 CheckFlag(UInt32 flag)
+< {
+< #ifdef _MSC_VER
+< __asm pushfd;
+< __asm pop EAX;
+< __asm mov EDX, EAX;
+< __asm xor EAX, flag;
+< __asm push EAX;
+< __asm popfd;
+< __asm pushfd;
+< __asm pop EAX;
+< __asm xor EAX, EDX;
+< __asm push EDX;
+< __asm popfd;
+< __asm and flag, EAX;
+< #else
+< __asm__ __volatile__ (
+< "pushf\n\t"
+< "pop %%EAX\n\t"
+< "movl %%EAX,%%EDX\n\t"
+< "xorl %0,%%EAX\n\t"
+< "push %%EAX\n\t"
+< "popf\n\t"
+< "pushf\n\t"
+< "pop %%EAX\n\t"
+< "xorl %%EDX,%%EAX\n\t"
+< "push %%EDX\n\t"
+< "popf\n\t"
+< "andl %%EAX, %0\n\t":
+< "=c" (flag) : "c" (flag));
+< #endif
+< return flag;
+< }
+< #define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
+< #else
+< #define CHECK_CPUID_IS_SUPPORTED
+< #endif
+<
+< static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
+< {
+< #ifdef USE_ASM
+<
+< #ifdef _MSC_VER
+<
+< UInt32 a2, b2, c2, d2;
+< __asm xor EBX, EBX;
+< __asm xor ECX, ECX;
+< __asm xor EDX, EDX;
+< __asm mov EAX, function;
+< __asm cpuid;
+< __asm mov a2, EAX;
+< __asm mov b2, EBX;
+< __asm mov c2, ECX;
+< __asm mov d2, EDX;
+<
+< *a = a2;
+< *b = b2;
+< *c = c2;
+< *d = d2;
+<
+< #else
+<
+< #if defined(MY_CPU_AMD64)
+<
+< __asm__ __volatile__ (
+< "mov %%rbx, %%rdi\n"
+< "cpuid\n"
+< "xchg %%rdi, %%rbx\n"
+< : "=a" (*a) ,
+< "=D" (*b) ,
+< "=c" (*c) ,
+< "=d" (*d)
+< : "0" (function)) ;
+<
+< #else
+<
+< __asm__ __volatile__ (
< "mov %%ebx, %%edi\n"
< "cpuid\n"
< "xchg %%edi, %%ebx\n"
----
-> "cpuid"
-80c78
+< : "=a" (*a) ,
< "=D" (*b) ,
+< "=c" (*c) ,
+< "=d" (*d)
+< : "0" (function)) ;
+<
+< #endif
+<
+< #endif
+<
+< #else
+<
+< int CPUInfo[4];
+< __cpuid(CPUInfo, function);
+< *a = CPUInfo[0];
+< *b = CPUInfo[1];
+< *c = CPUInfo[2];
+< *d = CPUInfo[3];
+<
+< #endif
+< }
+<
+< Bool x86cpuid_CheckAndRead(Cx86cpuid *p)
+< {
+< CHECK_CPUID_IS_SUPPORTED
+< MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
+< MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
+< return True;
+< }
+<
+< static UInt32 kVendors[][3] =
+< {
+< { 0x756E6547, 0x49656E69, 0x6C65746E},
+< { 0x68747541, 0x69746E65, 0x444D4163},
+< { 0x746E6543, 0x48727561, 0x736C7561}
+< };
+<
+< int x86cpuid_GetFirm(const Cx86cpuid *p)
+< {
+< unsigned i;
+< for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
+< {
+< const UInt32 *v = kVendors[i];
+< if (v[0] == p->vendor[0] &&
+< v[1] == p->vendor[1] &&
+< v[2] == p->vendor[2])
+< return (int)i;
+< }
+< return -1;
+< }
+<
+< Bool CPU_Is_InOrder()
+< {
+< Cx86cpuid p;
+< int firm;
+< UInt32 family, model;
+< if (!x86cpuid_CheckAndRead(&p))
+< return True;
+< family = x86cpuid_GetFamily(&p);
+< model = x86cpuid_GetModel(&p);
+< firm = x86cpuid_GetFirm(&p);
+< switch (firm)
+< {
+< case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && model == 0x100C));
+< case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
+< case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
+< }
+< return True;
+< }
+<
+< #if !defined(MY_CPU_AMD64) && defined(_WIN32)
+< static Bool CPU_Sys_Is_SSE_Supported()
+< {
+< OSVERSIONINFO vi;
+< vi.dwOSVersionInfoSize = sizeof(vi);
+< if (!GetVersionEx(&vi))
+< return False;
+< return (vi.dwMajorVersion >= 5);
+< }
+< #define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
+< #else
+< #define CHECK_SYS_SSE_SUPPORT
+< #endif
+<
+< Bool CPU_Is_Aes_Supported()
+< {
+< Cx86cpuid p;
+< CHECK_SYS_SSE_SUPPORT
+< if (!x86cpuid_CheckAndRead(&p))
+< return False;
+< return (p.c >> 25) & 1;
+< }
+<
+< #endif
---
+> /* CpuArch.c -- CPU specific code
+> 2010-10-26: Igor Pavlov : Public domain */
+>
+> #include "CpuArch.h"
+>
+> #ifdef MY_CPU_X86_OR_AMD64
+>
+> #if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
+> #define USE_ASM
+> #endif
+>
+> #if defined(USE_ASM) && !defined(MY_CPU_AMD64)
+> static UInt32 CheckFlag(UInt32 flag)
+> {
+> #ifdef _MSC_VER
+> __asm pushfd;
+> __asm pop EAX;
+> __asm mov EDX, EAX;
+> __asm xor EAX, flag;
+> __asm push EAX;
+> __asm popfd;
+> __asm pushfd;
+> __asm pop EAX;
+> __asm xor EAX, EDX;
+> __asm push EDX;
+> __asm popfd;
+> __asm and flag, EAX;
+> #else
+> __asm__ __volatile__ (
+> "pushf\n\t"
+> "pop %%EAX\n\t"
+> "movl %%EAX,%%EDX\n\t"
+> "xorl %0,%%EAX\n\t"
+> "push %%EAX\n\t"
+> "popf\n\t"
+> "pushf\n\t"
+> "pop %%EAX\n\t"
+> "xorl %%EDX,%%EAX\n\t"
+> "push %%EDX\n\t"
+> "popf\n\t"
+> "andl %%EAX, %0\n\t":
+> "=c" (flag) : "c" (flag));
+> #endif
+> return flag;
+> }
+> #define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
+> #else
+> #define CHECK_CPUID_IS_SUPPORTED
+> #endif
+>
+> static void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
+> {
+> #ifdef USE_ASM
+>
+> #ifdef _MSC_VER
+>
+> UInt32 a2, b2, c2, d2;
+> __asm xor EBX, EBX;
+> __asm xor ECX, ECX;
+> __asm xor EDX, EDX;
+> __asm mov EAX, function;
+> __asm cpuid;
+> __asm mov a2, EAX;
+> __asm mov b2, EBX;
+> __asm mov c2, ECX;
+> __asm mov d2, EDX;
+>
+> *a = a2;
+> *b = b2;
+> *c = c2;
+> *d = d2;
+>
+> #else
+>
+> __asm__ __volatile__ (
+> "cpuid"
+> : "=a" (*a) ,
> "=b" (*b) ,
+> "=c" (*c) ,
+> "=d" (*d)
+> : "0" (function)) ;
+>
+> #endif
+>
+> #else
+>
+> int CPUInfo[4];
+> __cpuid(CPUInfo, function);
+> *a = CPUInfo[0];
+> *b = CPUInfo[1];
+> *c = CPUInfo[2];
+> *d = CPUInfo[3];
+>
+> #endif
+> }
+>
+> Bool x86cpuid_CheckAndRead(Cx86cpuid *p)
+> {
+> CHECK_CPUID_IS_SUPPORTED
+> MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
+> MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
+> return True;
+> }
+>
+> static UInt32 kVendors[][3] =
+> {
+> { 0x756E6547, 0x49656E69, 0x6C65746E},
+> { 0x68747541, 0x69746E65, 0x444D4163},
+> { 0x746E6543, 0x48727561, 0x736C7561}
+> };
+>
+> int x86cpuid_GetFirm(const Cx86cpuid *p)
+> {
+> unsigned i;
+> for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
+> {
+> const UInt32 *v = kVendors[i];
+> if (v[0] == p->vendor[0] &&
+> v[1] == p->vendor[1] &&
+> v[2] == p->vendor[2])
+> return (int)i;
+> }
+> return -1;
+> }
+>
+> Bool CPU_Is_InOrder()
+> {
+> Cx86cpuid p;
+> int firm;
+> UInt32 family, model;
+> if (!x86cpuid_CheckAndRead(&p))
+> return True;
+> family = x86cpuid_GetFamily(&p);
+> model = x86cpuid_GetModel(&p);
+> firm = x86cpuid_GetFirm(&p);
+> switch (firm)
+> {
+> case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && model == 0x100C));
+> case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
+> case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
+> }
+> return True;
+> }
+>
+> #if !defined(MY_CPU_AMD64) && defined(_WIN32)
+> static Bool CPU_Sys_Is_SSE_Supported()
+> {
+> OSVERSIONINFO vi;
+> vi.dwOSVersionInfoSize = sizeof(vi);
+> if (!GetVersionEx(&vi))
+> return False;
+> return (vi.dwMajorVersion >= 5);
+> }
+> #define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
+> #else
+> #define CHECK_SYS_SSE_SUPPORT
+> #endif
+>
+> Bool CPU_Is_Aes_Supported()
+> {
+> Cx86cpuid p;
+> CHECK_SYS_SSE_SUPPORT
+> if (!x86cpuid_CheckAndRead(&p))
+> return False;
+> return (p.c >> 25) & 1;
+> }
+>
+> #endif