author    siggi@chromium.org <siggi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-23 21:09:02 +0000
committer siggi@chromium.org <siggi@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-23 21:09:02 +0000
commit    d999c7c3f4bd9c7c779b4c06ee148d3d0d227e60 (patch)
tree      d4fca3d3c24d56a325000a4129d11c35b548abfa /chrome_frame
parent    722ed48734460567be2c97590a036480c4667815 (diff)
Reimplementation of FunctionStub to avoid rewriting potentially executing code for a slight improvement in thread safety.
Make VTABLE patching threadsafe to the extent possible. As-is it's now safe at least against itself running on other threads, as well as against other similar implementations; the inherent VM operation race is resolved by retrying. BUG=27415 TEST=Included unittests. Review URL: http://codereview.chromium.org/992008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42381 0039d316-1c4b-4281-b951-d872f2087c98
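
The heart of the rewrite is visible in FunctionStubAsm below: the stub begins (and, once bypassed, stays) with an indirect jump through a data word, so diverting it requires only an aligned pointer-sized data write rather than rewriting instruction bytes some other thread may be executing. A minimal standalone sketch of that idea (hypothetical TinyStub/Original/Replacement names, 32-bit x86 Windows only; not the committed code):

// Sketch: redirecting control flow by writing data, not code. The stub's
// only instruction bytes are an indirect jump through a naturally aligned
// data word; rewriting that word is a single pointer-sized store, so no
// instruction bytes ever change while another thread might execute them.
#include <windows.h>
#include <cstdint>
#include <cstdio>

#pragma pack(push, 1)
struct StubCode {
  uint16_t jmp_ind;     // FF 25: jmp dword ptr [addr32]
  uintptr_t slot_addr;  // absolute address of the dispatch word
};
#pragma pack(pop)

struct TinyStub {
  StubCode code;     // written once, before first use
  uintptr_t target;  // aligned data word; safe to rewrite at any time
};

static int __stdcall Original() { return 1; }
static int __stdcall Replacement() { return 2; }

int main() {
  // Stub code must live in executable memory.
  TinyStub* stub = static_cast<TinyStub*>(
      ::VirtualAlloc(NULL, sizeof(TinyStub), MEM_COMMIT | MEM_RESERVE,
                     PAGE_EXECUTE_READWRITE));
  stub->code.jmp_ind = 0x25ff;  // little-endian FF 25
  stub->code.slot_addr = reinterpret_cast<uintptr_t>(&stub->target);
  stub->target = reinterpret_cast<uintptr_t>(&Original);
  ::FlushInstructionCache(::GetCurrentProcess(), stub, sizeof(*stub));

  typedef int (__stdcall* Fn)();
  Fn fn = reinterpret_cast<Fn>(stub);
  printf("%d\n", fn());  // 1: dispatches to Original.

  // "Bypassing" the stub is one aligned data write, not a code rewrite.
  stub->target = reinterpret_cast<uintptr_t>(&Replacement);
  printf("%d\n", fn());  // 2: now dispatches to Replacement.

  ::VirtualFree(stub, 0, MEM_RELEASE);
  return 0;
}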
Diffstat (limited to 'chrome_frame')
-rw-r--r--  chrome_frame/chrome_frame.gyp                  |    6
-rw-r--r--  chrome_frame/function_stub.cc                  |  141
-rw-r--r--  chrome_frame/function_stub.h                   |  232
-rw-r--r--  chrome_frame/function_stub_unittest.cc         |  207
-rw-r--r--  chrome_frame/test/function_stub_unittest.cc    |   65
-rw-r--r--  chrome_frame/vtable_patch_manager.cc           |  112
-rw-r--r--  chrome_frame/vtable_patch_manager.h            |   11
-rw-r--r--  chrome_frame/vtable_patch_manager_unittest.cc  |  287
8 files changed, 813 insertions, 248 deletions
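
The VM-operation retry the commit message mentions works by re-reading the vtable slot after each successful VirtualProtect and installing the hook with a compare-and-swap, looping a bounded number of times; a concurrent third-party patcher racing on the same page merely forces another pass. A rough standalone sketch of that loop (TryPatchSlot is a hypothetical helper; the committed code additionally holds a global patch lock, guards the swap with SEH, and dispatches through a FunctionStub):

// Sketch: installing a hook into one vtable slot despite racing patchers
// (Win32). Not the committed API; illustrates the protect/CAS/retry shape.
#include <windows.h>

bool TryPatchSlot(void** slot, void* new_fn, int max_retries) {
  for (int i = 0; i < max_retries; ++i) {
    DWORD protect = 0;
    // A racing patcher may flip the page protection under us; if the page
    // can't be opened for writing right now, go around and retry.
    if (!::VirtualProtect(slot, sizeof(*slot), PAGE_EXECUTE_READWRITE,
                          &protect)) {
      continue;
    }
    // Re-read the current entry *after* the protect call, then install
    // new_fn only if nobody swapped the slot in the meantime.
    void* current = *slot;
    void* prev = ::InterlockedCompareExchangePointer(
        reinterpret_cast<void* volatile*>(slot), new_fn, current);
    ::VirtualProtect(slot, sizeof(*slot), protect, &protect);
    if (prev == current)
      return true;  // The swap took; the slot now points at new_fn.
  }
  return false;  // Lost the race max_retries times in a row.
}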
diff --git a/chrome_frame/chrome_frame.gyp b/chrome_frame/chrome_frame.gyp
index 36c2e62..159b762 100644
--- a/chrome_frame/chrome_frame.gyp
+++ b/chrome_frame/chrome_frame.gyp
@@ -131,6 +131,7 @@
'chrome_frame_npapi_unittest.cc',
'chrome_frame_unittest_main.cc',
'chrome_launcher_unittest.cc',
+ 'function_stub_unittest.cc',
'test/com_message_event_unittest.cc',
'test/html_util_unittests.cc',
'test/http_negotiate_unittest.cc',
@@ -141,6 +142,7 @@
'unittest_precompile.cc',
'urlmon_upload_data_stream.cc',
'urlmon_upload_data_stream_unittest.cc',
+ 'vtable_patch_manager_unittest.cc',
],
'include_dirs': [
# To allow including "chrome_tab.h"
@@ -202,7 +204,6 @@
'test/chrome_frame_test_utils.h',
'test/chrome_frame_automation_mock.cc',
'test/chrome_frame_automation_mock.h',
- 'test/function_stub_unittest.cc',
'test/http_server.cc',
'test/http_server.h',
'test/proxy_factory_mock.cc',
@@ -344,6 +345,7 @@
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'chrome_frame_npapi',
+ 'chrome_frame_ie',
'npchrome_frame',
],
'sources': [
@@ -407,6 +409,7 @@
'../testing/gtest.gyp:gtest',
'../third_party/WebKit/WebKit/chromium/WebKit.gyp:webkit',
'base_noicu',
+ 'chrome_frame_ie',
'chrome_frame_npapi',
'chrome_frame_strings',
],
@@ -603,6 +606,7 @@
'find_dialog.cc',
'find_dialog.h',
'function_stub.h',
+ 'function_stub.cc',
'http_negotiate.h',
'http_negotiate.cc',
'iids.cc',
diff --git a/chrome_frame/function_stub.cc b/chrome_frame/function_stub.cc
new file mode 100644
index 0000000..59a4029
--- /dev/null
+++ b/chrome_frame/function_stub.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "chrome_frame/function_stub.h"
+
+#include <new>
+#include "base/lock.h"
+#include "base/logging.h"
+
+#ifndef _M_IX86
+#error Only x86 supported right now.
+#endif
+
+namespace {
+enum AsmConstants {
+ POP_EAX = 0x58,
+ PUSH_IND = 0x35ff,
+ PUSH_EAX = 0x50,
+ JUMP_IND = 0x25ff,
+};
+
+// A quick and dirty wrapper class that allows us to defer allocating
+// the executable heap until first use, and to release it at teardown.
+class ExecutableHeap {
+ public:
+ ExecutableHeap() : heap_(NULL) {
+ }
+
+ ~ExecutableHeap() {
+ if (heap_ != NULL) {
+ BOOL ret = ::HeapDestroy(heap_);
+ heap_ = NULL;
+ }
+ }
+
+ void* Allocate(size_t size) {
+ if (!heap_)
+ CreateHeap();
+
+ DCHECK(heap_);
+
+ return ::HeapAlloc(heap_, 0, size);
+ }
+
+ void Free(void* ptr) {
+ DCHECK(heap_ != NULL);
+ ::HeapFree(heap_, 0, ptr);
+ }
+
+ void CreateHeap() {
+ AutoLock lock(init_lock_);
+
+ if (heap_ == NULL)
+ heap_ = ::HeapCreate(HEAP_CREATE_ENABLE_EXECUTE, 0, 0);
+ }
+
+ private:
+ Lock init_lock_;
+ HANDLE heap_;
+};
+
+// Our executable heap instance, all stubs are allocated from here.
+ExecutableHeap heap_;
+
+} // namespace
+
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+
+bool FunctionStub::is_valid() const {
+ return signature_ == reinterpret_cast<HMODULE>(&__ImageBase) &&
+ !is_bypassed();
+}
+
+FunctionStub::FunctionStub(uintptr_t extra_argument, void* dest)
+ : signature_(reinterpret_cast<HMODULE>(&__ImageBase)),
+ argument_(extra_argument),
+ destination_function_(reinterpret_cast<uintptr_t>(dest)) {
+ bypass_address_ = reinterpret_cast<uintptr_t>(&stub_.pop_return_addr_);
+ Init(&stub_);
+}
+
+FunctionStub::~FunctionStub() {
+}
+
+void FunctionStub::Init(FunctionStubAsm* stub) {
+ DCHECK(stub != NULL);
+
+ stub->jump_to_bypass_ = JUMP_IND;
+ stub->bypass_target_addr_ = reinterpret_cast<uintptr_t>(&bypass_address_);
+ stub->pop_return_addr_ = POP_EAX;
+ stub->push_ = PUSH_IND;
+ stub->arg_addr_ = reinterpret_cast<uintptr_t>(&argument_);
+ stub->push_return_addr_ = PUSH_EAX;
+ stub->jump_to_target_ = JUMP_IND;
+ stub->target_addr_ = reinterpret_cast<uintptr_t>(&destination_function_);
+
+ // Flush the instruction cache for the newly written code.
+ BOOL ret = ::FlushInstructionCache(::GetCurrentProcess(),
+ stub,
+ sizeof(*stub));
+}
+
+void FunctionStub::BypassStub(void* new_target) {
+ set_bypass_address(reinterpret_cast<uintptr_t>(new_target));
+}
+
+FunctionStub* FunctionStub::Create(uintptr_t extra_argument, void* dest) {
+ DCHECK(dest);
+ FunctionStub* stub =
+ reinterpret_cast<FunctionStub*>(heap_.Allocate(sizeof(FunctionStub)));
+
+ if (stub != NULL)
+ new (stub) FunctionStub(extra_argument, dest);
+
+ return stub;
+}
+
+FunctionStub* FunctionStub::FromCode(void* address) {
+ // Address points to arbitrary code here, which may e.g.
+ // lie at the end of an executable segment, which in turn
+ // may terminate earlier than the last address we probe.
+ // We therefore execute under an SEH handler, so as not to crash
+ // on failed probes.
+ __try {
+ // Retrieve the candidate function stub.
+ FunctionStub* candidate = CONTAINING_RECORD(address, FunctionStub, stub_);
+ if (candidate->stub_.jump_to_bypass_ == JUMP_IND &&
+ candidate->signature_ == reinterpret_cast<HMODULE>(&__ImageBase)) {
+ return candidate;
+ }
+ } __except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+
+ return NULL;
+}
+
+bool FunctionStub::Destroy(FunctionStub* stub) {
+ heap_.Free(stub);
+
+ return true;
+}
diff --git a/chrome_frame/function_stub.h b/chrome_frame/function_stub.h
index 55f0c5e..09cd909 100644
--- a/chrome_frame/function_stub.h
+++ b/chrome_frame/function_stub.h
@@ -13,12 +13,28 @@
#pragma pack(push)
#pragma pack(1)
+struct FunctionStubAsm {
+ // The stub always starts with an indirect jump, which starts out jumping
+ // to the remainder of the stub. This means we can bypass the stub by
+ // rewriting the jump destination, which is data, in a manner that does
+ // not involve writing code, only writing data at a natural word boundary.
+ uint16 jump_to_bypass_; // indirect jump
+ uintptr_t bypass_target_addr_; // to the bypass target.
+ uint8 pop_return_addr_; // pop eax
+ uint16 push_; // push [arg] ; push...
+ uintptr_t arg_addr_; // ; extra argument
+ uint8 push_return_addr_; // push eax ; push the return address
+ uint16 jump_to_target_; // jmp [target] ; jump...
+ uintptr_t target_addr_; // ; to the hook function
+};
+
+#pragma pack(pop)
+
+
#ifndef _M_IX86
#error Only x86 supported right now.
#endif
-extern "C" IMAGE_DOS_HEADER __ImageBase;
-
// This struct is assembly code + signature. The purpose of the struct is to be
// able to hook an existing function with our own and store information such
// as the original function pointer with the code stub. Typically this is used
@@ -30,7 +46,7 @@ extern "C" IMAGE_DOS_HEADER __ImageBase;
//
// @note: This class is meant for __stdcall calling convention and
// it uses eax as a temporary variable. The struct can
-// be improved in the future to save eax before the
+// be improved in the future to save eax before the
// operation and then restore it.
//
// For instance if the function prototype is:
@@ -42,7 +58,7 @@ extern "C" IMAGE_DOS_HEADER __ImageBase;
// and we would like to add one static argument to make it, say:
//
// @code
-// LRESULT MyNewWndProc(WNDPROC original, HWND hwnd, UINT msg,
+// LRESULT MyNewWndProc(WNDPROC original, HWND hwnd, UINT msg,
// WPARAM wparam, LPARAM lparam);
// @endcode
//
@@ -53,189 +69,73 @@ extern "C" IMAGE_DOS_HEADER __ImageBase;
// SetClassLongPtr(wnd, GCLP_WNDPROC, stub->code());
// @endcode
struct FunctionStub {
- private:
- typedef enum AsmConstants {
- POP_EAX = 0x58,
- PUSH = 0x68,
- PUSH_EAX = 0x50,
- JUMP_RELATIVE = 0xE9
- };
-
- FunctionStub(uintptr_t extra_argument, void* dest)
- : signature_(reinterpret_cast<HMODULE>(&__ImageBase)) {
- Opcodes::Hook& hook = code_.hook_;
- hook.pop_return_addr_ = POP_EAX;
- hook.push_ = PUSH;
- hook.arg_ = extra_argument;
- hook.push_return_addr_ = PUSH_EAX;
- hook.jump_to_ = JUMP_RELATIVE;
-
- // Calculate the target jump to the destination function.
- hook.target_ = CalculateRelativeJump(dest, &hook.jump_to_);
-
- // Allow the process to execute this struct as code.
- DWORD old_protect = 0;
- // Allow reads too since we want read-only member variable access at
- // all times.
- ::VirtualProtect(this, sizeof(FunctionStub), PAGE_EXECUTE_READ,
- &old_protect);
- }
-
- ~FunctionStub() {
- // No more execution rights.
- DWORD old_protect = 0;
- ::VirtualProtect(this, sizeof(FunctionStub), PAGE_READWRITE, &old_protect);
- }
-
- // Calculates the target value for a relative jump.
- // The function assumes that the size of the opcode is 1 byte.
- inline uintptr_t CalculateRelativeJump(const void* absolute_to,
- const void* absolute_from) const {
- return (reinterpret_cast<uintptr_t>(absolute_to) -
- reinterpret_cast<uintptr_t>(absolute_from)) -
- (sizeof(uint8) + sizeof(uintptr_t));
- }
-
- // Does the opposite of what CalculateRelativeJump does, which is to
- // calculate back the absolute address that previously was relative to
- // some other address.
- inline uintptr_t CalculateAbsoluteAddress(const void* relative_to,
- uintptr_t relative_address) const {
- return relative_address + sizeof(uint8) + sizeof(uintptr_t) +
- reinterpret_cast<uintptr_t>(relative_to);
- }
-
- // Used to identify function stubs that belong to this module.
- HMODULE signature_;
-
- // IMPORTANT: Do not change the order of member variables
- union Opcodes {
- // Use this struct when the stub forwards the call to our hook function
- // providing an extra argument.
- struct Hook {
- uint8 pop_return_addr_; // pop eax
- uint8 push_; // push arg ; push...
- uintptr_t arg_; // ; extra argument
- uint8 push_return_addr_; // push eax ; push the return address
- uint8 jump_to_; // jmp ; jump...
- uintptr_t target_; // ; to the hook function
- } hook_;
- // When the stub is bypassed, we jump directly to a given target,
- // usually the originally hooked function.
- struct Bypassed {
- uint8 jump_to_; // jmp to
- uintptr_t target_; // relative target.
- } bypassed_;
- };
-
- Opcodes code_;
-
public:
// Neutralizes this stub and converts it to a direct jump to a new target.
- void BypassStub(void* new_target) {
- DWORD old_protect = 0;
- // Temporarily allow us to write to member variables
- ::VirtualProtect(this, sizeof(FunctionStub), PAGE_EXECUTE_READWRITE,
- &old_protect);
-
- // Now, just change the first 5 bytes to jump directly to the new target.
- Opcodes::Bypassed& bypassed = code_.bypassed_;
- bypassed.jump_to_ = JUMP_RELATIVE;
- bypassed.target_ = CalculateRelativeJump(new_target, &bypassed.jump_to_);
-
- // Restore the previous protection flags.
- ::VirtualProtect(this, sizeof(FunctionStub), old_protect, &old_protect);
-
- // Flush the instruction cache just in case.
- ::FlushInstructionCache(::GetCurrentProcess(), this, sizeof(FunctionStub));
- }
-
- // @returns the argument supplied in the call to @ref Create
- inline uintptr_t argument() const {
- DCHECK(code_.hook_.pop_return_addr_ == POP_EAX) << "stub has been disabled";
- return code_.hook_.arg_;
- }
+ void BypassStub(void* new_target);
inline bool is_bypassed() const {
- return code_.bypassed_.jump_to_ == JUMP_RELATIVE;
- }
-
- inline uintptr_t absolute_target() const {
- DCHECK(code_.hook_.pop_return_addr_ == POP_EAX) << "stub has been disabled";
- return CalculateAbsoluteAddress(
- reinterpret_cast<const void*>(&code_.hook_.jump_to_),
- code_.hook_.target_);
+ return bypass_address_ !=
+ reinterpret_cast<uintptr_t>(&stub_.pop_return_addr_);
}
// Returns true if the stub is valid and enabled.
// Don't call this method after bypassing the stub.
- inline bool is_valid() const {
- return signature_ == reinterpret_cast<HMODULE>(&__ImageBase) &&
- code_.hook_.pop_return_addr_ == POP_EAX;
- }
+ bool is_valid() const;
inline PROC code() const {
- return reinterpret_cast<PROC>(const_cast<Opcodes*>(&code_));
+ return reinterpret_cast<PROC>(const_cast<FunctionStubAsm*>(&stub_));
}
// Use to create a new function stub as shown above.
- //
// @param extra_argument The static argument to pass to the function.
// @param dest Target function to which the stub applies.
// @returns NULL if an error occurs, otherwise a pointer to the
- // function stub.
- //
- // TODO(tommi): Change this so that VirtualAlloc isn't called for
- // every stub. Instead we should re-use each allocation for
- // multiple stubs. In practice I'm guessing that there would
- // only be one allocation per process, since each allocation actually
- // allocates at least one page of memory (4K). Size of FunctionStub
- // is 12 bytes, so one page could house 341 function stubs.
- // When stubs are created frequently, VirtualAlloc could fail
- // and last error is ERROR_NOT_ENOUGH_MEMORY (8).
- static FunctionStub* Create(uintptr_t extra_argument, void* dest) {
- DCHECK(dest);
-
- // Use VirtualAlloc to get memory for the stub. This gives us a
- // whole page that we don't share with anyone else.
- // Initially the memory must be READWRITE to allow for construction
- // PAGE_EXECUTE is set in constructor.
- FunctionStub* ret = reinterpret_cast<FunctionStub*>(VirtualAlloc(NULL,
- sizeof(FunctionStub), MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
-
- if (!ret) {
- NOTREACHED();
- } else {
- // Construct
- ret->FunctionStub::FunctionStub(extra_argument, dest);
- }
-
- return ret;
- }
+ // function stub.
+ static FunctionStub* Create(uintptr_t extra_argument, void* dest);
+
+ // Test whether address (likely) points to an existing function stub.
+ // @returns NULL if address does not point to a function stub.
+ // @note "likely" means a false-positive probability of about 2^-48 here.
+ static FunctionStub* FromCode(void* address);
+
+ // Deallocates a FunctionStub.
+ // The stub must not be in use on any thread!
+ static bool Destroy(FunctionStub* stub);
- static FunctionStub* FromCode(void* address) {
- Opcodes* code = reinterpret_cast<Opcodes*>(address);
- if (code->hook_.pop_return_addr_ == POP_EAX) {
- FunctionStub* stub = reinterpret_cast<FunctionStub*>(
- reinterpret_cast<uint8*>(address) - sizeof(HMODULE));
- if (stub->is_valid())
- return stub;
- }
+ // Accessors.
+ uintptr_t argument() const { return argument_; }
+ void set_argument(uintptr_t argument) { argument_ = argument; }
- return NULL;
+ uintptr_t bypass_address() const { return bypass_address_; }
+ void set_bypass_address(uintptr_t bypass_address) {
+ bypass_address_ = bypass_address;
}
- // Deallocates a FunctionStub. The stub must not be in use on any thread!
- static bool Destroy(FunctionStub* stub) {
- DCHECK(stub != NULL);
- FunctionStub* to_free = reinterpret_cast<FunctionStub*>(stub);
- to_free->FunctionStub::~FunctionStub();
- BOOL success = VirtualFree(to_free, sizeof(FunctionStub), MEM_DECOMMIT);
- DCHECK(success) << "VirtualFree";
- return success != FALSE;
+ uintptr_t destination_function() const { return destination_function_; }
+ void set_destination_function(uintptr_t destination_function) {
+ destination_function_ = destination_function;
}
-};
-#pragma pack(pop)
+ protected:
+ // Protected for testing only.
+ FunctionStub(uintptr_t extra_argument, void* dest);
+ ~FunctionStub();
+
+ void Init(FunctionStubAsm* stub);
+
+ FunctionStubAsm stub_;
+
+ // Used to identify function stubs that belong to this module.
+ HMODULE signature_;
+
+ // This is the argument value that gets passed to the destination_function_.
+ uintptr_t argument_;
+ // Bypass address, if this is the address of the pop_return_addr_, the
+ // function stub is not bypassed.
+ uintptr_t bypass_address_;
+ // The destination function we dispatch to, not used if the stub
+ // is bypassed.
+ uintptr_t destination_function_;
+};
#endif // CHROME_FRAME_FUNCTION_STUB_H_
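
For reference, the WndProc example from the header's doc comment maps onto the new interface roughly as follows (a sketch with hypothetical MyNewWndProc/HookWindow/UnhookWindow names; error handling elided):

// Sketch: hooking a window procedure via FunctionStub, per the header's
// own example; everything except FunctionStub's members is hypothetical.
#include <windows.h>
#include "chrome_frame/function_stub.h"

// The stub pushes the extra argument -- here the original WNDPROC --
// ahead of the window procedure's normal arguments.
LRESULT CALLBACK MyNewWndProc(WNDPROC original, HWND hwnd, UINT msg,
                              WPARAM wparam, LPARAM lparam) {
  // ... observe or rewrite the message here ...
  return ::CallWindowProc(original, hwnd, msg, wparam, lparam);
}

FunctionStub* HookWindow(HWND wnd) {
  WNDPROC original = reinterpret_cast<WNDPROC>(
      ::GetWindowLongPtr(wnd, GWLP_WNDPROC));
  FunctionStub* stub = FunctionStub::Create(
      reinterpret_cast<uintptr_t>(original), &MyNewWndProc);
  if (stub != NULL) {
    ::SetWindowLongPtr(wnd, GWLP_WNDPROC,
                       reinterpret_cast<LONG_PTR>(stub->code()));
  }
  return stub;
}

void UnhookWindow(FunctionStub* stub) {
  // Neutralize rather than free: the bypassed stub keeps forwarding to
  // the original function in case someone else chained off our hook.
  stub->BypassStub(reinterpret_cast<void*>(stub->argument()));
}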
diff --git a/chrome_frame/function_stub_unittest.cc b/chrome_frame/function_stub_unittest.cc
new file mode 100644
index 0000000..4ad2c31
--- /dev/null
+++ b/chrome_frame/function_stub_unittest.cc
@@ -0,0 +1,207 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "chrome_frame/function_stub.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+// Test subclass to expose extra stuff.
+class TestFunctionStub: public FunctionStub {
+ public:
+ static void Init(TestFunctionStub* stub) {
+ stub->FunctionStub::Init(&stub->stub_);
+ }
+
+ // Expose the offset to our signature_ field.
+ static const size_t kSignatureOffset;
+
+ void set_signature(HMODULE signature) { signature_ = signature; }
+};
+
+const size_t TestFunctionStub::kSignatureOffset =
+ FIELD_OFFSET(TestFunctionStub, signature_);
+
+class FunctionStubTest: public testing::Test {
+ public:
+ FunctionStubTest() : stub_(NULL) {
+ }
+
+ virtual void SetUp() {
+ SYSTEM_INFO sys_info;
+ ::GetSystemInfo(&sys_info);
+
+ // Playpen size is a system page.
+ playpen_size_ = sys_info.dwPageSize;
+
+ // Reserve two pages.
+ playpen_ = reinterpret_cast<uint8*>(
+ ::VirtualAlloc(NULL,
+ 2 * playpen_size_,
+ MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE));
+ ASSERT_TRUE(playpen_ != NULL);
+
+ // And commit the first one.
+ ASSERT_TRUE(::VirtualAlloc(playpen_,
+ playpen_size_,
+ MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE));
+ }
+
+ virtual void TearDown() {
+ if (stub_ != NULL) {
+ EXPECT_TRUE(FunctionStub::Destroy(stub_));
+ }
+
+ if (playpen_ != NULL) {
+ EXPECT_TRUE(::VirtualFree(playpen_, 0, MEM_RELEASE));
+ }
+ }
+
+ protected:
+ typedef uintptr_t (CALLBACK *FuncPtr0)();
+ typedef uintptr_t (CALLBACK *FuncPtr1)(uintptr_t arg);
+
+ MOCK_METHOD0(Foo0, uintptr_t());
+ MOCK_METHOD1(Foo1, uintptr_t(uintptr_t));
+ MOCK_METHOD0(Bar0, uintptr_t());
+ MOCK_METHOD1(Bar1, uintptr_t(uintptr_t));
+
+ static uintptr_t CALLBACK FooCallback0(FunctionStubTest* test) {
+ return test->Foo0();
+ }
+ static uintptr_t CALLBACK FooCallback1(FunctionStubTest* test, uintptr_t arg) {
+ return test->Foo1(arg);
+ }
+ static uintptr_t CALLBACK BarCallback0(FunctionStubTest* test) {
+ return test->Bar0();
+ }
+ static uintptr_t CALLBACK BarCallback1(FunctionStubTest* test, uintptr_t arg) {
+ return test->Bar1(arg);
+ }
+
+ // If a stub is allocated during testing, assigning it here
+ // will deallocate it at the end of test.
+ FunctionStub* stub_;
+
+ // playpen_[0 .. playpen_size_ - 1] is committed, writable memory.
+ // playpen_[playpen_size_] and beyond is reserved but uncommitted memory.
+ uint8* playpen_;
+ size_t playpen_size_;
+};
+
+const uintptr_t kDivertedRetVal = 0x42;
+const uintptr_t kFooRetVal = 0xCAFEBABE;
+const uintptr_t kFooArg = 0xF0F0F0F0;
+
+uintptr_t CALLBACK Foo() {
+ return kFooRetVal;
+}
+
+uintptr_t CALLBACK FooDivert(uintptr_t arg) {
+ return kFooRetVal;
+}
+
+} // namespace
+
+TEST_F(FunctionStubTest, Accessors) {
+ uintptr_t argument = reinterpret_cast<uintptr_t>(this);
+ uintptr_t dest_fn = reinterpret_cast<uintptr_t>(FooDivert);
+ stub_ = FunctionStub::Create(argument, FooDivert);
+
+ EXPECT_FALSE(stub_->is_bypassed());
+ EXPECT_TRUE(stub_->is_valid());
+ EXPECT_TRUE(stub_->code() != NULL);
+
+ // Check that the stub code is executable.
+ MEMORY_BASIC_INFORMATION info = {};
+ EXPECT_NE(0, ::VirtualQuery(stub_->code(), &info, sizeof(info)));
+ const DWORD kExecutableMask = PAGE_EXECUTE | PAGE_EXECUTE_READ |
+ PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY;
+ EXPECT_NE(0, info.Protect & kExecutableMask);
+
+ EXPECT_EQ(argument, stub_->argument());
+ EXPECT_TRUE(stub_->bypass_address() != NULL);
+ EXPECT_EQ(dest_fn, stub_->destination_function());
+}
+
+TEST_F(FunctionStubTest, ZeroArgumentStub) {
+ stub_ = FunctionStub::Create(reinterpret_cast<uintptr_t>(this),
+ &FunctionStubTest::FooCallback0);
+
+ FuncPtr0 func = reinterpret_cast<FuncPtr0>(stub_->code());
+ EXPECT_CALL(*this, Foo0())
+ .WillOnce(testing::Return(kDivertedRetVal));
+
+ EXPECT_EQ(kDivertedRetVal, func());
+}
+
+TEST_F(FunctionStubTest, OneArgumentStub) {
+ stub_ = FunctionStub::Create(reinterpret_cast<uintptr_t>(this),
+ &FunctionStubTest::FooCallback1);
+
+ FuncPtr1 func = reinterpret_cast<FuncPtr1>(stub_->code());
+ EXPECT_CALL(*this, Foo1(kFooArg))
+ .WillOnce(testing::Return(kDivertedRetVal));
+
+ EXPECT_EQ(kDivertedRetVal, func(kFooArg));
+}
+
+TEST_F(FunctionStubTest, Bypass) {
+ stub_ = FunctionStub::Create(reinterpret_cast<uintptr_t>(this),
+ &FunctionStubTest::FooCallback0);
+
+ FuncPtr0 func = reinterpret_cast<FuncPtr0>(stub_->code());
+ EXPECT_CALL(*this, Foo0())
+ .WillOnce(testing::Return(kDivertedRetVal));
+
+ // This will call through to foo.
+ EXPECT_EQ(kDivertedRetVal, func());
+
+ // Now bypass to Foo().
+ stub_->BypassStub(Foo);
+ EXPECT_TRUE(stub_->is_bypassed());
+ EXPECT_FALSE(stub_->is_valid());
+
+ // We should not call through anymore.
+ EXPECT_CALL(*this, Foo0())
+ .Times(0);
+
+ EXPECT_EQ(kFooRetVal, func());
+}
+
+TEST_F(FunctionStubTest, FromCode) {
+ // We should get NULL and no crash from reserved memory.
+ EXPECT_EQ(NULL, FunctionStub::FromCode(playpen_ + playpen_size_));
+
+ // Create a FunctionStub pointer whose signature_
+ // field hangs just off the playpen.
+ TestFunctionStub* stub =
+ reinterpret_cast<TestFunctionStub*>(playpen_ + playpen_size_ -
+ TestFunctionStub::kSignatureOffset);
+ TestFunctionStub::Init(stub);
+ EXPECT_EQ(NULL, FunctionStub::FromCode(stub));
+
+ // Create a stub in committed memory.
+ stub = reinterpret_cast<TestFunctionStub*>(playpen_);
+ TestFunctionStub::Init(stub);
+ // Signature is NULL, which won't do.
+ EXPECT_EQ(NULL, FunctionStub::FromCode(stub));
+
+ const DWORD kFlags = GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT;
+
+ HMODULE my_module = NULL;
+ EXPECT_TRUE(::GetModuleHandleEx(kFlags,
+ reinterpret_cast<const wchar_t*>(&kDivertedRetVal),
+ &my_module));
+
+ // Set our module as signature.
+ stub->set_signature(my_module);
+ EXPECT_EQ(stub, FunctionStub::FromCode(stub));
+}
+
diff --git a/chrome_frame/test/function_stub_unittest.cc b/chrome_frame/test/function_stub_unittest.cc
deleted file mode 100644
index 8ac107c..0000000
--- a/chrome_frame/test/function_stub_unittest.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "chrome_frame/function_stub.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#define NO_INLINE __declspec(noinline)
-
-namespace {
-
-typedef int (__stdcall* FooPrototype)();
-
-NO_INLINE int __stdcall Foo() {
- return 1;
-}
-
-NO_INLINE int __stdcall PatchedFoo(FooPrototype original) {
- return original() + 1;
-}
-
-} // end namespace
-
-TEST(PatchTests, FunctionStub) {
- EXPECT_EQ(Foo(), 1);
- // Create a function stub that calls PatchedFoo and supplies it with
- // a pointer to Foo.
- FunctionStub* stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(&Foo),
- &PatchedFoo);
- EXPECT_TRUE(stub != NULL);
- // Call the stub as it were Foo(). The call should get forwarded to Foo().
- FooPrototype patch = reinterpret_cast<FooPrototype>(stub->code());
- EXPECT_EQ(patch(), 2);
- // Now neutralize the stub so that it calls Foo() directly without touching
- // PatchedFoo().
- // stub->BypassStub(&Foo);
- stub->BypassStub(reinterpret_cast<void*>(stub->argument()));
- EXPECT_EQ(patch(), 1);
- // We're done with the stub.
- FunctionStub::Destroy(stub);
-}
-
-// Basic tests to check the validity of a stub.
-TEST(PatchTests, FunctionStubCompare) {
- EXPECT_EQ(Foo(), 1);
-
- // Detect the absence of a stub
- FunctionStub* stub = reinterpret_cast<FunctionStub*>(&Foo);
- EXPECT_FALSE(stub->is_valid());
-
- stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(&Foo), &PatchedFoo);
- EXPECT_TRUE(stub != NULL);
- EXPECT_TRUE(stub->is_valid());
-
- FooPrototype patch = reinterpret_cast<FooPrototype>(stub->code());
- EXPECT_EQ(patch(), 2);
-
- // See if we can get the correct absolute pointer to the hook function
- // back from the stub.
- EXPECT_EQ(stub->absolute_target(), reinterpret_cast<uintptr_t>(&PatchedFoo));
-
- // Also verify that the argument being passed to the hook function is indeed
- // the pointer to the original function (again, absolute not relative).
- EXPECT_EQ(stub->argument(), reinterpret_cast<uintptr_t>(&Foo));
-}
diff --git a/chrome_frame/vtable_patch_manager.cc b/chrome_frame/vtable_patch_manager.cc
index c070211..dc88079 100644
--- a/chrome_frame/vtable_patch_manager.cc
+++ b/chrome_frame/vtable_patch_manager.cc
@@ -5,7 +5,10 @@
#include "chrome_frame/vtable_patch_manager.h"
#include <algorithm>
+#include <atlcomcli.h>
+#include "base/atomicops.h"
+#include "base/lock.h"
#include "base/logging.h"
#include "base/scoped_ptr.h"
@@ -13,6 +16,40 @@
namespace vtable_patch {
+// The number of times we retry a patch/unpatch operation in case of
+// VM races with other 3rd party software trying to patch the same thing.
+const int kMaxRetries = 3;
+
+// We hold a lock over all patching operations to make sure that we don't
+// e.g. race on VM operations to the same patches, or to physical pages
+// shared across different VTABLEs.
+Lock patch_lock_;
+
+namespace internal {
+// Because other parties in our process might be attempting to patch the same
+// virtual tables at the same time, we have a race to modify the VM protections
+// on the pages. We also need to do a compare/swap type operation when we
+// modify the function, so as to be sure that we grab the most recent value.
+// Hence the SEH blocks and the nasty-looking compare/swap operation.
+bool ReplaceFunctionPointer(void** entry, void* new_proc, void* curr_proc) {
+ __try {
+ base::subtle::Atomic32 prev_value;
+
+ prev_value = base::subtle::NoBarrier_CompareAndSwap(
+ reinterpret_cast<base::subtle::Atomic32 volatile*>(entry),
+ reinterpret_cast<base::subtle::Atomic32>(curr_proc),
+ reinterpret_cast<base::subtle::Atomic32>(new_proc));
+
+ return curr_proc == reinterpret_cast<void*>(prev_value);
+ } __except(EXCEPTION_EXECUTE_HANDLER) {
+ // Oops, we took exception on access.
+ }
+
+ return false;
+}
+
+} // namespace internal
+
// Convenient definition of a VTABLE
typedef PROC* Vtable;
@@ -32,57 +69,100 @@ HRESULT PatchInterfaceMethods(void* unknown, MethodPatchInfo* patches) {
Vtable vtable = GetIFVTable(unknown);
DCHECK(vtable);
+ // All VM operations, patching and manipulation of MethodPatchInfo
+ // is done under a global lock, to ensure multiple threads don't
+ // race, whether on an individual patch, or on VM operations to
+ // the same physical pages.
+ AutoLock lock(patch_lock_);
+
for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
if (it->stub_ != NULL) {
// If this DCHECK fires it means that we are using the same VTable
- // information to patch two different interfaces.
- DCHECK(false);
- DLOG(ERROR) << "Attempting to patch two different VTables with the "
- << "same VTable information";
+ // information to patch two different interfaces, or we've lost a
+ // race with another thread that's patching the same interface.
+ DLOG(WARNING) << "Attempting to patch two different VTables with the "
+ "same VTable information, or patching the same interface on "
+ "multiple threads";
continue;
}
PROC original_fn = vtable[it->index_];
- FunctionStub* stub = FunctionStub::FromCode(original_fn);
+ FunctionStub* stub = NULL;
+
+#ifndef NDEBUG
+ stub = FunctionStub::FromCode(original_fn);
if (stub != NULL) {
DLOG(ERROR) << "attempt to patch a function that's already patched";
- DCHECK(stub->absolute_target() ==
+ DCHECK(stub->destination_function() ==
reinterpret_cast<uintptr_t>(it->method_)) <<
"patching the same method multiple times with different hooks?";
continue;
}
+#endif
stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(original_fn),
it->method_);
if (!stub) {
NOTREACHED();
return E_OUTOFMEMORY;
- } else {
+ }
+
+ // Do the VM operations and the patching in a loop, to try and ensure
+ // we succeed even if there's a VM operation or a patch race against
+ // other 3rd parties patching.
+ bool succeeded = false;
+ for (int i = 0; !succeeded && i < kMaxRetries; ++i) {
DWORD protect = 0;
- if (::VirtualProtect(&vtable[it->index_], sizeof(PROC),
- PAGE_EXECUTE_READWRITE, &protect)) {
- it->stub_ = stub; // save the stub
- vtable[it->index_] = stub->code();
- ::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect,
- &protect);
- } else {
- NOTREACHED();
+ if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC),
+ PAGE_EXECUTE_READWRITE, &protect)) {
+ HRESULT hr = AtlHresultFromLastError();
+ DLOG(ERROR) << "VirtualProtect failed 0x" << std::hex << hr;
+
+ // Go around again in the feeble hope that this is
+ // a temporary problem.
+ continue;
+ }
+ original_fn = vtable[it->index_];
+ stub->set_argument(reinterpret_cast<uintptr_t>(original_fn));
+ succeeded = internal::ReplaceFunctionPointer(
+ reinterpret_cast<void**>(&vtable[it->index_]), stub->code(),
+ original_fn);
+
+ if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect,
+ &protect)) {
+ DLOG(ERROR) << "VirtualProtect failed to restore protection";
}
}
+
+ if (!succeeded) {
+ FunctionStub::Destroy(stub);
+ stub = NULL;
+
+ DLOG(ERROR) << "Failed to patch VTable.";
+ return E_FAIL;
+ } else {
+ // Success, save the stub we created.
+ it->stub_ = stub;
+ }
}
return S_OK;
}
HRESULT UnpatchInterfaceMethods(MethodPatchInfo* patches) {
+ AutoLock lock(patch_lock_);
+
for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) {
if (it->stub_) {
- DCHECK(it->stub_->absolute_target() ==
+ DCHECK(it->stub_->destination_function() ==
reinterpret_cast<uintptr_t>(it->method_));
// Modify the stub to just jump directly to the original function.
it->stub_->BypassStub(reinterpret_cast<void*>(it->stub_->argument()));
it->stub_ = NULL;
// Leave the stub in memory so that we won't break any possible chains.
+
+ // TODO(siggi): why not restore the original VTBL pointer here, provided
+ // we haven't been chained?
} else {
DLOG(WARNING) << "attempt to unpatch a function that wasn't patched";
}
diff --git a/chrome_frame/vtable_patch_manager.h b/chrome_frame/vtable_patch_manager.h
index f326def..acf3cdb 100644
--- a/chrome_frame/vtable_patch_manager.h
+++ b/chrome_frame/vtable_patch_manager.h
@@ -12,9 +12,20 @@
#include "base/lock.h"
struct FunctionStub;
+
// This namespace provides methods to patch VTable methods of COM interfaces.
namespace vtable_patch {
+// Internal implementation, exposed only for testing.
+namespace internal {
+
+// Replaces *entry with new_proc iff *entry is curr_proc.
+// Returns true iff *entry was rewritten.
+// Note: does not crash on access violation.
+bool ReplaceFunctionPointer(void** entry, void* new_proc, void* curr_proc);
+
+} // namespace internal
+
// This structure represents information about one VTable method.
// We allocate an array of these structures per VTable that we patch to
// remember the original method. We also use this structure to actually
diff --git a/chrome_frame/vtable_patch_manager_unittest.cc b/chrome_frame/vtable_patch_manager_unittest.cc
new file mode 100644
index 0000000..dd1eb83
--- /dev/null
+++ b/chrome_frame/vtable_patch_manager_unittest.cc
@@ -0,0 +1,287 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome_frame/vtable_patch_manager.h"
+#include <unknwn.h>
+#include "base/thread.h"
+#include "base/scoped_handle.h"
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+
+namespace {
+// GMock names we use.
+using testing::_;
+using testing::Return;
+
+class MockClassFactory : public IClassFactory {
+ public:
+ MOCK_METHOD2_WITH_CALLTYPE(__stdcall, QueryInterface,
+ HRESULT(REFIID riid, void **object));
+ MOCK_METHOD0_WITH_CALLTYPE(__stdcall, AddRef, ULONG());
+ MOCK_METHOD0_WITH_CALLTYPE(__stdcall, Release, ULONG());
+ MOCK_METHOD3_WITH_CALLTYPE(__stdcall, CreateInstance,
+ HRESULT (IUnknown *outer, REFIID riid, void **object));
+ MOCK_METHOD1_WITH_CALLTYPE(__stdcall, LockServer, HRESULT(BOOL lock));
+};
+
+// Retrieve the vtable for an interface.
+void* GetVtable(IUnknown* unk) {
+ return *reinterpret_cast<void**>(unk);
+}
+
+// Forward decl.
+extern vtable_patch::MethodPatchInfo IClassFactory_PatchInfo[];
+
+class VtablePatchManagerTest: public testing::Test {
+ public:
+ VtablePatchManagerTest() {
+ EXPECT_TRUE(current_ == NULL);
+ current_ = this;
+ }
+
+ ~VtablePatchManagerTest() {
+ EXPECT_TRUE(current_ == this);
+ current_ = NULL;
+ }
+
+ virtual void SetUp() {
+ // Make a backup of the test vtable and its page protection settings.
+ void* vtable = GetVtable(&factory_);
+ MEMORY_BASIC_INFORMATION info;
+ ASSERT_TRUE(::VirtualQuery(vtable, &info, sizeof(info)));
+ vtable_protection_ = info.Protect;
+ memcpy(vtable_backup_, vtable, sizeof(vtable_backup_));
+ }
+
+ virtual void TearDown() {
+ // Unpatch to make sure we've restored state for subsequent tests.
+ UnpatchInterfaceMethods(IClassFactory_PatchInfo);
+
+ // Restore the test vtable and its page protection settings.
+ void* vtable = GetVtable(&factory_);
+ DWORD old_protect = 0;
+ EXPECT_TRUE(::VirtualProtect(vtable, sizeof(vtable_backup_),
+ PAGE_EXECUTE_WRITECOPY, &old_protect));
+ memcpy(vtable, vtable_backup_, sizeof(vtable_backup_));
+ EXPECT_TRUE(::VirtualProtect(vtable, sizeof(vtable_backup_),
+ vtable_protection_, &old_protect));
+ }
+
+ typedef HRESULT (__stdcall* LockServerFun)(IClassFactory* self, BOOL lock);
+ MOCK_METHOD3(LockServerPatch,
+ HRESULT(LockServerFun old_fun, IClassFactory* self, BOOL lock));
+
+ static HRESULT STDMETHODCALLTYPE LockServerPatchCallback(
+ LockServerFun fun, IClassFactory* self, BOOL lock) {
+ EXPECT_TRUE(current_ != NULL);
+ if (current_ != NULL)
+ return current_->LockServerPatch(fun, self, lock);
+ else
+ return E_UNEXPECTED;
+ }
+
+ protected:
+ // Number of functions in the IClassFactory vtable.
+ static const size_t kFunctionCount = 5;
+
+ // Backup of the factory_ vtable as we found it at Setup.
+ PROC vtable_backup_[kFunctionCount];
+ // VirtualProtect flags on the factory_ vtable as we found it at Setup.
+ DWORD vtable_protection_;
+
+ // The mock factory class we patch.
+ MockClassFactory factory_;
+
+ // Current test running for routing the patch callback function.
+ static VtablePatchManagerTest* current_;
+};
+
+VtablePatchManagerTest* VtablePatchManagerTest::current_ = NULL;
+
+BEGIN_VTABLE_PATCHES(IClassFactory)
+ VTABLE_PATCH_ENTRY(4, &VtablePatchManagerTest::LockServerPatchCallback)
+END_VTABLE_PATCHES();
+
+} // namespace
+
+TEST_F(VtablePatchManagerTest, ReplacePointer) {
+ void* const kFunctionOriginal = reinterpret_cast<void*>(0xCAFEBABE);
+ void* const kFunctionFoo = reinterpret_cast<void*>(0xF0F0F0F0);
+ void* const kFunctionBar = reinterpret_cast<void*>(0xBABABABA);
+
+ using vtable_patch::internal::ReplaceFunctionPointer;
+ // Replacing a non-writable location should fail, but not crash.
+ EXPECT_FALSE(ReplaceFunctionPointer(NULL, kFunctionBar, kFunctionFoo));
+
+ void* foo_entry = kFunctionOriginal;
+ // Replacing with the wrong original function should
+ // fail and not change the entry.
+ EXPECT_FALSE(ReplaceFunctionPointer(&foo_entry, kFunctionBar, kFunctionFoo));
+ EXPECT_EQ(foo_entry, kFunctionOriginal);
+
+ // Replacing with the correct original should succeed.
+ EXPECT_TRUE(ReplaceFunctionPointer(&foo_entry,
+ kFunctionBar,
+ kFunctionOriginal));
+ EXPECT_EQ(foo_entry, kFunctionBar);
+}
+
+TEST_F(VtablePatchManagerTest, PatchInterfaceMethods) {
+ // Unpatched.
+ EXPECT_CALL(factory_, LockServer(TRUE))
+ .WillOnce(Return(E_FAIL));
+ EXPECT_EQ(E_FAIL, factory_.LockServer(TRUE));
+
+ EXPECT_HRESULT_SUCCEEDED(
+ PatchInterfaceMethods(&factory_, IClassFactory_PatchInfo));
+
+ EXPECT_NE(0, memcmp(GetVtable(&factory_),
+ vtable_backup_,
+ sizeof(vtable_backup_)));
+
+ // This should not be called while the patch is in effect.
+ EXPECT_CALL(factory_, LockServer(_))
+ .Times(0);
+
+ EXPECT_CALL(*this, LockServerPatch(testing::_, &factory_, TRUE))
+ .WillOnce(testing::Return(S_FALSE));
+
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+}
+
+TEST_F(VtablePatchManagerTest, UnpatchInterfaceMethods) {
+ // Patch it.
+ EXPECT_HRESULT_SUCCEEDED(
+ PatchInterfaceMethods(&factory_, IClassFactory_PatchInfo));
+
+ EXPECT_NE(0, memcmp(GetVtable(&factory_),
+ vtable_backup_,
+ sizeof(vtable_backup_)));
+
+ // This should not be called while the patch is in effect.
+ EXPECT_CALL(factory_, LockServer(testing::_))
+ .Times(0);
+
+ EXPECT_CALL(*this, LockServerPatch(testing::_, &factory_, TRUE))
+ .WillOnce(testing::Return(S_FALSE));
+
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+
+ // Now unpatch.
+ EXPECT_HRESULT_SUCCEEDED(
+ UnpatchInterfaceMethods(IClassFactory_PatchInfo));
+
+ // And check that the call comes through correctly.
+ EXPECT_CALL(factory_, LockServer(FALSE))
+ .WillOnce(testing::Return(E_FAIL));
+ EXPECT_EQ(E_FAIL, factory_.LockServer(FALSE));
+}
+
+TEST_F(VtablePatchManagerTest, DoublePatch) {
+ // Patch it.
+ EXPECT_HRESULT_SUCCEEDED(
+ PatchInterfaceMethods(&factory_, IClassFactory_PatchInfo));
+
+ // Capture the VTable after patching.
+ PROC vtable[kFunctionCount];
+ memcpy(vtable, GetVtable(&factory_), sizeof(vtable));
+
+ // Patch it again, this should be idempotent.
+ EXPECT_HRESULT_SUCCEEDED(
+ PatchInterfaceMethods(&factory_, IClassFactory_PatchInfo));
+
+ // Should not have changed the VTable on second call.
+ EXPECT_EQ(0, memcmp(vtable, GetVtable(&factory_), sizeof(vtable)));
+}
+
+namespace vtable_patch {
+// Expose internal implementation detail, purely for testing.
+extern Lock patch_lock_;
+
+} // namespace vtable_patch
+
+TEST_F(VtablePatchManagerTest, ThreadSafePatching) {
+ // It's difficult to test for threadsafe patching, but as a close proxy,
+ // test for no patching happening from a background thread while the patch
+ // lock is held.
+ base::Thread background("Background Test Thread");
+
+ EXPECT_TRUE(background.Start());
+ ScopedHandle event(::CreateEvent(NULL, TRUE, FALSE, NULL));
+
+ // Grab the patch lock.
+ vtable_patch::patch_lock_.Acquire();
+
+ // Instruct the background thread to patch factory_.
+ background.message_loop()->PostTask(FROM_HERE,
+ NewRunnableFunction(&vtable_patch::PatchInterfaceMethods,
+ &factory_,
+ &IClassFactory_PatchInfo[0]));
+
+ // And subsequently to signal the event. Neither of these actions should
+ // occur until we've released the patch lock.
+ background.message_loop()->PostTask(FROM_HERE,
+ NewRunnableFunction(::SetEvent, event.Get()));
+
+ // Wait for a little while, to give the background thread time to process.
+ // We expect this wait to time out, as the background thread should end up
+ // blocking on the patch lock.
+ EXPECT_EQ(WAIT_TIMEOUT, ::WaitForSingleObject(event.Get(), 50));
+
+ // Verify that patching did not take place yet.
+ EXPECT_CALL(factory_, LockServer(TRUE))
+ .WillOnce(Return(S_FALSE));
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+
+ // Release the lock and wait on the event again to ensure
+ // the patching has taken place now.
+ vtable_patch::patch_lock_.Release();
+ EXPECT_EQ(WAIT_OBJECT_0, ::WaitForSingleObject(event.Get(), INFINITE));
+
+ // We should not get called here anymore.
+ EXPECT_CALL(factory_, LockServer(TRUE))
+ .Times(0);
+
+ // But should be diverted here.
+ EXPECT_CALL(*this, LockServerPatch(_, &factory_, TRUE))
+ .WillOnce(Return(S_FALSE));
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+
+ // Same deal for unpatching.
+ ::ResetEvent(event.Get());
+
+ // Grab the patch lock.
+ vtable_patch::patch_lock_.Acquire();
+
+ // Instruct the background thread to unpatch.
+ background.message_loop()->PostTask(FROM_HERE,
+ NewRunnableFunction(&vtable_patch::UnpatchInterfaceMethods,
+ &IClassFactory_PatchInfo[0]));
+
+ // And subsequently to signal the event. Neither of these actions should
+ // occur until we've released the patch lock.
+ background.message_loop()->PostTask(FROM_HERE,
+ NewRunnableFunction(::SetEvent, event.Get()));
+
+ // Wait for a little while, to give the background thread time to process.
+ // We expect this wait to time out, as the background thread should end up
+ // blocking on the patch lock.
+ EXPECT_EQ(WAIT_TIMEOUT, ::WaitForSingleObject(event.Get(), 50));
+
+ // We should still be patched.
+ EXPECT_CALL(factory_, LockServer(TRUE))
+ .Times(0);
+ EXPECT_CALL(*this, LockServerPatch(_, &factory_, TRUE))
+ .WillOnce(Return(S_FALSE));
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+
+ // Release the patch lock and wait on the event.
+ vtable_patch::patch_lock_.Release();
+ EXPECT_EQ(WAIT_OBJECT_0, ::WaitForSingleObject(event.Get(), INFINITE));
+
+ // Verify that unpatching took place.
+ EXPECT_CALL(factory_, LockServer(TRUE))
+ .WillOnce(Return(S_FALSE));
+ EXPECT_EQ(S_FALSE, factory_.LockServer(TRUE));
+}