author    | jbates@chromium.org <jbates@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-02-23 17:52:20 +0000
committer | jbates@chromium.org <jbates@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-02-23 17:52:20 +0000
commit    | cd924d6e5efdf80f66283ac66987f2b339c381cf
tree      | 89d2ccd8588e12669b93eb98117e8d76a766fbf2 /base
parent    | 2e8bb832c7b3867b3a10b81621830add0dd5bfb5
Add ALIGNAS and ALIGNOF macros to ensure proper alignment of StaticMemorySingletonTraits
BUG=95006
Review URL: https://chromiumcodereview.appspot.com/9186057
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@123270 0039d316-1c4b-4281-b951-d872f2087c98
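For reference, a minimal sketch of how the ALIGNAS and ALIGNOF macros added to base/compiler_specific.h in this change are meant to be used, following the usage comments in the patch. The type name, buffer, and constant below are illustrative only and not part of the change:

    #include <stddef.h>

    #include "base/compiler_specific.h"

    // Request 16-byte alignment for a whole class. SSEData is a hypothetical
    // example type, not something introduced by this patch.
    class ALIGNAS(16) SSEData {
     public:
      float values[4];
    };

    // ALIGNAS also applies to plain data definitions.
    ALIGNAS(32) static char aligned_buffer[64];

    // ALIGNOF yields the byte alignment of a type at compile time:
    // ALIGNOF(SSEData) is 16 here, and ALIGNOF(int) is typically 4.
    static const size_t kSSEDataAlignment = ALIGNOF(SSEData);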
Diffstat (limited to 'base')
-rw-r--r-- | base/base.gyp                          |  1
-rw-r--r-- | base/compiler_specific.h               | 19
-rw-r--r-- | base/lazy_instance.h                   | 19
-rw-r--r-- | base/lazy_instance_unittest.cc         | 31
-rw-r--r-- | base/memory/aligned_memory.h           | 77
-rw-r--r-- | base/memory/aligned_memory_unittest.cc | 50
-rw-r--r-- | base/memory/singleton.h                | 18
-rw-r--r-- | base/memory/singleton_unittest.cc      | 37
-rw-r--r-- | base/stack_container.h                 | 14
-rw-r--r-- | base/stack_container_unittest.cc       | 35
10 files changed, 260 insertions, 41 deletions
diff --git a/base/base.gyp b/base/base.gyp
index 324c812..33ee182 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -171,6 +171,7 @@
         'mac/objc_property_releaser_unittest.mm',
         'mac/scoped_sending_event_unittest.mm',
         'md5_unittest.cc',
+        'memory/aligned_memory_unittest.cc',
        'memory/linked_ptr_unittest.cc',
        'memory/mru_cache_unittest.cc',
        'memory/ref_counted_memory_unittest.cc',
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 0d93460..9e65ae4 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -105,6 +105,25 @@
 #define NOINLINE
 #endif
 
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class ALIGNAS(16) MyClass { ... }
+//   ALIGNAS(16) int array[4];
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Return the byte alignment of the given type (available at compile time).
+// Use like:
+//   ALIGNOF(int32)  // this would be 4
+#if defined(COMPILER_MSVC)
+#define ALIGNOF(type) __alignof(type)
+#elif defined(COMPILER_GCC)
+#define ALIGNOF(type) __alignof__(type)
+#endif
+
 // Annotate a virtual method indicating it must be overriding a virtual
 // method in the parent class.
 // Use like:
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
index 5ddc002..c428feb 100644
--- a/base/lazy_instance.h
+++ b/base/lazy_instance.h
@@ -42,6 +42,7 @@
 #include "base/base_export.h"
 #include "base/basictypes.h"
 #include "base/logging.h"
+#include "base/memory/aligned_memory.h"
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/thread_restrictions.h"
 
@@ -59,7 +60,7 @@ struct DefaultLazyInstanceTraits {
   static const bool kAllowedToAccessOnNonjoinableThread = false;
 
   static Type* New(void* instance) {
-    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) % sizeof(instance), 0u)
+    DCHECK_EQ(reinterpret_cast<uintptr_t>(instance) & (ALIGNOF(Type) - 1), 0u)
         << ": Bad boy, the buffer passed to placement new is not aligned!\n"
            "This may break some stuff like SSE-based optimizations assuming the "
            "<Type> objects are word aligned.";
@@ -155,7 +156,8 @@ class LazyInstance {
     if (!(value & kLazyInstanceCreatedMask) &&
         internal::NeedsLazyInstance(&private_instance_)) {
       // Create the instance in the space provided by |private_buf_|.
-      value = reinterpret_cast<subtle::AtomicWord>(Traits::New(private_buf_));
+      value = reinterpret_cast<subtle::AtomicWord>(
+          Traits::New(private_buf_.void_data()));
       internal::CompleteLazyInstance(&private_instance_, value, this,
                                      Traits::kRegisterOnExit ? OnExit : NULL);
     }
@@ -174,20 +176,19 @@ class LazyInstance {
       case 0:
         return p == NULL;
       case internal::kLazyInstanceStateCreating:
-        return static_cast<int8*>(static_cast<void*>(p)) == private_buf_;
+        return static_cast<void*>(p) == private_buf_.void_data();
       default:
         return p == instance();
     }
   }
 
   // Effectively private: member data is only public to allow the linker to
-  // statically initialize it. DO NOT USE FROM OUTSIDE THIS CLASS.
+  // statically initialize it and to maintain a POD class. DO NOT USE FROM
+  // OUTSIDE THIS CLASS.
 
-  // Note this must use AtomicWord, not Atomic32, to ensure correct alignment
-  // of |private_buf_| on 64 bit architectures. (This member must be first to
-  // allow the syntax used in LAZY_INSTANCE_INITIALIZER to work correctly.)
   subtle::AtomicWord private_instance_;
-  int8 private_buf_[sizeof(Type)];  // Preallocated space for the Type instance.
+  // Preallocated space for the Type instance.
+  base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> private_buf_;
 
  private:
   Type* instance() {
@@ -201,7 +202,7 @@ class LazyInstance {
     LazyInstance<Type, Traits>* me =
         reinterpret_cast<LazyInstance<Type, Traits>*>(lazy_instance);
     Traits::Delete(me->instance());
-    subtle::Release_Store(&me->private_instance_, 0);
+    subtle::NoBarrier_Store(&me->private_instance_, 0);
   }
 };
 
diff --git a/base/lazy_instance_unittest.cc b/base/lazy_instance_unittest.cc
index 1bd3ab4..01ce8e5 100644
--- a/base/lazy_instance_unittest.cc
+++ b/base/lazy_instance_unittest.cc
@@ -5,6 +5,7 @@
 #include "base/at_exit.h"
 #include "base/atomic_sequence_num.h"
 #include "base/lazy_instance.h"
+#include "base/memory/aligned_memory.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -139,3 +140,33 @@ TEST(LazyInstanceTest, LeakyLazyInstance) {
   }
   EXPECT_FALSE(deleted2);
 }
+
+namespace {
+
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() {}
+  ~AlignedData() {}
+  base::AlignedMemory<alignment, alignment> data_;
+};
+
+}  // anonymous namespace
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST(LazyInstanceTest, Alignment) {
+  using base::LazyInstance;
+
+  // Create some static instances with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some
+  // work to ensure proper alignment of the static data.
+  static LazyInstance<AlignedData<4> > align4 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<32> > align32 = LAZY_INSTANCE_INITIALIZER;
+  static LazyInstance<AlignedData<4096> > align4096 = LAZY_INSTANCE_INITIALIZER;
+
+  EXPECT_ALIGNED(align4.Pointer(), 4);
+  EXPECT_ALIGNED(align32.Pointer(), 32);
+  EXPECT_ALIGNED(align4096.Pointer(), 4096);
+}
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
new file mode 100644
index 0000000..dc9db90
--- /dev/null
+++ b/base/memory/aligned_memory.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AlignedMemory is a POD type that gives you a portable way to specify static
+// or local stack data of a given alignment and size. For example, if you need
+// static storage for a class, but you want manual control over when the object
+// is constructed and destructed (you don't want static initialization and
+// destruction), use AlignedMemory:
+//
+//   static AlignedMemory<sizeof(MyClass), ALIGNOF(MyClass)> my_class;
+//
+//   // ... at runtime:
+//   new(my_class.void_data()) MyClass();
+//
+//   // ... use it:
+//   MyClass* mc = my_class.data_as<MyClass>();
+//
+//   // ... later, to destruct my_class:
+//   my_class.data_as<MyClass>()->MyClass::~MyClass();
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+
+namespace base {
+
+// AlignedMemory is specialized for all supported alignments.
+// Make sure we get a compiler error if someone uses an unsupported alignment.
+template <size_t Size, size_t ByteAlignment>
+struct AlignedMemory {};
+
+#define BASE_DECL_ALIGNED_MEMORY(byte_alignment) \
+    template <size_t Size> \
+    class AlignedMemory<Size, byte_alignment> { \
+     public: \
+      ALIGNAS(byte_alignment) uint8 data_[Size]; \
+      void* void_data() { return reinterpret_cast<void*>(data_); } \
+      const void* void_data() const { \
+        return reinterpret_cast<const void*>(data_); \
+      } \
+      template<typename Type> \
+      Type* data_as() { return reinterpret_cast<Type*>(void_data()); } \
+      template<typename Type> \
+      const Type* data_as() const { \
+        return reinterpret_cast<const Type*>(void_data()); \
+      } \
+     private: \
+      void* operator new(size_t); \
+      void operator delete(void*); \
+    }
+
+// Specialization for all alignments is required because MSVC (as of VS 2008)
+// does not understand ALIGNAS(ALIGNOF(Type)) or ALIGNAS(template_param).
+// Greater than 4096 alignment is not supported by some compilers, so 4096 is
+// the maximum specified here.
+BASE_DECL_ALIGNED_MEMORY(1);
+BASE_DECL_ALIGNED_MEMORY(2);
+BASE_DECL_ALIGNED_MEMORY(4);
+BASE_DECL_ALIGNED_MEMORY(8);
+BASE_DECL_ALIGNED_MEMORY(16);
+BASE_DECL_ALIGNED_MEMORY(32);
+BASE_DECL_ALIGNED_MEMORY(64);
+BASE_DECL_ALIGNED_MEMORY(128);
+BASE_DECL_ALIGNED_MEMORY(256);
+BASE_DECL_ALIGNED_MEMORY(512);
+BASE_DECL_ALIGNED_MEMORY(1024);
+BASE_DECL_ALIGNED_MEMORY(2048);
+BASE_DECL_ALIGNED_MEMORY(4096);
+
+}  // base
+
+#endif  // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
new file mode 100644
index 0000000..065e952
--- /dev/null
+++ b/base/memory/aligned_memory_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/aligned_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+namespace {
+
+using base::AlignedMemory;
+
+TEST(AlignedMemoryTest, StaticAlignment) {
+  static AlignedMemory<8, 8> raw8;
+  static AlignedMemory<8, 16> raw16;
+  static AlignedMemory<8, 256> raw256;
+  static AlignedMemory<8, 4096> raw4096;
+
+  EXPECT_EQ(8u, ALIGNOF(raw8));
+  EXPECT_EQ(16u, ALIGNOF(raw16));
+  EXPECT_EQ(256u, ALIGNOF(raw256));
+  EXPECT_EQ(4096u, ALIGNOF(raw4096));
+
+  EXPECT_ALIGNED(raw8.void_data(), 8);
+  EXPECT_ALIGNED(raw16.void_data(), 16);
+  EXPECT_ALIGNED(raw256.void_data(), 256);
+  EXPECT_ALIGNED(raw4096.void_data(), 4096);
+}
+
+TEST(AlignedMemoryTest, StackAlignment) {
+  AlignedMemory<8, 8> raw8;
+  AlignedMemory<8, 16> raw16;
+  AlignedMemory<8, 256> raw256;
+  AlignedMemory<8, 4096> raw4096;
+
+  EXPECT_EQ(8u, ALIGNOF(raw8));
+  EXPECT_EQ(16u, ALIGNOF(raw16));
+  EXPECT_EQ(256u, ALIGNOF(raw256));
+  EXPECT_EQ(4096u, ALIGNOF(raw4096));
+
+  EXPECT_ALIGNED(raw8.void_data(), 8);
+  EXPECT_ALIGNED(raw16.void_data(), 16);
+  EXPECT_ALIGNED(raw256.void_data(), 256);
+  EXPECT_ALIGNED(raw4096.void_data(), 4096);
+}
+
+}  // namespace
diff --git a/base/memory/singleton.h b/base/memory/singleton.h
index d76070019..5549012 100644
--- a/base/memory/singleton.h
+++ b/base/memory/singleton.h
@@ -23,6 +23,7 @@
 #include "base/at_exit.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/memory/aligned_memory.h"
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/thread_restrictions.h"
 
@@ -106,18 +107,14 @@ struct StaticMemorySingletonTraits {
   // WARNING: User has to deal with get() in the singleton class
   // this is traits for returning NULL.
   static Type* New() {
+    // Only constructs once and returns pointer; otherwise returns NULL.
     if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1))
      return NULL;
-    Type* ptr = reinterpret_cast<Type*>(buffer_);
 
-    // We are protected by a memory barrier.
-    new(ptr) Type();
-    return ptr;
+    return new(buffer_.void_data()) Type();
   }
 
   static void Delete(Type* p) {
-    base::subtle::NoBarrier_Store(&dead_, 1);
-    base::subtle::MemoryBarrier();
     if (p != NULL)
       p->Type::~Type();
   }
@@ -131,16 +128,13 @@ struct StaticMemorySingletonTraits {
   }
 
  private:
-  static const size_t kBufferSize = (sizeof(Type) +
-                                     sizeof(intptr_t) - 1) / sizeof(intptr_t);
-  static intptr_t buffer_[kBufferSize];
-
+  static base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
   // Signal the object was already deleted, so it is not revived.
   static base::subtle::Atomic32 dead_;
 };
 
-template <typename Type> intptr_t
-    StaticMemorySingletonTraits<Type>::buffer_[kBufferSize];
+template <typename Type> base::AlignedMemory<sizeof(Type), ALIGNOF(Type)>
+    StaticMemorySingletonTraits<Type>::buffer_;
 template <typename Type> base::subtle::Atomic32
     StaticMemorySingletonTraits<Type>::dead_ = 0;
 
diff --git a/base/memory/singleton_unittest.cc b/base/memory/singleton_unittest.cc
index 068148f..33928a7 100644
--- a/base/memory/singleton_unittest.cc
+++ b/base/memory/singleton_unittest.cc
@@ -110,6 +110,19 @@ struct CallbackSingletonWithStaticTrait::Trait
   }
 };
 
+template <class Type>
+class AlignedTestSingleton {
+ public:
+  AlignedTestSingleton() {}
+  ~AlignedTestSingleton() {}
+  static AlignedTestSingleton* GetInstance() {
+    return Singleton<AlignedTestSingleton,
+        StaticMemorySingletonTraits<AlignedTestSingleton> >::get();
+  }
+
+  Type type_;
+};
+
 void SingletonNoLeak(CallbackFunc CallOnQuit) {
   CallbackSingletonWithNoLeakTrait::GetInstance()->callback_ = CallOnQuit;
 }
@@ -250,3 +263,27 @@ TEST_F(SingletonTest, Basic) {
   // The leaky singleton shouldn't leak since SingletonLeak has not been called.
   VerifiesCallbacksNotCalled();
 }
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
+TEST_F(SingletonTest, Alignment) {
+  using base::AlignedMemory;
+
+  // Create some static singletons with increasing sizes and alignment
+  // requirements. By ordering this way, the linker will need to do some
+  // work to ensure proper alignment of the static data.
+  AlignedTestSingleton<int32>* align4 =
+      AlignedTestSingleton<int32>::GetInstance();
+  AlignedTestSingleton<AlignedMemory<32, 32> >* align32 =
+      AlignedTestSingleton<AlignedMemory<32, 32> >::GetInstance();
+  AlignedTestSingleton<AlignedMemory<128, 128> >* align128 =
+      AlignedTestSingleton<AlignedMemory<128, 128> >::GetInstance();
+  AlignedTestSingleton<AlignedMemory<4096, 4096> >* align4096 =
+      AlignedTestSingleton<AlignedMemory<4096, 4096> >::GetInstance();
+
+  EXPECT_ALIGNED(align4, 4);
+  EXPECT_ALIGNED(align32, 32);
+  EXPECT_ALIGNED(align128, 128);
+  EXPECT_ALIGNED(align4096, 4096);
+}
diff --git a/base/stack_container.h b/base/stack_container.h
index dc946db..06ef2a4 100644
--- a/base/stack_container.h
+++ b/base/stack_container.h
@@ -10,6 +10,7 @@
 #include <vector>
 
 #include "base/basictypes.h"
+#include "base/memory/aligned_memory.h"
 
 // This allocator can be used with STL containers to provide a stack buffer
 // from which to allocate memory and overflows onto the heap. This stack buffer
@@ -43,22 +44,15 @@ class StackAllocator : public std::allocator<T> {
     }
 
     // Casts the buffer in its right type.
-    T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+    T* stack_buffer() { return stack_buffer_.template data_as<T>(); }
     const T* stack_buffer() const {
-      return reinterpret_cast<const T*>(stack_buffer_);
+      return stack_buffer_.template data_as<T>();
     }
 
-    //
-    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
-    // since it is used to mimic an array of T.
-    // Be careful while declaring any unaligned types (like bool)
-    // before stack_buffer_.
-    //
-
     // The buffer itself. It is not of type T because we don't want the
     // constructors and destructors to be automatically called. Define a POD
     // buffer of the right size instead.
-    char stack_buffer_[sizeof(T[stack_capacity])];
+    base::AlignedMemory<sizeof(T[stack_capacity]), ALIGNOF(T)> stack_buffer_;
 
     // Set when the stack buffer is used for an allocation. We do not track
     // how much of the buffer is used, only that somebody is using it.
diff --git a/base/stack_container_unittest.cc b/base/stack_container_unittest.cc
index 816ee07..175df36 100644
--- a/base/stack_container_unittest.cc
+++ b/base/stack_container_unittest.cc
@@ -6,6 +6,7 @@
 
 #include <algorithm>
 
+#include "base/memory/aligned_memory.h"
 #include "base/memory/ref_counted.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -96,19 +97,33 @@ TEST(StackContainer, VectorDoubleDelete) {
   // Shouldn't crash at exit.
 }
 
+namespace {
+
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() { memset(data_.void_data(), 0, alignment); }
+  ~AlignedData() {}
+  base::AlignedMemory<alignment, alignment> data_;
+};
+
+}  // anonymous namespace
+
+#define EXPECT_ALIGNED(ptr, align) \
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+
 TEST(StackContainer, BufferAlignment) {
   StackVector<wchar_t, 16> text;
   text->push_back(L'A');
-  text->push_back(L'B');
-  text->push_back(L'C');
-  text->push_back(L'D');
-  text->push_back(L'E');
-  text->push_back(L'F');
-  text->push_back(0);
-
-  const wchar_t* buffer = &text[1];
-  bool even_aligned = (0 == (((size_t)buffer) & 0x1));
-  EXPECT_EQ(even_aligned, true);
+  EXPECT_ALIGNED(&text[0], ALIGNOF(wchar_t));
+
+  StackVector<double, 1> doubles;
+  doubles->push_back(0.0);
+  EXPECT_ALIGNED(&doubles[0], ALIGNOF(double));
+
+  StackVector<AlignedData<256>, 1> aligned256;
+  aligned256->push_back(AlignedData<256>());
+  EXPECT_ALIGNED(&aligned256[0], 256);
 }
 
 #ifdef COMPILER_MSVC
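As a usage note, the pattern documented at the top of the new base/memory/aligned_memory.h (static storage with manual construction and destruction) looks roughly like the sketch below. MyClass and Example are placeholder names for illustration, not part of the patch:

    #include <new>

    #include "base/memory/aligned_memory.h"

    // Placeholder type used only for illustration.
    class MyClass {
     public:
      MyClass() : value(0) {}
      int value;
    };

    // Storage with the size and alignment of MyClass, but with no static
    // constructor or destructor run for it.
    static base::AlignedMemory<sizeof(MyClass), ALIGNOF(MyClass)> my_class_storage;

    void Example() {
      // Construct the object in the aligned buffer only when needed.
      MyClass* instance = new (my_class_storage.void_data()) MyClass();
      instance->value = 42;

      // The same storage can be reached through the typed accessor.
      MyClass* same = my_class_storage.data_as<MyClass>();

      // Destroy it explicitly; the raw storage itself remains valid.
      same->MyClass::~MyClass();
    }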