Diffstat (limited to 'runtime')
87 files changed, 2247 insertions, 1564 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1e5a681..d6d2b42 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -332,7 +332,7 @@ define build-libart
   ifeq ($$(art_target_or_host),target)
     LOCAL_SRC_FILES := $(LIBART_TARGET_SRC_FILES)
     $(foreach arch,$(ART_SUPPORTED_ARCH),
-      LOCAL_SRC_FILES_$(arch) := $$(LIBART_TARGET_SRC_FILES_$(arch))))
+      LOCAL_SRC_FILES_$(arch) := $$(LIBART_TARGET_SRC_FILES_$(arch)))
   else # host
     LOCAL_SRC_FILES := $(LIBART_HOST_SRC_FILES)
     LOCAL_IS_HOST_MODULE := true
@@ -352,7 +352,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
   LOCAL_CFLAGS := $(LIBART_CFLAGS)
   LOCAL_LDFLAGS := $(LIBART_LDFLAGS)
   $(foreach arch,$(ART_SUPPORTED_ARCH),
-    LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch))))
+    LOCAL_LDFLAGS_$(arch) := $$(LIBART_TARGET_LDFLAGS_$(arch)))
 
   ifeq ($$(art_target_or_host),target)
     LOCAL_CLANG := $(ART_TARGET_CLANG)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b1f2275..8683a56 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1307,8 +1307,10 @@ DEFINE_FUNCTION art_quick_string_compareto
      *    esi: pointer to this string data
      *    edi: pointer to comp string data
      */
+    jecxz .Lkeep_length
     repe cmpsw                    // find nonmatching chars in [%esi] and [%edi], up to length %ecx
     jne .Lnot_equal
+.Lkeep_length:
     POP edi                       // pop callee save reg
     POP esi                       // pop callee save reg
     ret
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 4c42099..0c1a72a 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_ASM_SUPPORT_H_
 #define ART_RUNTIME_ASM_SUPPORT_H_
 
+#include "brooks_pointer.h"
+
 // Value loaded into rSUSPEND for quick. When this value is counted down to zero we do a suspend
 // check.
 #define SUSPEND_CHECK_INTERVAL (1000)
@@ -25,6 +27,8 @@
 #define CLASS_OFFSET 0
 #define LOCK_WORD_OFFSET 4
 
+#ifndef USE_BROOKS_POINTER
+
 // Offsets within java.lang.Class.
 #define CLASS_COMPONENT_TYPE_OFFSET 12
 
@@ -43,4 +47,26 @@
 #define METHOD_PORTABLE_CODE_OFFSET 40
 #define METHOD_QUICK_CODE_OFFSET 48
 
+#else
+
+// Offsets within java.lang.Class.
+#define CLASS_COMPONENT_TYPE_OFFSET 20
+
+// Array offsets.
+#define ARRAY_LENGTH_OFFSET 16
+#define OBJECT_ARRAY_DATA_OFFSET 20
+
+// Offsets within java.lang.String.
+#define STRING_VALUE_OFFSET 16
+#define STRING_COUNT_OFFSET 20
+#define STRING_OFFSET_OFFSET 28
+#define STRING_DATA_OFFSET 20
+
+// Offsets within java.lang.Method.
+#define METHOD_DEX_CACHE_METHODS_OFFSET 20
+#define METHOD_PORTABLE_CODE_OFFSET 48
+#define METHOD_QUICK_CODE_OFFSET 56
+
+#endif
+
 #endif  // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 69951c5..7d32338 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -19,7 +19,7 @@
 #include <string>
 
 #include "atomic.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "mirror/object_array-inl.h"
 #include "thread_pool.h"
 #include "UniquePtr.h"
@@ -56,7 +56,7 @@ class CheckWaitTask : public Task {
   AtomicInteger* const count3_;
 };
 
-class BarrierTest : public CommonTest {
+class BarrierTest : public CommonRuntimeTest {
  public:
   static int32_t num_threads;
 };
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index cf7029a..6cc9396 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -21,6 +21,15 @@
 
 #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 
+// C++11 final and override keywords that were introduced in GCC version 4.7.
+#if GCC_VERSION >= 40700
+#define OVERRIDE override
+#define FINAL final
+#else
+#define OVERRIDE
+#define FINAL
+#endif
+
 // The COMPILE_ASSERT macro can be used to verify that a compile time
 // expression is true. For example, you could use it to verify the
 // size of a static array:
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 1af8e0a..ee0b1be 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -16,11 +16,11 @@
 
 #include "mutex.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class MutexTest : public CommonTest {};
+class MutexTest : public CommonRuntimeTest {};
 
 struct MutexTester {
   static void AssertDepth(Mutex& mu, uint32_t expected_depth) {
diff --git a/runtime/base/timing_logger_test.cc b/runtime/base/timing_logger_test.cc
index 03cc9cc..0757751 100644
--- a/runtime/base/timing_logger_test.cc
+++ b/runtime/base/timing_logger_test.cc
@@ -16,11 +16,11 @@
 
 #include "timing_logger.h"
 
-#include "common_test.h"
+#include "common_runtime_test.h"
 
 namespace art {
 
-class TimingLoggerTest : public CommonTest {};
+class TimingLoggerTest : public CommonRuntimeTest {};
 
 // TODO: Negative test cases (improper pairing of EndSplit, etc.)
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
index 49750f4..7e45321 100644
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ b/runtime/base/unix_file/mapped_file_test.cc
@@ -30,7 +30,7 @@ class MappedFileTest : public RandomAccessFileTest {
   }
 
   void SetUp() {
-    art::CommonTest::SetEnvironmentVariables(android_data_);
+    art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
 
     good_path_ = GetTmpPath("some-file.txt");
     int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 3152788..8a6605e 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -21,8 +21,7 @@
 
 #include <string>
 
-#include "common_test.h"
-#include "gtest/gtest.h"
+#include "common_runtime_test.h"
 #include "UniquePtr.h"
 
 namespace unix_file {
@@ -37,7 +36,7 @@ class RandomAccessFileTest : public testing::Test {
   virtual RandomAccessFile* MakeTestFile() = 0;
 
   virtual void SetUp() {
-    art::CommonTest::SetEnvironmentVariables(android_data_);
+    art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
   }
 
   std::string GetTmpPath(const std::string& name) {
diff --git a/runtime/brooks_pointer.h b/runtime/brooks_pointer.h
new file mode 100644
index 0000000..3dac6e9
--- /dev/null
+++ b/runtime/brooks_pointer.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BROOKS_POINTER_H_
+#define ART_RUNTIME_BROOKS_POINTER_H_
+
+// This is in a separate file (from globals.h) because asm_support.h
+// (a C header, not C++) can't include globals.h.
+
+// Uncomment this and the two fields in Object.java (libcore) to
+// enable brooks pointers.
+// #define USE_BROOKS_POINTER
+
+#endif  // ART_RUNTIME_BROOKS_POINTER_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 6ef0082..3da7409 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -40,7 +40,7 @@ inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* des
 
 inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* element_class) {
   for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
-    // Read the cached the array class once to avoid races with other threads setting it.
+    // Read the cached array class once to avoid races with other threads setting it.
     mirror::Class* array_class = find_array_class_cache_[i];
     if (array_class != nullptr && array_class->GetComponentType() == element_class) {
       return array_class;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 69d957f..87323f9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -205,6 +205,9 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
   CHECK(java_lang_Class.get() != NULL);
   mirror::Class::SetClassClass(java_lang_Class.get());
   java_lang_Class->SetClass(java_lang_Class.get());
+  if (kUseBrooksPointer) {
+    java_lang_Class->AssertSelfBrooksPointer();
+  }
   java_lang_Class->SetClassSize(sizeof(mirror::ClassClass));
   heap->DecrementDisableMovingGC(self);
   // AllocClass(mirror::Class*) can now be used
@@ -1182,27 +1185,47 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
   return dex_cache.get();
 }
 
+// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
+// fence.
+class InitializeClassVisitor {
+ public:
+  explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {
+  }
+
+  void operator()(mirror::Object* obj, size_t usable_size) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK_LE(class_size_, usable_size);
+    // Avoid AsClass as object is not yet in live bitmap or allocation stack.
+    mirror::Class* klass = down_cast<mirror::Class*>(obj);
+    // DCHECK(klass->IsClass());
+    klass->SetClassSize(class_size_);
+    klass->SetPrimitiveType(Primitive::kPrimNot);  // Default to not being primitive.
+    klass->SetDexClassDefIndex(DexFile::kDexNoIndex16);  // Default to no valid class def index.
+    klass->SetDexTypeIndex(DexFile::kDexNoIndex16);  // Default to no valid type index.
+  }
+
+ private:
+  const uint32_t class_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(InitializeClassVisitor);
+};
+
 mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class,
-                                       size_t class_size) {
+                                       uint32_t class_size) {
   DCHECK_GE(class_size, sizeof(mirror::Class));
   gc::Heap* heap = Runtime::Current()->GetHeap();
+  InitializeClassVisitor visitor(class_size);
   mirror::Object* k =
-      kMovingClasses ?
-          heap->AllocObject<true>(self, java_lang_Class, class_size) :
-          heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size);
-  if (UNLIKELY(k == NULL)) {
+      kMovingClasses ? heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) : heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
+  if (UNLIKELY(k == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
-    return NULL;
+    return nullptr;
   }
-  mirror::Class* klass = k->AsClass();
-  klass->SetPrimitiveType(Primitive::kPrimNot);  // Default to not being primitive.
-  klass->SetClassSize(class_size);
-  klass->SetDexClassDefIndex(DexFile::kDexNoIndex16);  // Default to no valid class def index.
-  klass->SetDexTypeIndex(DexFile::kDexNoIndex16);  // Default to no valid type index.
-  return klass;
+  return k->AsClass();
 }
 
-mirror::Class* ClassLinker::AllocClass(Thread* self, size_t class_size) {
+mirror::Class* ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
   return AllocClass(self, GetClassRoot(kJavaLangClass), class_size);
 }
 
@@ -1416,7 +1439,7 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
 }
 
 // Precomputes size that will be needed for Class, matching LinkStaticFields
-size_t ClassLinker::SizeOfClass(const DexFile& dex_file,
+uint32_t ClassLinker::SizeOfClass(const DexFile& dex_file,
                                 const DexFile::ClassDef& dex_class_def) {
   const byte* class_data = dex_file.GetClassData(dex_class_def);
   size_t num_ref = 0;
@@ -1437,7 +1460,7 @@ size_t ClassLinker::SizeOfClass(const DexFile& dex_file,
     }
   }
   // start with generic class data
-  size_t size = sizeof(mirror::Class);
+  uint32_t size = sizeof(mirror::Class);
   // follow with reference fields which must be contiguous at start
   size += (num_ref * sizeof(uint32_t));
   // if there are 64-bit fields to add, make sure they are aligned
@@ -1744,6 +1767,9 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
   CHECK(descriptor != NULL);
 
   klass->SetClass(GetClassRoot(kJavaLangClass));
+  if (kUseBrooksPointer) {
+    klass->AssertSelfBrooksPointer();
+  }
   uint32_t access_flags = dex_class_def.access_flags_;
   // Make sure that none of our runtime-only flags are set.
   CHECK_EQ(access_flags & ~kAccJavaFlagsMask, 0U);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f346102..88dbb9c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -388,13 +388,13 @@ class ClassLinker {
   void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // For early bootstrapping by Init
-  mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, size_t class_size)
+  mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Alloc* convenience functions to avoid needing to pass in mirror::Class*
   // values that are known to the ClassLinker such as
   // kObjectArrayClass and kJavaLangString etc.
-  mirror::Class* AllocClass(Thread* self, size_t class_size)
+  mirror::Class* AllocClass(Thread* self, uint32_t class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -419,7 +419,7 @@ class ClassLinker {
                                 mirror::Class* c, SafeMap<uint32_t, mirror::ArtField*>& field_map)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t SizeOfClass(const DexFile& dex_file,
+  uint32_t SizeOfClass(const DexFile& dex_file,
                      const DexFile::ClassDef& dex_class_def);
 
   void LoadClass(const DexFile& dex_file,
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index d6a67cc..55c23f4 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -20,7 +20,7 @@
 
 #include "UniquePtr.h"
 #include "class_linker-inl.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "gc/heap.h"
@@ -37,7 +37,7 @@
 
 namespace art {
 
-class ClassLinkerTest : public CommonTest {
+class ClassLinkerTest : public CommonRuntimeTest {
  protected:
   void AssertNonExistentClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -339,7 +339,7 @@ class ClassLinkerTest : public CommonTest {
     mirror::DexCache* dex_cache = class_linker_->FindDexCache(*dex);
     mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods();
     for (size_t i = 0; i < static_cast<size_t>(resolved_methods->GetLength()); i++) {
-      EXPECT_TRUE(resolved_methods->Get(i) != NULL);
+      EXPECT_TRUE(resolved_methods->Get(i) != NULL) << dex->GetLocation() << " i=" << i;
     }
   }
 
@@ -451,6 +451,10 @@ struct ObjectOffsets : public CheckOffsets<mirror::Object> {
 
     // alphabetical 32-bit
    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
+#ifdef USE_BROOKS_POINTER
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_brooks_ptr_), "shadow$_x_brooks_ptr_"));
+    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_padding_), "shadow$_x_padding_"));
+#endif
   };
 };
 
@@ -705,11 +709,21 @@ TEST_F(ClassLinkerTest, FindClass) {
   EXPECT_FALSE(JavaLangObject->IsSynthetic());
   EXPECT_EQ(2U, JavaLangObject->NumDirectMethods());
   EXPECT_EQ(11U, JavaLangObject->NumVirtualMethods());
-  EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
+  if (!kUseBrooksPointer) {
+    EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
+  } else {
+    EXPECT_EQ(4U, JavaLangObject->NumInstanceFields());
+  }
   FieldHelper fh(JavaLangObject->GetInstanceField(0));
   EXPECT_STREQ(fh.GetName(), "shadow$_klass_");
   fh.ChangeField(JavaLangObject->GetInstanceField(1));
   EXPECT_STREQ(fh.GetName(), "shadow$_monitor_");
+  if (kUseBrooksPointer) {
+    fh.ChangeField(JavaLangObject->GetInstanceField(2));
+    EXPECT_STREQ(fh.GetName(), "shadow$_x_brooks_ptr_");
+    fh.ChangeField(JavaLangObject->GetInstanceField(3));
+    EXPECT_STREQ(fh.GetName(), "shadow$_x_padding_");
+  }
 
   EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
   EXPECT_EQ(0U, kh.NumDirectInterfaces());
diff --git a/runtime/common_test.cc b/runtime/common_runtime_test.cc
index 5df7d41..0ed8b63 100644
--- a/runtime/common_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -19,7 +19,7 @@
 
 int main(int argc, char **argv) {
   art::InitLogging(argv);
-  LOG(INFO) << "Running main() from common_test.cc...";
+  LOG(INFO) << "Running main() from common_runtime_test.cc...";
   testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
 }
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
new file mode 100644
index 0000000..e2ecf4b
--- /dev/null
+++ b/runtime/common_runtime_test.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_COMMON_RUNTIME_TEST_H_
+#define ART_RUNTIME_COMMON_RUNTIME_TEST_H_
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fstream>
+
+#include "../../external/icu4c/common/unicode/uvernum.h"
+#include "base/macros.h"
+#include "base/stl_util.h"
+#include "base/stringprintf.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "compiler_callbacks.h"
+#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/heap.h"
+#include "gtest/gtest.h"
+#include "instruction_set.h"
+#include "interpreter/interpreter.h"
+#include "mirror/class_loader.h"
+#include "oat_file.h"
+#include "object_utils.h"
+#include "os.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+#include "utils.h"
+#include "UniquePtr.h"
+#include "verifier/method_verifier.h"
+#include "verifier/method_verifier-inl.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+class ScratchFile {
+ public:
+  ScratchFile() {
+    filename_ = getenv("ANDROID_DATA");
+    filename_ += "/TmpFile-XXXXXX";
+    int fd = mkstemp(&filename_[0]);
+    CHECK_NE(-1, fd);
+    file_.reset(new File(fd, GetFilename()));
+  }
+
+  ~ScratchFile() {
+    int unlink_result = unlink(filename_.c_str());
+    CHECK_EQ(0, unlink_result);
+  }
+
+  const std::string& GetFilename() const {
+    return filename_;
+  }
+
+  File* GetFile() const {
+    return file_.get();
+  }
+
+  int GetFd() const {
+    return file_->Fd();
+  }
+
+ private:
+  std::string filename_;
+  UniquePtr<File> file_;
+};
+
+class NoopCompilerCallbacks : public CompilerCallbacks {
+ public:
+  NoopCompilerCallbacks() {}
+  virtual ~NoopCompilerCallbacks() {}
+  virtual bool MethodVerified(verifier::MethodVerifier* verifier) {
+    return true;
+  }
+  virtual void ClassRejected(ClassReference ref) {}
+};
+
+class CommonRuntimeTest : public testing::Test {
+ public:
+  static void SetEnvironmentVariables(std::string& android_data) {
+    if (IsHost()) {
+      // $ANDROID_ROOT is set on the device, but not on the host.
+      // We need to set this so that icu4c can find its locale data.
+      std::string root;
+      const char* android_build_top = getenv("ANDROID_BUILD_TOP");
+      if (android_build_top != nullptr) {
+        root += android_build_top;
+      } else {
+        // Not set by build server, so default to current directory
+        char* cwd = getcwd(nullptr, 0);
+        setenv("ANDROID_BUILD_TOP", cwd, 1);
+        root += cwd;
+        free(cwd);
+      }
+#if defined(__linux__)
+      root += "/out/host/linux-x86";
+#elif defined(__APPLE__)
+      root += "/out/host/darwin-x86";
+#else
+#error unsupported OS
+#endif
+      setenv("ANDROID_ROOT", root.c_str(), 1);
+      setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
+
+      // Not set by build server, so default
+      if (getenv("ANDROID_HOST_OUT") == nullptr) {
+        setenv("ANDROID_HOST_OUT", root.c_str(), 1);
+      }
+    }
+
+    // On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
+    android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
+    if (mkdtemp(&android_data[0]) == nullptr) {
+      PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
+    }
+    setenv("ANDROID_DATA", android_data.c_str(), 1);
+  }
+
+ protected:
+  static bool IsHost() {
+    return !kIsTargetBuild;
+  }
+
+  virtual void SetUp() {
+    SetEnvironmentVariables(android_data_);
+    dalvik_cache_.append(android_data_.c_str());
+    dalvik_cache_.append("/dalvik-cache");
+    int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
+    ASSERT_EQ(mkdir_result, 0);
+
+    std::string error_msg;
+    java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName().c_str(),
+                                        GetLibCoreDexFileName().c_str(), &error_msg);
+    if (java_lang_dex_file_ == nullptr) {
+      LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "': "
+                 << error_msg << "\n";
+    }
+    boot_class_path_.push_back(java_lang_dex_file_);
+
+    std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
+    std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
+
+    Runtime::Options options;
+    options.push_back(std::make_pair("bootclasspath", &boot_class_path_));
+    options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+    options.push_back(std::make_pair(min_heap_string.c_str(), nullptr));
+    options.push_back(std::make_pair(max_heap_string.c_str(), nullptr));
+    options.push_back(std::make_pair("compilercallbacks", &callbacks_));
+    SetUpRuntimeOptions(&options);
+    if (!Runtime::Create(options, false)) {
+      LOG(FATAL) << "Failed to create runtime";
+      return;
+    }
+    runtime_.reset(Runtime::Current());
+    class_linker_ = runtime_->GetClassLinker();
+    class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+
+    // Runtime::Create acquired the mutator_lock_ that is normally given away when we
+    // Runtime::Start, give it away now and then switch to a more managable ScopedObjectAccess.
+    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+    // We're back in native, take the opportunity to initialize well known classes.
+    WellKnownClasses::Init(Thread::Current()->GetJniEnv());
+
+    // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
+    // pool is created by the runtime.
+    runtime_->GetHeap()->CreateThreadPool();
+    runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
+  }
+
+  // Allow subclases such as CommonCompilerTest to add extra options.
+  virtual void SetUpRuntimeOptions(Runtime::Options *options) {}
+
+  virtual void TearDown() {
+    const char* android_data = getenv("ANDROID_DATA");
+    ASSERT_TRUE(android_data != nullptr);
+    DIR* dir = opendir(dalvik_cache_.c_str());
+    ASSERT_TRUE(dir != nullptr);
+    dirent* e;
+    while ((e = readdir(dir)) != nullptr) {
+      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
+        continue;
+      }
+      std::string filename(dalvik_cache_);
+      filename.push_back('/');
+      filename.append(e->d_name);
+      int unlink_result = unlink(filename.c_str());
+      ASSERT_EQ(0, unlink_result);
+    }
+    closedir(dir);
+    int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
+    ASSERT_EQ(0, rmdir_cache_result);
+    int rmdir_data_result = rmdir(android_data_.c_str());
+    ASSERT_EQ(0, rmdir_data_result);
+
+    // icu4c has a fixed 10-element array "gCommonICUDataArray".
+    // If we run > 10 tests, we fill that array and u_setCommonData fails.
+    // There's a function to clear the array, but it's not public...
+    typedef void (*IcuCleanupFn)();
+    void* sym = dlsym(RTLD_DEFAULT, "u_cleanup_" U_ICU_VERSION_SHORT);
+    CHECK(sym != nullptr);
+    IcuCleanupFn icu_cleanup_fn = reinterpret_cast<IcuCleanupFn>(sym);
+    (*icu_cleanup_fn)();
+
+    STLDeleteElements(&opened_dex_files_);
+
+    Runtime::Current()->GetHeap()->VerifyHeap();  // Check for heap corruption after the test
+  }
+
+  std::string GetLibCoreDexFileName() {
+    return GetDexFileName("core-libart");
+  }
+
+  std::string GetDexFileName(const std::string& jar_prefix) {
+    if (IsHost()) {
+      const char* host_dir = getenv("ANDROID_HOST_OUT");
+      CHECK(host_dir != nullptr);
+      return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str());
+    }
+    return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
+  }
+
+  std::string GetTestAndroidRoot() {
+    if (IsHost()) {
+      const char* host_dir = getenv("ANDROID_HOST_OUT");
+      CHECK(host_dir != nullptr);
+      return host_dir;
+    }
+    return GetAndroidRoot();
+  }
+
+  const DexFile* OpenTestDexFile(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(name != nullptr);
+    std::string filename;
+    if (IsHost()) {
+      filename += getenv("ANDROID_HOST_OUT");
+      filename += "/framework/";
+    } else {
+      filename += "/data/nativetest/art/";
+    }
+    filename += "art-test-dex-";
+    filename += name;
+    filename += ".jar";
+    std::string error_msg;
+    const DexFile* dex_file = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg);
+    CHECK(dex_file != nullptr) << "Failed to open '" << filename << "': " << error_msg;
+    CHECK_EQ(PROT_READ, dex_file->GetPermissions());
+    CHECK(dex_file->IsReadOnly());
+    opened_dex_files_.push_back(dex_file);
+    return dex_file;
+  }
+
+  jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    const DexFile* dex_file = OpenTestDexFile(dex_name);
+    CHECK(dex_file != nullptr);
+    class_linker_->RegisterDexFile(*dex_file);
+    std::vector<const DexFile*> class_path;
+    class_path.push_back(dex_file);
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    ScopedLocalRef<jobject> class_loader_local(soa.Env(),
+        soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+    jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
+    soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
+    Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
+    return class_loader;
+  }
+
+  std::string android_data_;
+  std::string dalvik_cache_;
+  const DexFile* java_lang_dex_file_;  // owned by runtime_
+  std::vector<const DexFile*> boot_class_path_;
+  UniquePtr<Runtime> runtime_;
+  // Owned by the runtime
+  ClassLinker* class_linker_;
+
+ private:
+  NoopCompilerCallbacks callbacks_;
+  std::vector<const DexFile*> opened_dex_files_;
+};
+
+// Sets a CheckJni abort hook to catch failures. Note that this will cause CheckJNI to carry on
+// rather than aborting, so be careful!
+class CheckJniAbortCatcher {
+ public:
+  CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
+    vm_->check_jni_abort_hook = Hook;
+    vm_->check_jni_abort_hook_data = &actual_;
+  }
+
+  ~CheckJniAbortCatcher() {
+    vm_->check_jni_abort_hook = nullptr;
+    vm_->check_jni_abort_hook_data = nullptr;
+    EXPECT_TRUE(actual_.empty()) << actual_;
+  }
+
+  void Check(const char* expected_text) {
+    EXPECT_TRUE(actual_.find(expected_text) != std::string::npos) << "\n"
+        << "Expected to find: " << expected_text << "\n"
+        << "In the output   : " << actual_;
+    actual_.clear();
+  }
+
+ private:
+  static void Hook(void* data, const std::string& reason) {
+    // We use += because when we're hooking the aborts like this, multiple problems can be found.
+    *reinterpret_cast<std::string*>(data) += reason;
+  }
+
+  JavaVMExt* vm_;
+  std::string actual_;
+
+  DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
+};
+
+// TODO: These tests were disabled for portable when we went to having
+// MCLinker link LLVM ELF output because we no longer just have code
+// blobs in memory. We'll need to dlopen to load and relocate
+// temporary output to resurrect these tests.
+#define TEST_DISABLED_FOR_PORTABLE() \
+  if (kUsePortableCompiler) { \
+    printf("WARNING: TEST DISABLED FOR PORTABLE\n"); \
+    return; \
+  }
+
+}  // namespace art
+
+namespace std {
+
+// TODO: isn't gtest supposed to be able to print STL types for itself?
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs) {
+  os << ::art::ToString(rhs);
+  return os;
+}
+
+}  // namespace std
+
+#endif  // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/common_test.h b/runtime/common_test.h
deleted file mode 100644
index 9eaec46..0000000
--- a/runtime/common_test.h
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_COMMON_TEST_H_
-#define ART_RUNTIME_COMMON_TEST_H_
-
-#include <dirent.h>
-#include <dlfcn.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <fstream>
-
-#include "../../external/icu4c/common/unicode/uvernum.h"
-#include "../compiler/compiler_backend.h"
-#include "../compiler/dex/quick/dex_file_to_method_inliner_map.h"
-#include "../compiler/dex/verification_results.h"
-#include "../compiler/driver/compiler_callbacks_impl.h"
-#include "../compiler/driver/compiler_driver.h"
-#include "../compiler/driver/compiler_options.h"
-#include "base/macros.h"
-#include "base/stl_util.h"
-#include "base/stringprintf.h"
-#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
-#include "compiler_callbacks.h"
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "gc/heap.h"
-#include "gtest/gtest.h"
-#include "instruction_set.h"
-#include "interpreter/interpreter.h"
-#include "mirror/class_loader.h"
-#include "oat_file.h"
-#include "object_utils.h"
-#include "os.h"
-#include "runtime.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
-#include "thread.h"
-#include "utils.h"
-#include "UniquePtr.h"
-#include "verifier/method_verifier.h"
-#include "verifier/method_verifier-inl.h"
-#include "well_known_classes.h"
-
-namespace art {
-
-static const byte kBase64Map[256] = {
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255,  62, 255, 255, 255,  63,
-   52,  53,  54,  55,  56,  57,  58,  59,  60,  61, 255, 255,
-  255, 254, 255, 255, 255,   0,   1,   2,   3,   4,   5,   6,
-    7,   8,   9,  10,  11,  12,  13,  14,  15,  16,  17,  18,  // NOLINT
-   19,  20,  21,  22,  23,  24,  25, 255, 255, 255, 255, 255,  // NOLINT
-  255,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,  36,
-   37,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,  48,  // NOLINT
-   49,  50,  51, 255, 255, 255, 255, 255, 255, 255, 255, 255,  // NOLINT
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-  255, 255, 255, 255
-};
-
-byte* DecodeBase64(const char* src, size_t* dst_size) {
-  std::vector<byte> tmp;
-  uint32_t t = 0, y = 0;
-  int g = 3;
-  for (size_t i = 0; src[i] != '\0'; ++i) {
-    byte c = kBase64Map[src[i] & 0xFF];
-    if (c == 255) continue;
-    // the final = symbols are read and used to trim the remaining bytes
-    if (c == 254) {
-      c = 0;
-      // prevent g < 0 which would potentially allow an overflow later
-      if (--g < 0) {
-        *dst_size = 0;
-        return NULL;
-      }
-    } else if (g != 3) {
-      // we only allow = to be at the end
-      *dst_size = 0;
-      return NULL;
-    }
-    t = (t << 6) | c;
-    if (++y == 4) {
-      tmp.push_back((t >> 16) & 255);
-      if (g > 1) {
-        tmp.push_back((t >> 8) & 255);
-      }
-      if (g > 2) {
-        tmp.push_back(t & 255);
-      }
-      y = t = 0;
-    }
-  }
-  if (y != 0) {
-    *dst_size = 0;
-    return NULL;
-  }
-  UniquePtr<byte[]> dst(new byte[tmp.size()]);
-  if (dst_size != NULL) {
-    *dst_size = tmp.size();
-  } else {
-    *dst_size = 0;
-  }
-  std::copy(tmp.begin(), tmp.end(), dst.get());
-  return dst.release();
-}
-
-class ScratchFile {
- public:
-  ScratchFile() {
-    filename_ = getenv("ANDROID_DATA");
-    filename_ += "/TmpFile-XXXXXX";
-    int fd = mkstemp(&filename_[0]);
-    CHECK_NE(-1, fd);
-    file_.reset(new File(fd, GetFilename()));
-  }
-
-  ~ScratchFile() {
-    int unlink_result = unlink(filename_.c_str());
-    CHECK_EQ(0, unlink_result);
-  }
-
-  const std::string& GetFilename() const {
-    return filename_;
-  }
-
-  File* GetFile() const {
-    return file_.get();
-  }
-
-  int GetFd() const {
-    return file_->Fd();
-  }
-
- private:
-  std::string filename_;
-  UniquePtr<File> file_;
-};
-
-#if defined(__arm__)
-
-#include <sys/ucontext.h>
-
-// A signal handler called when have an illegal instruction. We record the fact in
-// a global boolean and then increment the PC in the signal context to return to
-// the next instruction. We know the instruction is an sdiv (4 bytes long).
-static void baddivideinst(int signo, siginfo *si, void *data) {
-  (void)signo;
-  (void)si;
-  struct ucontext *uc = (struct ucontext *)data;
-  struct sigcontext *sc = &uc->uc_mcontext;
-  sc->arm_r0 = 0;     // set R0 to #0 to signal error
-  sc->arm_pc += 4;    // skip offending instruction
-}
-
-// This is in arch/arm/arm_sdiv.S. It does the following:
-// mov r1,#1
-// sdiv r0,r1,r1
-// bx lr
-//
-// the result will be the value 1 if sdiv is supported. If it is not supported
-// a SIGILL signal will be raised and the signal handler (baddivideinst) called.
-// The signal handler sets r0 to #0 and then increments pc beyond the failed instruction.
-// Thus if the instruction is not supported, the result of this function will be #0
-
-extern "C" bool CheckForARMSDIVInstruction();
-
-static InstructionSetFeatures GuessInstructionFeatures() {
-  InstructionSetFeatures f;
-
-  // Uncomment this for processing of /proc/cpuinfo.
-  if (false) {
-    // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
-    // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
-    std::ifstream in("/proc/cpuinfo");
-    if (in) {
-      while (!in.eof()) {
-        std::string line;
-        std::getline(in, line);
-        if (!in.eof()) {
-          if (line.find("Features") != std::string::npos) {
-            if (line.find("idivt") != std::string::npos) {
-              f.SetHasDivideInstruction(true);
-            }
-          }
-        }
-        in.close();
-      }
-    } else {
-      LOG(INFO) << "Failed to open /proc/cpuinfo";
-    }
-  }
-
-  // See if have a sdiv instruction. Register a signal handler and try to execute
-  // an sdiv instruction. If we get a SIGILL then it's not supported. We can't use
-  // the /proc/cpuinfo method for this because Krait devices don't always put the idivt
-  // feature in the list.
-  struct sigaction sa, osa;
-  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
-  sa.sa_sigaction = baddivideinst;
-  sigaction(SIGILL, &sa, &osa);
-
-  if (CheckForARMSDIVInstruction()) {
-    f.SetHasDivideInstruction(true);
-  }
-
-  // Restore the signal handler.
-  sigaction(SIGILL, &osa, NULL);
-
-  // Other feature guesses in here.
-  return f;
-}
-
-#endif
-
-// Given a set of instruction features from the build, parse it. The
-// input 'str' is a comma separated list of feature names. Parse it and
-// return the InstructionSetFeatures object.
-static InstructionSetFeatures ParseFeatureList(std::string str) {
-  InstructionSetFeatures result;
-  typedef std::vector<std::string> FeatureList;
-  FeatureList features;
-  Split(str, ',', features);
-  for (FeatureList::iterator i = features.begin(); i != features.end(); i++) {
-    std::string feature = Trim(*i);
-    if (feature == "default") {
-      // Nothing to do.
-    } else if (feature == "div") {
-      // Supports divide instruction.
-      result.SetHasDivideInstruction(true);
-    } else if (feature == "nodiv") {
-      // Turn off support for divide instruction.
-      result.SetHasDivideInstruction(false);
-    } else {
-      LOG(FATAL) << "Unknown instruction set feature: '" << feature << "'";
-    }
-  }
-  // Others...
-  return result;
-}
-
-class CommonTest : public testing::Test {
- public:
-  static void MakeExecutable(const std::vector<uint8_t>& code) {
-    CHECK_NE(code.size(), 0U);
-    MakeExecutable(&code[0], code.size());
-  }
-
-  // Create an OatMethod based on pointers (for unit tests).
-  OatFile::OatMethod CreateOatMethod(const void* code,
-                                     const size_t frame_size_in_bytes,
-                                     const uint32_t core_spill_mask,
-                                     const uint32_t fp_spill_mask,
-                                     const uint8_t* mapping_table,
-                                     const uint8_t* vmap_table,
-                                     const uint8_t* gc_map) {
-    const byte* base;
-    uint32_t code_offset, mapping_table_offset, vmap_table_offset, gc_map_offset;
-    if (mapping_table == nullptr && vmap_table == nullptr && gc_map == nullptr) {
-      base = reinterpret_cast<const byte*>(code);  // Base of data points at code.
-      base -= kPointerSize;  // Move backward so that code_offset != 0.
-      code_offset = kPointerSize;
-      mapping_table_offset = 0;
-      vmap_table_offset = 0;
-      gc_map_offset = 0;
-    } else {
-      // TODO: 64bit support.
-      base = nullptr;  // Base of data in oat file, ie 0.
-      code_offset = PointerToLowMemUInt32(code);
-      mapping_table_offset = PointerToLowMemUInt32(mapping_table);
-      vmap_table_offset = PointerToLowMemUInt32(vmap_table);
-      gc_map_offset = PointerToLowMemUInt32(gc_map);
-    }
-    return OatFile::OatMethod(base,
-                              code_offset,
-                              frame_size_in_bytes,
-                              core_spill_mask,
-                              fp_spill_mask,
-                              mapping_table_offset,
-                              vmap_table_offset,
-                              gc_map_offset);
-  }
-
-  void MakeExecutable(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(method != nullptr);
-
-    const CompiledMethod* compiled_method = nullptr;
-    if (!method->IsAbstract()) {
-      mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
-      const DexFile& dex_file = *dex_cache->GetDexFile();
-      compiled_method =
-          compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
-                                                              method->GetDexMethodIndex()));
-    }
-    if (compiled_method != nullptr) {
-      const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
-      if (code == nullptr) {
-        code = compiled_method->GetPortableCode();
-      }
-      MakeExecutable(*code);
-      const void* method_code = CompiledMethod::CodePointer(&(*code)[0],
-                                                            compiled_method->GetInstructionSet());
-      LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                      compiled_method->GetFrameSizeInBytes(),
-                                                      compiled_method->GetCoreSpillMask(),
-                                                      compiled_method->GetFpSpillMask(),
-                                                      &compiled_method->GetMappingTable()[0],
-                                                      &compiled_method->GetVmapTable()[0],
-                                                      NULL);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
-    } else {
-      // No code? You must mean to go into the interpreter.
-      const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge() : GetQuickToInterpreterBridge();
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                      kStackAlignment,
-                                                      0,
-                                                      0,
-                                                      NULL,
-                                                      NULL,
-                                                      NULL);
-      oat_method.LinkMethod(method);
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
-    }
-    // Create bridges to transition between different kinds of compiled bridge.
-    if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
-      method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
-    } else {
-      CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
-      method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
-      method->SetIsPortableCompiled();
-    }
-  }
-
-  static void MakeExecutable(const void* code_start, size_t code_length) {
-    CHECK(code_start != NULL);
-    CHECK_NE(code_length, 0U);
-    uintptr_t data = reinterpret_cast<uintptr_t>(code_start);
-    uintptr_t base = RoundDown(data, kPageSize);
-    uintptr_t limit = RoundUp(data + code_length, kPageSize);
-    uintptr_t len = limit - base;
-    int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
-    CHECK_EQ(result, 0);
-
-    // Flush instruction cache
-    // Only uses __builtin___clear_cache if GCC >= 4.3.3
-#if GCC_VERSION >= 40303
-    __builtin___clear_cache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
-#else
-    LOG(FATAL) << "UNIMPLEMENTED: cache flush";
-#endif
-  }
-
-  static void SetEnvironmentVariables(std::string& android_data) {
-    if (IsHost()) {
-      // $ANDROID_ROOT is set on the device, but not on the host.
-      // We need to set this so that icu4c can find its locale data.
-      std::string root;
-      const char* android_build_top = getenv("ANDROID_BUILD_TOP");
-      if (android_build_top != nullptr) {
-        root += android_build_top;
-      } else {
-        // Not set by build server, so default to current directory
-        char* cwd = getcwd(nullptr, 0);
-        setenv("ANDROID_BUILD_TOP", cwd, 1);
-        root += cwd;
-        free(cwd);
-      }
-#if defined(__linux__)
-      root += "/out/host/linux-x86";
-#elif defined(__APPLE__)
-      root += "/out/host/darwin-x86";
-#else
-#error unsupported OS
-#endif
-      setenv("ANDROID_ROOT", root.c_str(), 1);
-      setenv("LD_LIBRARY_PATH", ":", 0);  // Required by java.lang.System.<clinit>.
-
-      // Not set by build server, so default
-      if (getenv("ANDROID_HOST_OUT") == nullptr) {
-        setenv("ANDROID_HOST_OUT", root.c_str(), 1);
-      }
-    }
-
-    // On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
-    android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
-    if (mkdtemp(&android_data[0]) == NULL) {
-      PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
-    }
-    setenv("ANDROID_DATA", android_data.c_str(), 1);
-  }
-
-  void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    SirtRef<mirror::ClassLoader> loader(self, class_loader);
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
-      MakeExecutable(klass->GetDirectMethod(i));
-    }
-    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
-      MakeExecutable(klass->GetVirtualMethod(i));
-    }
-  }
-
- protected:
-  static bool IsHost() {
-    return !kIsTargetBuild;
-  }
-
-  virtual void SetUp() {
-    SetEnvironmentVariables(android_data_);
-    dalvik_cache_.append(android_data_.c_str());
-    dalvik_cache_.append("/dalvik-cache");
-    int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
-    ASSERT_EQ(mkdir_result, 0);
-
-    std::string error_msg;
-    java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName().c_str(),
-                                        GetLibCoreDexFileName().c_str(), &error_msg);
-    if (java_lang_dex_file_ == NULL) {
-      LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "': "
-                 << error_msg << "\n";
-    }
-    boot_class_path_.push_back(java_lang_dex_file_);
-
-    std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
-    std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
-
-    // TODO: make selectable
-    CompilerBackend::Kind compiler_backend = kUsePortableCompiler
-        ? CompilerBackend::kPortable
-        : CompilerBackend::kQuick;
-
-    compiler_options_.reset(new CompilerOptions);
-    verification_results_.reset(new VerificationResults(compiler_options_.get()));
-    method_inliner_map_.reset(new DexFileToMethodInlinerMap);
-    callbacks_.reset(new CompilerCallbacksImpl(verification_results_.get(),
-                                               method_inliner_map_.get()));
-    Runtime::Options options;
-    options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
-    options.push_back(std::make_pair("bootclasspath", &boot_class_path_));
-    options.push_back(std::make_pair("-Xcheck:jni", reinterpret_cast<void*>(NULL)));
-    options.push_back(std::make_pair(min_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
-    options.push_back(std::make_pair(max_heap_string.c_str(), reinterpret_cast<void*>(NULL)));
-    if (!Runtime::Create(options, false)) {
-      LOG(FATAL) << "Failed to create runtime";
-      return;
-    }
-    runtime_.reset(Runtime::Current());
-    // Runtime::Create acquired the mutator_lock_ that is normally given away when we
-    // Runtime::Start, give it away now and then switch to a more managable ScopedObjectAccess.
-    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-    {
-      ScopedObjectAccess soa(Thread::Current());
-      ASSERT_TRUE(runtime_.get() != NULL);
-      class_linker_ = runtime_->GetClassLinker();
-
-      InstructionSet instruction_set = kNone;
-
-      // Take the default set of instruction features from the build.
-      InstructionSetFeatures instruction_set_features =
-          ParseFeatureList(STRINGIFY(ART_DEFAULT_INSTRUCTION_SET_FEATURES));
-
-#if defined(__arm__)
-      instruction_set = kThumb2;
-      InstructionSetFeatures runtime_features = GuessInstructionFeatures();
-
-      // for ARM, do a runtime check to make sure that the features we are passed from
-      // the build match the features we actually determine at runtime.
-      ASSERT_EQ(instruction_set_features, runtime_features);
-#elif defined(__mips__)
-      instruction_set = kMips;
-#elif defined(__i386__)
-      instruction_set = kX86;
-#elif defined(__x86_64__)
-      instruction_set = kX86_64;
-      // TODO: x86_64 compilation support.
-      runtime_->SetCompilerFilter(Runtime::kInterpretOnly);
-#endif
-
-      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
-        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
-        if (!runtime_->HasCalleeSaveMethod(type)) {
-          runtime_->SetCalleeSaveMethod(
-              runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
-        }
-      }
-      class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
-      timer_.reset(new CumulativeLogger("Compilation times"));
-      compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
-                                                verification_results_.get(),
-                                                method_inliner_map_.get(),
-                                                compiler_backend, instruction_set,
-                                                instruction_set_features,
-                                                true, new CompilerDriver::DescriptorSet,
-                                                2, true, true, timer_.get()));
-    }
-    // We typically don't generate an image in unit tests, disable this optimization by default.
-    compiler_driver_->SetSupportBootImageFixup(false);
-
-    // We're back in native, take the opportunity to initialize well known classes.
-    WellKnownClasses::Init(Thread::Current()->GetJniEnv());
-    // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
-    // pool is created by the runtime.
-    runtime_->GetHeap()->CreateThreadPool();
-    runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
-  }
-
-  virtual void TearDown() {
-    const char* android_data = getenv("ANDROID_DATA");
-    ASSERT_TRUE(android_data != NULL);
-    DIR* dir = opendir(dalvik_cache_.c_str());
-    ASSERT_TRUE(dir != NULL);
-    dirent* e;
-    while ((e = readdir(dir)) != NULL) {
-      if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
-        continue;
-      }
-      std::string filename(dalvik_cache_);
-      filename.push_back('/');
-      filename.append(e->d_name);
-      int unlink_result = unlink(filename.c_str());
-      ASSERT_EQ(0, unlink_result);
-    }
-    closedir(dir);
-    int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
-    ASSERT_EQ(0, rmdir_cache_result);
-    int rmdir_data_result = rmdir(android_data_.c_str());
-    ASSERT_EQ(0, rmdir_data_result);
-
-    // icu4c has a fixed 10-element array "gCommonICUDataArray".
-    // If we run > 10 tests, we fill that array and u_setCommonData fails.
-    // There's a function to clear the array, but it's not public...
-    typedef void (*IcuCleanupFn)();
-    void* sym = dlsym(RTLD_DEFAULT, "u_cleanup_" U_ICU_VERSION_SHORT);
-    CHECK(sym != NULL);
-    IcuCleanupFn icu_cleanup_fn = reinterpret_cast<IcuCleanupFn>(sym);
-    (*icu_cleanup_fn)();
-
-    compiler_driver_.reset();
-    timer_.reset();
-    callbacks_.reset();
-    method_inliner_map_.reset();
-    verification_results_.reset();
-    compiler_options_.reset();
-    STLDeleteElements(&opened_dex_files_);
-
-    Runtime::Current()->GetHeap()->VerifyHeap();  // Check for heap corruption after the test
-  }
-
-  std::string GetLibCoreDexFileName() {
-    return GetDexFileName("core-libart");
-  }
-
-  std::string GetDexFileName(const std::string& jar_prefix) {
-    if (IsHost()) {
-      const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != NULL);
-      return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str());
-    }
-    return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
-  }
-
-  std::string GetTestAndroidRoot() {
-    if (IsHost()) {
-      const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != NULL);
-      return host_dir;
-    }
-    return GetAndroidRoot();
-  }
-
-  const DexFile* OpenTestDexFile(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(name != NULL);
-    std::string filename;
-    if (IsHost()) {
-      filename += getenv("ANDROID_HOST_OUT");
-      filename += "/framework/";
-    } else {
-      filename += "/data/nativetest/art/";
-    }
-    filename += "art-test-dex-";
-    filename += name;
-    filename += ".jar";
-    std::string error_msg;
-    const DexFile* dex_file = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg);
-    CHECK(dex_file != NULL) << "Failed to open '" << filename << "': " << error_msg;
-    CHECK_EQ(PROT_READ, dex_file->GetPermissions());
-    CHECK(dex_file->IsReadOnly());
-    opened_dex_files_.push_back(dex_file);
-    return dex_file;
-  }
-
-  jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    const DexFile* dex_file = OpenTestDexFile(dex_name);
-    CHECK(dex_file != NULL);
-    class_linker_->RegisterDexFile(*dex_file);
-    std::vector<const DexFile*> class_path;
-    class_path.push_back(dex_file);
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    ScopedLocalRef<jobject> class_loader_local(soa.Env(),
-        soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
-    jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
-    soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
-    Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path);
-    return class_loader;
-  }
-
-  void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    SirtRef<mirror::ClassLoader> loader(self, class_loader);
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
-      CompileMethod(klass->GetDirectMethod(i));
-    }
-    for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
-      CompileMethod(klass->GetVirtualMethod(i));
-    }
-  }
-
-  void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(method != NULL);
-    TimingLogger timings("CommonTest::CompileMethod", false, false);
-    timings.StartSplit("CompileOne");
-    compiler_driver_->CompileOne(method, timings);
-    MakeExecutable(method);
-    timings.EndSplit();
-  }
-
-  void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
-                           const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    mirror::ArtMethod* method = klass->FindDirectMethod(method_name, signature);
-    CHECK(method != NULL) << "Direct method not found: "
-                          << class_name << "." << method_name << signature;
-    CompileMethod(method);
-  }
-
-  void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
-                            const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string class_descriptor(DotToDescriptor(class_name));
-    Thread* self = Thread::Current();
-    mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
-    CHECK(klass != NULL) << "Class not found " << class_name;
-    mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
-    CHECK(method != NULL) << "Virtual method not found: "
-                          << class_name << "." << method_name << signature;
-    CompileMethod(method);
-  }
-
-  void ReserveImageSpace() {
-    // Reserve where the image will be loaded up front so that other parts of test set up don't
-    // accidentally end up colliding with the fixed memory address when we need to load the image.
-    std::string error_msg;
-    image_reservation_.reset(MemMap::MapAnonymous("image reservation",
-                                                  reinterpret_cast<byte*>(ART_BASE_ADDRESS),
-                                                  (size_t)100 * 1024 * 1024,  // 100MB
-                                                  PROT_NONE,
-                                                  false /* no need for 4gb flag with fixed mmap*/,
-                                                  &error_msg));
-    CHECK(image_reservation_.get() != nullptr) << error_msg;
-  }
-
-  void UnreserveImageSpace() {
-    image_reservation_.reset();
-  }
-
-  std::string android_data_;
-  std::string dalvik_cache_;
-  const DexFile* java_lang_dex_file_;  // owned by runtime_
-  std::vector<const DexFile*> boot_class_path_;
-  UniquePtr<Runtime> runtime_;
-  // Owned by the runtime
-  ClassLinker* class_linker_;
-  UniquePtr<CompilerOptions> compiler_options_;
-  UniquePtr<VerificationResults> verification_results_;
-  UniquePtr<DexFileToMethodInlinerMap> method_inliner_map_;
-  UniquePtr<CompilerCallbacksImpl> callbacks_;
-  UniquePtr<CompilerDriver> compiler_driver_;
-  UniquePtr<CumulativeLogger> timer_;
-
- private:
-  std::vector<const DexFile*> opened_dex_files_;
-  UniquePtr<MemMap> image_reservation_;
-};
-
-// Sets a CheckJni abort hook to catch failures. Note that this will cause CheckJNI to carry on
-// rather than aborting, so be careful!
-class CheckJniAbortCatcher {
- public:
-  CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
-    vm_->check_jni_abort_hook = Hook;
-    vm_->check_jni_abort_hook_data = &actual_;
-  }
-
-  ~CheckJniAbortCatcher() {
-    vm_->check_jni_abort_hook = NULL;
-    vm_->check_jni_abort_hook_data = NULL;
-    EXPECT_TRUE(actual_.empty()) << actual_;
-  }
-
-  void Check(const char* expected_text) {
-    EXPECT_TRUE(actual_.find(expected_text) != std::string::npos) << "\n"
-        << "Expected to find: " << expected_text << "\n"
-        << "In the output   : " << actual_;
-    actual_.clear();
-  }
-
- private:
-  static void Hook(void* data, const std::string& reason) {
-    // We use += because when we're hooking the aborts like this, multiple problems can be found.
- *reinterpret_cast<std::string*>(data) += reason; - } - - JavaVMExt* vm_; - std::string actual_; - - DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher); -}; - -// TODO: These tests were disabled for portable when we went to having -// MCLinker link LLVM ELF output because we no longer just have code -// blobs in memory. We'll need to dlopen to load and relocate -// temporary output to resurrect these tests. -#define TEST_DISABLED_FOR_PORTABLE() \ - if (kUsePortableCompiler) { \ - printf("WARNING: TEST DISABLED FOR PORTABLE\n"); \ - return; \ - } - -} // namespace art - -namespace std { - -// TODO: isn't gtest supposed to be able to print STL types for itself? -template <typename T> -std::ostream& operator<<(std::ostream& os, const std::vector<T>& rhs) { - os << ::art::ToString(rhs); - return os; -} - -} // namespace std - -#endif // ART_RUNTIME_COMMON_TEST_H_ diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 9f09709..3b4e9c7 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -1202,7 +1202,9 @@ JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t if (c == NULL) { return status; } - new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length)); + new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length, + c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator())); return JDWP::ERR_NONE; } diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc index 543a7b0..9b6859a 100644 --- a/runtime/dex_file_test.cc +++ b/runtime/dex_file_test.cc @@ -17,11 +17,11 @@ #include "dex_file.h" #include "UniquePtr.h" -#include "common_test.h" +#include "common_runtime_test.h" namespace art { -class DexFileTest : public CommonTest {}; +class DexFileTest : public CommonRuntimeTest {}; TEST_F(DexFileTest, Open) { ScopedObjectAccess soa(Thread::Current()); @@ -29,6 +29,77 @@ TEST_F(DexFileTest, Open) { ASSERT_TRUE(dex != NULL); } +static const byte kBase64Map[256] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, + 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT + 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT + 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT + 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255 +}; + +static inline byte* DecodeBase64(const char* src, size_t* dst_size) { + std::vector<byte> tmp; + uint32_t t = 0, y = 0; + int g = 3; + for (size_t i = 0; src[i] != '\0'; ++i) { + byte c = kBase64Map[src[i] & 0xFF]; + if (c == 255) continue; + // the final = 
symbols are read and used to trim the remaining bytes + if (c == 254) { + c = 0; + // prevent g < 0 which would potentially allow an overflow later + if (--g < 0) { + *dst_size = 0; + return nullptr; + } + } else if (g != 3) { + // we only allow = to be at the end + *dst_size = 0; + return nullptr; + } + t = (t << 6) | c; + if (++y == 4) { + tmp.push_back((t >> 16) & 255); + if (g > 1) { + tmp.push_back((t >> 8) & 255); + } + if (g > 2) { + tmp.push_back(t & 255); + } + y = t = 0; + } + } + if (y != 0) { + *dst_size = 0; + return nullptr; + } + UniquePtr<byte[]> dst(new byte[tmp.size()]); + if (dst_size != nullptr) { + *dst_size = tmp.size(); + } + std::copy(tmp.begin(), tmp.end(), dst.get()); + return dst.release(); +} + // Although this is the same content logically as the Nested test dex, // the DexFileHeader test is sensitive to subtle changes in the // contents due to the checksum etc, so we embed the exact input here. diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc index 2941db6..5e2d89e 100644 --- a/runtime/dex_method_iterator_test.cc +++ b/runtime/dex_method_iterator_test.cc @@ -16,11 +16,11 @@ #include "dex_method_iterator.h" -#include "common_test.h" +#include "common_runtime_test.h" namespace art { -class DexMethodIteratorTest : public CommonTest { +class DexMethodIteratorTest : public CommonRuntimeTest { public: const DexFile* OpenDexFile(const std::string& partial_filename) { std::string dfn = GetDexFileName(partial_filename); diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index e4e58b8..3a17e41 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -22,10 +22,10 @@ namespace art { -ElfFile::ElfFile() - : file_(NULL), - writable_(false), - program_header_only_(false), +ElfFile::ElfFile(File* file, bool writable, bool program_header_only) + : file_(file), + writable_(writable), + program_header_only_(program_header_only), header_(NULL), base_address_(NULL), program_headers_start_(NULL), @@ -38,23 +38,20 @@ ElfFile::ElfFile() dynstr_section_start_(NULL), hash_section_start_(NULL), symtab_symbol_table_(NULL), - dynsym_symbol_table_(NULL) {} + dynsym_symbol_table_(NULL) { + CHECK(file != NULL); +} ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only, std::string* error_msg) { - UniquePtr<ElfFile> elf_file(new ElfFile()); - if (!elf_file->Setup(file, writable, program_header_only, error_msg)) { + UniquePtr<ElfFile> elf_file(new ElfFile(file, writable, program_header_only)); + if (!elf_file->Setup(error_msg)) { return nullptr; } return elf_file.release(); } -bool ElfFile::Setup(File* file, bool writable, bool program_header_only, std::string* error_msg) { - CHECK(file != NULL); - file_ = file; - writable_ = writable; - program_header_only_ = program_header_only; - +bool ElfFile::Setup(std::string* error_msg) { int prot; int flags; if (writable_) { @@ -79,7 +76,7 @@ bool ElfFile::Setup(File* file, bool writable, bool program_header_only, std::st return false; } - if (program_header_only) { + if (program_header_only_) { // first just map ELF header to get program header size information size_t elf_header_size = sizeof(Elf32_Ehdr); if (!SetMap(MemMap::MapFile(elf_header_size, prot, flags, file_->Fd(), 0, @@ -114,7 +111,7 @@ bool ElfFile::Setup(File* file, bool writable, bool program_header_only, std::st // Either way, the program header is relative to the elf header program_headers_start_ = Begin() + GetHeader().e_phoff; - if (!program_header_only) { + if (!program_header_only_) { //
Setup section headers. section_headers_start_ = Begin() + GetHeader().e_shoff; @@ -192,7 +189,8 @@ bool ElfFile::SetMap(MemMap* map, std::string* error_msg) { || (ELFMAG1 != header_->e_ident[EI_MAG1]) || (ELFMAG2 != header_->e_ident[EI_MAG2]) || (ELFMAG3 != header_->e_ident[EI_MAG3])) { - *error_msg = StringPrintf("Failed to find ELF magic in %s: %c%c%c%c", + *error_msg = StringPrintf("Failed to find ELF magic value %d %d %d %d in %s, found %d %d %d %d", + ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3, file_->GetPath().c_str(), header_->e_ident[EI_MAG0], header_->e_ident[EI_MAG1], @@ -200,61 +198,142 @@ bool ElfFile::SetMap(MemMap* map, std::string* error_msg) { header_->e_ident[EI_MAG3]); return false; } + if (ELFCLASS32 != header_->e_ident[EI_CLASS]) { + *error_msg = StringPrintf("Failed to find expected EI_CLASS value %d in %s, found %d", + ELFCLASS32, + file_->GetPath().c_str(), + header_->e_ident[EI_CLASS]); + return false; + } + if (ELFDATA2LSB != header_->e_ident[EI_DATA]) { + *error_msg = StringPrintf("Failed to find expected EI_DATA value %d in %s, found %d", + ELFDATA2LSB, + file_->GetPath().c_str(), + header_->e_ident[EI_DATA]); + return false; + } + if (EV_CURRENT != header_->e_ident[EI_VERSION]) { + *error_msg = StringPrintf("Failed to find expected EI_VERSION value %d in %s, found %d", + EV_CURRENT, + file_->GetPath().c_str(), + header_->e_ident[EI_VERSION]); + return false; + } + if (ET_DYN != header_->e_type) { + *error_msg = StringPrintf("Failed to find expected e_type value %d in %s, found %d", + ET_DYN, + file_->GetPath().c_str(), + header_->e_type); + return false; + } + if (EV_CURRENT != header_->e_version) { + *error_msg = StringPrintf("Failed to find expected e_version value %d in %s, found %d", + EV_CURRENT, + file_->GetPath().c_str(), + header_->e_version); + return false; + } + if (0 != header_->e_entry) { + *error_msg = StringPrintf("Failed to find expected e_entry value %d in %s, found %d", + 0, + file_->GetPath().c_str(), + header_->e_entry); + return false; + } + if (0 == header_->e_phoff) { + *error_msg = StringPrintf("Failed to find non-zero e_phoff value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_shoff) { + *error_msg = StringPrintf("Failed to find non-zero e_shoff value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_ehsize) { + *error_msg = StringPrintf("Failed to find non-zero e_ehsize value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_phentsize) { + *error_msg = StringPrintf("Failed to find non-zero e_phentsize value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_phnum) { + *error_msg = StringPrintf("Failed to find non-zero e_phnum value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_shentsize) { + *error_msg = StringPrintf("Failed to find non-zero e_shentsize value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_shnum) { + *error_msg = StringPrintf("Failed to find non-zero e_shnum value in %s", + file_->GetPath().c_str()); + return false; + } + if (0 == header_->e_shstrndx) { + *error_msg = StringPrintf("Failed to find non-zero e_shstrndx value in %s", + file_->GetPath().c_str()); + return false; + } + if (header_->e_shstrndx >= header_->e_shnum) { + *error_msg = StringPrintf("Failed to find e_shstrndx value %d less than e_shnum value %d in %s", + header_->e_shstrndx, + header_->e_shnum, + file_->GetPath().c_str()); + return false; + } - - // TODO: remove these static_casts from enum when
using -std=gnu++0x - CHECK_EQ(static_cast<unsigned char>(ELFCLASS32), header_->e_ident[EI_CLASS]) << file_->GetPath(); - CHECK_EQ(static_cast<unsigned char>(ELFDATA2LSB), header_->e_ident[EI_DATA]) << file_->GetPath(); - CHECK_EQ(static_cast<unsigned char>(EV_CURRENT), header_->e_ident[EI_VERSION]) << file_->GetPath(); - - // TODO: remove these static_casts from enum when using -std=gnu++0x - CHECK_EQ(static_cast<Elf32_Half>(ET_DYN), header_->e_type) << file_->GetPath(); - CHECK_EQ(static_cast<Elf32_Word>(EV_CURRENT), header_->e_version) << file_->GetPath(); - CHECK_EQ(0U, header_->e_entry) << file_->GetPath(); - - CHECK_NE(0U, header_->e_phoff) << file_->GetPath(); - CHECK_NE(0U, header_->e_shoff) << file_->GetPath(); - CHECK_NE(0U, header_->e_ehsize) << file_->GetPath(); - CHECK_NE(0U, header_->e_phentsize) << file_->GetPath(); - CHECK_NE(0U, header_->e_phnum) << file_->GetPath(); - CHECK_NE(0U, header_->e_shentsize) << file_->GetPath(); - CHECK_NE(0U, header_->e_shnum) << file_->GetPath(); - CHECK_NE(0U, header_->e_shstrndx) << file_->GetPath(); - CHECK_GE(header_->e_shnum, header_->e_shstrndx) << file_->GetPath(); if (!program_header_only_) { - CHECK_GT(Size(), header_->e_phoff) << file_->GetPath(); - CHECK_GT(Size(), header_->e_shoff) << file_->GetPath(); + if (header_->e_phoff >= Size()) { + *error_msg = StringPrintf("Failed to find e_phoff value %d less than %zd in %s", + header_->e_phoff, + Size(), + file_->GetPath().c_str()); + return false; + } + if (header_->e_shoff >= Size()) { + *error_msg = StringPrintf("Failed to find e_shoff value %d less than %zd in %s", + header_->e_shoff, + Size(), + file_->GetPath().c_str()); + return false; + } } return true; } -Elf32_Ehdr& ElfFile::GetHeader() { +Elf32_Ehdr& ElfFile::GetHeader() const { CHECK(header_ != NULL); return *header_; } -byte* ElfFile::GetProgramHeadersStart() { +byte* ElfFile::GetProgramHeadersStart() const { CHECK(program_headers_start_ != NULL); return program_headers_start_; } -byte* ElfFile::GetSectionHeadersStart() { +byte* ElfFile::GetSectionHeadersStart() const { CHECK(section_headers_start_ != NULL); return section_headers_start_; } -Elf32_Phdr& ElfFile::GetDynamicProgramHeader() { +Elf32_Phdr& ElfFile::GetDynamicProgramHeader() const { CHECK(dynamic_program_header_ != NULL); return *dynamic_program_header_; } -Elf32_Dyn* ElfFile::GetDynamicSectionStart() { +Elf32_Dyn* ElfFile::GetDynamicSectionStart() const { CHECK(dynamic_section_start_ != NULL); return dynamic_section_start_; } -Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) { +Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) const { CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; Elf32_Sym* symbol_section_start; switch (section_type) { @@ -275,7 +354,7 @@ Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) { return symbol_section_start; } -const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) { +const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) const { CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; const char* string_section_start; switch (section_type) { @@ -296,7 +375,7 @@ const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) { return string_section_start; } -const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) { +const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) const { CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type; 
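An aside on the DecodeBase64 helper added to dex_file_test.cc above: unknown characters (mapped to 255) are skipped, '=' (mapped to 254) is accepted only as trailing padding, and a leftover partial quantum is rejected. A hedged usage sketch, self-contained apart from linking against the helper; the byte typedef is assumed to match ART's, and "TWFu" is the RFC 4648 sample input that decodes to 'M' 'a' 'n':

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    typedef uint8_t byte;                                   // assumption: ART's byte typedef
    byte* DecodeBase64(const char* src, size_t* dst_size);  // the helper above

    void CheckDecodeBase64() {
      size_t size = 0;
      byte* man = DecodeBase64("TWFu", &size);
      if (man == nullptr || size != 3 || man[0] != 'M' || man[2] != 'n') {
        std::abort();
      }
      delete[] man;
      // Interior padding is malformed: the decoder must return nullptr and size 0.
      byte* bad = DecodeBase64("TW=u", &size);
      if (bad != nullptr || size != 0) {
        std::abort();
      }
    }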
if (i == 0) { return NULL; @@ -306,43 +385,43 @@ const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) { return string; } -Elf32_Word* ElfFile::GetHashSectionStart() { +Elf32_Word* ElfFile::GetHashSectionStart() const { CHECK(hash_section_start_ != NULL); return hash_section_start_; } -Elf32_Word ElfFile::GetHashBucketNum() { +Elf32_Word ElfFile::GetHashBucketNum() const { return GetHashSectionStart()[0]; } -Elf32_Word ElfFile::GetHashChainNum() { +Elf32_Word ElfFile::GetHashChainNum() const { return GetHashSectionStart()[1]; } -Elf32_Word ElfFile::GetHashBucket(size_t i) { +Elf32_Word ElfFile::GetHashBucket(size_t i) const { CHECK_LT(i, GetHashBucketNum()); // 0 is nbucket, 1 is nchain return GetHashSectionStart()[2 + i]; } -Elf32_Word ElfFile::GetHashChain(size_t i) { +Elf32_Word ElfFile::GetHashChain(size_t i) const { CHECK_LT(i, GetHashChainNum()); // 0 is nbucket, 1 is nchain, & chains are after buckets return GetHashSectionStart()[2 + GetHashBucketNum() + i]; } -Elf32_Word ElfFile::GetProgramHeaderNum() { +Elf32_Word ElfFile::GetProgramHeaderNum() const { return GetHeader().e_phnum; } -Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) { +Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) const { CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize); CHECK_LT(program_header, End()) << file_->GetPath(); return *reinterpret_cast<Elf32_Phdr*>(program_header); } -Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) { +Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) const { for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) { Elf32_Phdr& program_header = GetProgramHeader(i); if (program_header.p_type == type) { @@ -352,11 +431,11 @@ Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) { return NULL; } -Elf32_Word ElfFile::GetSectionHeaderNum() { +Elf32_Word ElfFile::GetSectionHeaderNum() const { return GetHeader().e_shnum; } -Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) { +Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) const { // Can only access arbitrary sections when we have the whole file, not just program header. // Even if we Load(), it doesn't bring in all the sections. CHECK(!program_header_only_) << file_->GetPath(); @@ -366,7 +445,7 @@ Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) { return *reinterpret_cast<Elf32_Shdr*>(section_header); } -Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) { +Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) const { // Can only access arbitrary sections when we have the whole file, not just program header. // We could change this to switch on known types if they were detected during loading. 
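The SetMap() rewrite above trades fatal CHECKs for error strings, so a corrupt oat file is reported to the caller rather than aborting the runtime. A minimal sketch of the same validate-and-report pattern over the identification bytes, using Linux <elf.h> names; this is an illustration, not the ART code itself:

    #include <elf.h>
    #include <string>

    bool ValidateElfIdent(const Elf32_Ehdr& hdr, const std::string& path,
                          std::string* error_msg) {
      struct { const char* name; int expected; int actual; } checks[] = {
        { "EI_CLASS",   ELFCLASS32,  hdr.e_ident[EI_CLASS] },
        { "EI_DATA",    ELFDATA2LSB, hdr.e_ident[EI_DATA] },
        { "EI_VERSION", EV_CURRENT,  hdr.e_ident[EI_VERSION] },
      };
      for (const auto& check : checks) {
        if (check.actual != check.expected) {
          // Report which precondition failed and keep running.
          *error_msg = std::string("Unexpected ") + check.name + " in " + path;
          return false;
        }
      }
      return true;
    }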
CHECK(!program_header_only_) << file_->GetPath(); @@ -393,11 +472,11 @@ static unsigned elfhash(const char *_name) { return h; } -Elf32_Shdr& ElfFile::GetSectionNameStringSection() { +Elf32_Shdr& ElfFile::GetSectionNameStringSection() const { return GetSectionHeader(GetHeader().e_shstrndx); } -byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) { +const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const { Elf32_Word hash = elfhash(symbol_name.c_str()); Elf32_Word bucket_index = hash % GetHashBucketNum(); Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index); @@ -416,14 +495,15 @@ bool ElfFile::IsSymbolSectionType(Elf32_Word section_type) { return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM)); } -Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) { - CHECK(IsSymbolSectionType(section_header.sh_type)) << file_->GetPath() << " " << section_header.sh_type; +Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) const { + CHECK(IsSymbolSectionType(section_header.sh_type)) + << file_->GetPath() << " " << section_header.sh_type; CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath(); return section_header.sh_size / section_header.sh_entsize; } Elf32_Sym& ElfFile::GetSymbol(Elf32_Word section_type, - Elf32_Word i) { + Elf32_Word i) const { return *(GetSymbolSectionStart(section_type) + i); } @@ -467,7 +547,8 @@ Elf32_Sym* ElfFile::FindSymbolByName(Elf32_Word section_type, if (name == NULL) { continue; } - std::pair<SymbolTable::iterator, bool> result = (*symbol_table)->insert(std::make_pair(name, &symbol)); + std::pair<SymbolTable::iterator, bool> result = + (*symbol_table)->insert(std::make_pair(name, &symbol)); if (!result.second) { // If a duplicate, make sure it has the same logical value. Seen on x86. 
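For context on FindDynamicSymbolAddress above: a SysV .hash section is laid out as [nbucket, nchain, bucket[nbucket], chain[nchain]], with chain entries parallel to .dynsym indices. A standalone sketch of the bucket/chain walk, using the same hash computation as the elfhash() function above:

    #include <elf.h>
    #include <cstring>

    const Elf32_Sym* HashLookup(const Elf32_Word* hash, const Elf32_Sym* dynsym,
                                const char* dynstr, const char* name) {
      const Elf32_Word nbucket = hash[0];
      const Elf32_Word nchain = hash[1];
      const Elf32_Word* bucket = hash + 2;
      const Elf32_Word* chain = bucket + nbucket;
      unsigned h = 0;
      for (const unsigned char* p = reinterpret_cast<const unsigned char*>(name);
           *p != 0; ++p) {
        h = (h << 4) + *p;
        unsigned g = h & 0xf0000000;
        h ^= g;
        h ^= g >> 24;
      }
      // Follow the chain from the hashed bucket; index 0 (STN_UNDEF) ends it.
      for (Elf32_Word i = bucket[h % nbucket]; i != 0 && i < nchain; i = chain[i]) {
        if (std::strcmp(dynstr + dynsym[i].st_name, name) == 0) {
          return &dynsym[i];
        }
      }
      return nullptr;  // symbol not in the dynamic symbol table
    }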
CHECK_EQ(symbol.st_value, result.first->second->st_value); @@ -504,8 +585,8 @@ Elf32_Sym* ElfFile::FindSymbolByName(Elf32_Word section_type, } Elf32_Addr ElfFile::FindSymbolAddress(Elf32_Word section_type, - const std::string& symbol_name, - bool build_map) { + const std::string& symbol_name, + bool build_map) { Elf32_Sym* symbol = FindSymbolByName(section_type, symbol_name, build_map); if (symbol == NULL) { return 0; @@ -513,7 +594,7 @@ Elf32_Addr ElfFile::FindSymbolAddress(Elf32_Word section_type, return symbol->st_value; } -const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) { +const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) const { CHECK(!program_header_only_) << file_->GetPath(); // TODO: remove this static_cast from enum when using -std=gnu++0x CHECK_EQ(static_cast<Elf32_Word>(SHT_STRTAB), string_section.sh_type) << file_->GetPath(); @@ -527,16 +608,16 @@ const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) { return reinterpret_cast<const char*>(string); } -Elf32_Word ElfFile::GetDynamicNum() { +Elf32_Word ElfFile::GetDynamicNum() const { return GetDynamicProgramHeader().p_filesz / sizeof(Elf32_Dyn); } -Elf32_Dyn& ElfFile::GetDynamic(Elf32_Word i) { +Elf32_Dyn& ElfFile::GetDynamic(Elf32_Word i) const { CHECK_LT(i, GetDynamicNum()) << file_->GetPath(); return *(GetDynamicSectionStart() + i); } -Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) { +Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) const { for (Elf32_Word i = 0; i < GetDynamicNum(); i++) { Elf32_Dyn& elf_dyn = GetDynamic(i); if (elf_dyn.d_tag == type) { @@ -546,41 +627,41 @@ Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) { return 0; } -Elf32_Rel* ElfFile::GetRelSectionStart(Elf32_Shdr& section_header) { +Elf32_Rel* ElfFile::GetRelSectionStart(Elf32_Shdr& section_header) const { CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; return reinterpret_cast<Elf32_Rel*>(Begin() + section_header.sh_offset); } -Elf32_Word ElfFile::GetRelNum(Elf32_Shdr& section_header) { +Elf32_Word ElfFile::GetRelNum(Elf32_Shdr& section_header) const { CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath(); return section_header.sh_size / section_header.sh_entsize; } -Elf32_Rel& ElfFile::GetRel(Elf32_Shdr& section_header, Elf32_Word i) { +Elf32_Rel& ElfFile::GetRel(Elf32_Shdr& section_header, Elf32_Word i) const { CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath(); return *(GetRelSectionStart(section_header) + i); } -Elf32_Rela* ElfFile::GetRelaSectionStart(Elf32_Shdr& section_header) { +Elf32_Rela* ElfFile::GetRelaSectionStart(Elf32_Shdr& section_header) const { CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; return reinterpret_cast<Elf32_Rela*>(Begin() + section_header.sh_offset); } -Elf32_Word ElfFile::GetRelaNum(Elf32_Shdr& section_header) { +Elf32_Word ElfFile::GetRelaNum(Elf32_Shdr& section_header) const { CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; return section_header.sh_size / section_header.sh_entsize; } -Elf32_Rela& ElfFile::GetRela(Elf32_Shdr& section_header, Elf32_Word i) { +Elf32_Rela& ElfFile::GetRela(Elf32_Shdr& section_header, Elf32_Word i) const { CHECK(SHT_RELA == 
section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type; CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath(); return *(GetRelaSectionStart(section_header) + i); } // Based on bionic phdr_table_get_load_size -size_t ElfFile::GetLoadedSize() { +size_t ElfFile::GetLoadedSize() const { Elf32_Addr min_vaddr = 0xFFFFFFFFu; Elf32_Addr max_vaddr = 0x00000000u; for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) { @@ -605,7 +686,6 @@ size_t ElfFile::GetLoadedSize() { } bool ElfFile::Load(bool executable, std::string* error_msg) { - // TODO: actually return false error CHECK(program_header_only_) << file_->GetPath(); for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) { Elf32_Phdr& program_header = GetProgramHeader(i); @@ -643,11 +723,14 @@ bool ElfFile::Load(bool executable, std::string* error_msg) { if (program_header.p_vaddr == 0) { std::string reservation_name("ElfFile reservation for "); reservation_name += file_->GetPath(); - std::string error_msg; UniquePtr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(), NULL, GetLoadedSize(), PROT_NONE, false, - &error_msg)); - CHECK(reserve.get() != NULL) << file_->GetPath() << ": " << error_msg; + error_msg)); + if (reserve.get() == nullptr) { + *error_msg = StringPrintf("Failed to allocate %s: %s", + reservation_name.c_str(), error_msg->c_str()); + return false; + } base_address_ = reserve->Begin(); segments_.push_back(reserve.release()); } @@ -687,8 +770,17 @@ bool ElfFile::Load(bool executable, std::string* error_msg) { true, file_->GetPath().c_str(), error_msg)); - CHECK(segment.get() != nullptr) << *error_msg; - CHECK_EQ(segment->Begin(), p_vaddr) << file_->GetPath(); + if (segment.get() == nullptr) { + *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s", + i, file_->GetPath().c_str(), error_msg->c_str()); + return false; + } + if (segment->Begin() != p_vaddr) { + *error_msg = StringPrintf("Failed to map ELF file segment %d from %s at expected address %p, " + "instead mapped to %p", + i, file_->GetPath().c_str(), p_vaddr, segment->Begin()); + return false; + } segments_.push_back(segment.release()); } @@ -700,19 +792,39 @@ bool ElfFile::Load(bool executable, std::string* error_msg) { byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr; switch (elf_dyn.d_tag) { case DT_HASH: { + if (!ValidPointer(d_ptr)) { + *error_msg = StringPrintf("DT_HASH value %p does not refer to a loaded ELF segment of %s", + d_ptr, file_->GetPath().c_str()); + return false; + } hash_section_start_ = reinterpret_cast<Elf32_Word*>(d_ptr); break; } case DT_STRTAB: { + if (!ValidPointer(d_ptr)) { + *error_msg = StringPrintf("DT_STRTAB value %p does not refer to a loaded ELF segment of %s", + d_ptr, file_->GetPath().c_str()); + return false; + } dynstr_section_start_ = reinterpret_cast<char*>(d_ptr); break; } case DT_SYMTAB: { + if (!ValidPointer(d_ptr)) { + *error_msg = StringPrintf("DT_SYMTAB value %p does not refer to a loaded ELF segment of %s", + d_ptr, file_->GetPath().c_str()); + return false; + } dynsym_section_start_ = reinterpret_cast<Elf32_Sym*>(d_ptr); break; } case DT_NULL: { - CHECK_EQ(GetDynamicNum(), i+1); + if (GetDynamicNum() != i+1) { + *error_msg = StringPrintf("DT_NULL found after %d .dynamic entries, " + "expected %d as implied by size of PT_DYNAMIC segment in %s", + i + 1, GetDynamicNum(), file_->GetPath().c_str()); + return false; + } break; } } @@ -721,4 +833,14 @@ bool ElfFile::Load(bool executable, std::string* error_msg) { return true; } +bool ElfFile::ValidPointer(const byte* start) const {
+ for (size_t i = 0; i < segments_.size(); ++i) { + const MemMap* segment = segments_[i]; + if (segment->Begin() <= start && start < segment->End()) { + return true; + } + } + return false; +} + } // namespace art diff --git a/runtime/elf_file.h b/runtime/elf_file.h index baf4356..8a0a5f8 100644 --- a/runtime/elf_file.h +++ b/runtime/elf_file.h @@ -39,15 +39,15 @@ class ElfFile { // Load segments into memory based on PT_LOAD program headers - File& GetFile() const { + const File& GetFile() const { return *file_; } - byte* Begin() { + byte* Begin() const { return map_->Begin(); } - byte* End() { + byte* End() const { return map_->End(); } @@ -55,24 +55,24 @@ class ElfFile { return map_->Size(); } - Elf32_Ehdr& GetHeader(); + Elf32_Ehdr& GetHeader() const; - Elf32_Word GetProgramHeaderNum(); - Elf32_Phdr& GetProgramHeader(Elf32_Word); - Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type); + Elf32_Word GetProgramHeaderNum() const; + Elf32_Phdr& GetProgramHeader(Elf32_Word) const; + Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const; - Elf32_Word GetSectionHeaderNum(); - Elf32_Shdr& GetSectionHeader(Elf32_Word); - Elf32_Shdr* FindSectionByType(Elf32_Word type); + Elf32_Word GetSectionHeaderNum() const; + Elf32_Shdr& GetSectionHeader(Elf32_Word) const; + Elf32_Shdr* FindSectionByType(Elf32_Word type) const; - Elf32_Shdr& GetSectionNameStringSection(); + Elf32_Shdr& GetSectionNameStringSection() const; // Find .dynsym using .hash for more efficient lookup than FindSymbolAddress. - byte* FindDynamicSymbolAddress(const std::string& symbol_name); + const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const; static bool IsSymbolSectionType(Elf32_Word section_type); - Elf32_Word GetSymbolNum(Elf32_Shdr&); - Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i); + Elf32_Word GetSymbolNum(Elf32_Shdr&) const; + Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i) const; // Find symbol in specified table, returning NULL if it is not found. // @@ -83,73 +83,77 @@ class ElfFile { // should be set unless only a small number of symbols will be // looked up. Elf32_Sym* FindSymbolByName(Elf32_Word section_type, - const std::string& symbol_name, - bool build_map); + const std::string& symbol_name, + bool build_map); // Find address of symbol in specified table, returning 0 if it is // not found. See FindSymbolByName for an explanation of build_map. Elf32_Addr FindSymbolAddress(Elf32_Word section_type, - const std::string& symbol_name, - bool build_map); + const std::string& symbol_name, + bool build_map); // Lookup a string given string section and offset. Returns NULL for // special 0 offset. - const char* GetString(Elf32_Shdr&, Elf32_Word); + const char* GetString(Elf32_Shdr&, Elf32_Word) const; // Lookup a string by section type. Returns NULL for special 0 offset. 
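ValidPointer() above closes a hole in Load(): a malformed .dynamic entry could otherwise yield a d_ptr outside every mapped PT_LOAD segment. A condensed sketch of the guarded walk; the valid callback stands in for the half-open [Begin, End) containment test, and the error strings are illustrative:

    #include <elf.h>
    #include <cstdint>
    #include <string>

    struct DynamicInfo { Elf32_Word* hash; char* strtab; Elf32_Sym* symtab; };

    static bool Fail(std::string* error_msg, const char* what) {
      *error_msg = std::string("Rejecting .dynamic section: ") + what;
      return false;
    }

    bool WalkDynamic(Elf32_Dyn* dyn, Elf32_Word count, uint8_t* load_bias,
                     bool (*valid)(const uint8_t*), DynamicInfo* out,
                     std::string* error_msg) {
      for (Elf32_Word i = 0; i < count; ++i) {
        // d_ptr values are relative to where the file was actually mapped.
        uint8_t* d_ptr = load_bias + dyn[i].d_un.d_ptr;
        switch (dyn[i].d_tag) {
          case DT_HASH:
            if (!valid(d_ptr)) return Fail(error_msg, "bad DT_HASH");
            out->hash = reinterpret_cast<Elf32_Word*>(d_ptr);
            break;
          case DT_STRTAB:
            if (!valid(d_ptr)) return Fail(error_msg, "bad DT_STRTAB");
            out->strtab = reinterpret_cast<char*>(d_ptr);
            break;
          case DT_SYMTAB:
            if (!valid(d_ptr)) return Fail(error_msg, "bad DT_SYMTAB");
            out->symtab = reinterpret_cast<Elf32_Sym*>(d_ptr);
            break;
          case DT_NULL:
            // PT_DYNAMIC's size implies exactly where DT_NULL must sit.
            if (i + 1 != count) return Fail(error_msg, "early DT_NULL");
            break;
        }
      }
      return true;
    }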
- const char* GetString(Elf32_Word section_type, Elf32_Word); + const char* GetString(Elf32_Word section_type, Elf32_Word) const; - Elf32_Word GetDynamicNum(); - Elf32_Dyn& GetDynamic(Elf32_Word); - Elf32_Word FindDynamicValueByType(Elf32_Sword type); + Elf32_Word GetDynamicNum() const; + Elf32_Dyn& GetDynamic(Elf32_Word) const; + Elf32_Word FindDynamicValueByType(Elf32_Sword type) const; - Elf32_Word GetRelNum(Elf32_Shdr&); - Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word); + Elf32_Word GetRelNum(Elf32_Shdr&) const; + Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word) const; - Elf32_Word GetRelaNum(Elf32_Shdr&); - Elf32_Rela& GetRela(Elf32_Shdr&, Elf32_Word); + Elf32_Word GetRelaNum(Elf32_Shdr&) const; + Elf32_Rela& GetRela(Elf32_Shdr&, Elf32_Word) const; // Returns the expected size when the file is loaded at runtime - size_t GetLoadedSize(); + size_t GetLoadedSize() const; // Load segments into memory based on PT_LOAD program headers. // executable is true at run time, false at compile time. bool Load(bool executable, std::string* error_msg); private: - ElfFile(); + ElfFile(File* file, bool writable, bool program_header_only); - bool Setup(File* file, bool writable, bool program_header_only, std::string* error_msg); + bool Setup(std::string* error_msg); bool SetMap(MemMap* map, std::string* error_msg); - byte* GetProgramHeadersStart(); - byte* GetSectionHeadersStart(); - Elf32_Phdr& GetDynamicProgramHeader(); - Elf32_Dyn* GetDynamicSectionStart(); - Elf32_Sym* GetSymbolSectionStart(Elf32_Word section_type); - const char* GetStringSectionStart(Elf32_Word section_type); - Elf32_Rel* GetRelSectionStart(Elf32_Shdr&); - Elf32_Rela* GetRelaSectionStart(Elf32_Shdr&); - Elf32_Word* GetHashSectionStart(); - Elf32_Word GetHashBucketNum(); - Elf32_Word GetHashChainNum(); - Elf32_Word GetHashBucket(size_t i); - Elf32_Word GetHashChain(size_t i); + byte* GetProgramHeadersStart() const; + byte* GetSectionHeadersStart() const; + Elf32_Phdr& GetDynamicProgramHeader() const; + Elf32_Dyn* GetDynamicSectionStart() const; + Elf32_Sym* GetSymbolSectionStart(Elf32_Word section_type) const; + const char* GetStringSectionStart(Elf32_Word section_type) const; + Elf32_Rel* GetRelSectionStart(Elf32_Shdr&) const; + Elf32_Rela* GetRelaSectionStart(Elf32_Shdr&) const; + Elf32_Word* GetHashSectionStart() const; + Elf32_Word GetHashBucketNum() const; + Elf32_Word GetHashChainNum() const; + Elf32_Word GetHashBucket(size_t i) const; + Elf32_Word GetHashChain(size_t i) const; typedef std::map<std::string, Elf32_Sym*> SymbolTable; SymbolTable** GetSymbolTable(Elf32_Word section_type); - File* file_; - bool writable_; - bool program_header_only_; + bool ValidPointer(const byte* start) const; - // ELF header mapping. If program_header_only_ is false, will actually point to the entire elf file. + const File* const file_; + const bool writable_; + const bool program_header_only_; + + // ELF header mapping. If program_header_only_ is false, will + // actually point to the entire elf file. UniquePtr<MemMap> map_; Elf32_Ehdr* header_; std::vector<MemMap*> segments_; - // Pointer to start of first PT_LOAD program segment after Load() when program_header_only_ is true. + // Pointer to start of first PT_LOAD program segment after Load() + // when program_header_only_ is true. byte* base_address_; // The program header should always available but use GetProgramHeadersStart() to be sure. 
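With the constructor refactor above, ElfFile::Open() is the only way to build an instance and every malformed-file condition comes back through error_msg. A hedged usage fragment assuming ART's own headers and typedefs (art::File, UniquePtr, LOG); "oatdata" is the dynamic symbol oat files use to expose their payload:

    #include "base/logging.h"
    #include "elf_file.h"
    #include "UniquePtr.h"

    bool PrintOatDataAddress(art::File* file) {
      std::string error_msg;
      UniquePtr<art::ElfFile> elf(art::ElfFile::Open(file, /*writable*/ false,
                                                     /*program_header_only*/ true,
                                                     &error_msg));
      if (elf.get() == nullptr) {
        LOG(ERROR) << "Failed to open ELF file: " << error_msg;  // no CHECK, no abort
        return false;
      }
      if (!elf->Load(/*executable*/ false, &error_msg)) {
        LOG(ERROR) << "Failed to load ELF segments: " << error_msg;
        return false;
      }
      const art::byte* addr = elf->FindDynamicSymbolAddress("oatdata");
      if (addr == nullptr) {
        LOG(ERROR) << "No oatdata symbol in " << elf->GetFile().GetPath();
        return false;
      }
      LOG(INFO) << "oatdata at " << static_cast<const void*>(addr);
      return true;
    }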
@@ -161,8 +165,8 @@ class ElfFile { Elf32_Dyn* dynamic_section_start_; Elf32_Sym* symtab_section_start_; Elf32_Sym* dynsym_section_start_; - const char* strtab_section_start_; - const char* dynstr_section_start_; + char* strtab_section_start_; + char* dynstr_section_start_; Elf32_Word* hash_section_start_; SymbolTable* symtab_symbol_table_; diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 4078cac..829ec4a 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -87,7 +87,8 @@ mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* gc::Heap* heap = Runtime::Current()->GetHeap(); // Use the current allocator type in case CheckFilledNewArrayAlloc caused us to suspend and then // the heap switched the allocator type while we were suspended. - return mirror::Array::Alloc<false>(self, klass, component_count, heap->GetCurrentAllocator()); + return mirror::Array::Alloc<false>(self, klass, component_count, klass->GetComponentSize(), + heap->GetCurrentAllocator()); } // Helper function to allocate array for FILLED_NEW_ARRAY. @@ -103,7 +104,8 @@ mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror: gc::Heap* heap = Runtime::Current()->GetHeap(); // Use the current allocator type in case CheckFilledNewArrayAlloc caused us to suspend and then // the heap switched the allocator type while we were suspended. - return mirror::Array::Alloc<true>(self, klass, component_count, heap->GetCurrentAllocator()); + return mirror::Array::Alloc<true>(self, klass, component_count, klass->GetComponentSize(), + heap->GetCurrentAllocator()); } void ThrowStackOverflowError(Thread* self) { diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 2c08351..2ced942 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -228,9 +228,11 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, } gc::Heap* heap = Runtime::Current()->GetHeap(); return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, + klass->GetComponentSize(), heap->GetCurrentAllocator()); } - return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type); + return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, + klass->GetComponentSize(), allocator_type); } template <bool kAccessCheck, bool kInstrumented> @@ -252,9 +254,10 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Cl return nullptr; // Failure } } - // No need to retry a slow-path allocation as the above code won't - // cause a GC or thread suspension. - return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type); + // No need to retry a slow-path allocation as the above code won't cause a GC or thread + // suspension. 
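The Array::Alloc call sites above now pass klass->GetComponentSize() explicitly along with the allocator type. The size such an allocation must cover is the array data offset plus count times component size; a hedged sketch of that arithmetic with the overflow check a runtime needs (parameters illustrative, not ART's actual header layout):

    #include <cstddef>
    #include <cstdint>

    // Returns false for a negative count or a total that cannot fit in size_t
    // (the runtime reports those as NegativeArraySizeException / OutOfMemoryError).
    bool ComputeArrayAllocSize(size_t data_offset, size_t component_size,
                               int32_t component_count, size_t* out_size) {
      if (component_count < 0) {
        return false;
      }
      uint64_t size = static_cast<uint64_t>(data_offset) +
          static_cast<uint64_t>(component_size) * static_cast<uint64_t>(component_count);
      if (size != static_cast<uint64_t>(static_cast<size_t>(size))) {
        return false;  // would wrap on a 32-bit size_t
      }
      *out_size = static_cast<size_t>(size);
      return true;
    }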
+ return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, + klass->GetComponentSize(), allocator_type); } extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method, diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc index ca8b931..b69aeb4 100644 --- a/runtime/entrypoints/math_entrypoints_test.cc +++ b/runtime/entrypoints/math_entrypoints_test.cc @@ -16,12 +16,13 @@ #include "math_entrypoints.h" -#include "common_test.h" #include <limits> +#include "common_runtime_test.h" + namespace art { -class MathEntrypointsTest : public CommonTest {}; +class MathEntrypointsTest : public CommonRuntimeTest {}; TEST_F(MathEntrypointsTest, DoubleToLong) { EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19)); diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc index 910a817..3653b37 100644 --- a/runtime/exception_test.cc +++ b/runtime/exception_test.cc @@ -15,10 +15,10 @@ */ #include "class_linker.h" -#include "common_test.h" +#include "common_runtime_test.h" #include "dex_file.h" #include "gtest/gtest.h" -#include "leb128_encoder.h" +#include "leb128.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" @@ -32,10 +32,10 @@ namespace art { -class ExceptionTest : public CommonTest { +class ExceptionTest : public CommonRuntimeTest { protected: virtual void SetUp() { - CommonTest::SetUp(); + CommonRuntimeTest::SetUp(); ScopedObjectAccess soa(Thread::Current()); SirtRef<mirror::ClassLoader> class_loader( @@ -77,7 +77,7 @@ class ExceptionTest : public CommonTest { method_f_ = my_klass_->FindVirtualMethod("f", "()I"); ASSERT_TRUE(method_f_ != NULL); method_f_->SetFrameSizeInBytes(kStackAlignment); - method_f_->SetEntryPointFromQuickCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2)); + method_f_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]); method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]); method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]); method_f_->SetNativeGcMap(&fake_gc_map_[0]); @@ -85,7 +85,7 @@ class ExceptionTest : public CommonTest { method_g_ = my_klass_->FindVirtualMethod("g", "(I)V"); ASSERT_TRUE(method_g_ != NULL); method_g_->SetFrameSizeInBytes(kStackAlignment); - method_g_->SetEntryPointFromQuickCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2)); + method_g_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]); method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]); method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]); method_g_->SetNativeGcMap(&fake_gc_map_[0]); diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc index e70704f..ba4e2ac 100644 --- a/runtime/gc/accounting/space_bitmap_test.cc +++ b/runtime/gc/accounting/space_bitmap_test.cc @@ -16,20 +16,18 @@ #include "space_bitmap.h" -#include "common_test.h" +#include <stdint.h> + +#include "common_runtime_test.h" #include "globals.h" #include "space_bitmap-inl.h" #include "UniquePtr.h" -#include <stdint.h> - namespace art { namespace gc { namespace accounting { -class SpaceBitmapTest : public CommonTest { - public: -}; +class SpaceBitmapTest : public CommonRuntimeTest {}; TEST_F(SpaceBitmapTest, Init) { byte* heap_begin = reinterpret_cast<byte*>(0x10000000); diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h index c4238c7..5b4ca80 100644 --- 
a/runtime/gc/allocator/rosalloc.h +++ b/runtime/gc/allocator/rosalloc.h @@ -54,10 +54,10 @@ namespace art { namespace gc { namespace allocator { -// A Runs-of-slots memory allocator. +// A runs-of-slots memory allocator. class RosAlloc { private: - // Rerepresents a run of free pages. + // Represents a run of free pages. class FreePageRun { public: byte magic_num_; // The magic number used for debugging only. diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 8ca3892..7b2bc3b 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -450,6 +450,12 @@ mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) { inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) { DCHECK(!IsImmune(obj)); + + if (kUseBrooksPointer) { + // Verify all the objects have the correct Brooks pointer installed. + obj->AssertSelfBrooksPointer(); + } + // Try to take advantage of locality of references within a space, failing this find the space // the hard way. accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_; @@ -470,6 +476,11 @@ inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) { inline void MarkSweep::MarkObjectNonNull(const Object* obj) { DCHECK(obj != NULL); + if (kUseBrooksPointer) { + // Verify all the objects have the correct Brooks pointer installed. + obj->AssertSelfBrooksPointer(); + } + if (IsImmune(obj)) { DCHECK(IsMarked(obj)); return; @@ -532,6 +543,11 @@ bool MarkSweep::MarkLargeObject(const Object* obj, bool set) { inline bool MarkSweep::MarkObjectParallel(const Object* obj) { DCHECK(obj != NULL); + if (kUseBrooksPointer) { + // Verify all the objects have the correct Brooks pointer installed. + obj->AssertSelfBrooksPointer(); + } + if (IsImmune(obj)) { DCHECK(IsMarked(obj)); return false; diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index 29fafd6..c55b2b2 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -64,16 +64,18 @@ class MarkSweep : public GarbageCollector { ~MarkSweep() {} - virtual void InitializePhase(); - virtual bool IsConcurrent() const; - virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); - virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - virtual void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void InitializePhase() OVERRIDE; + virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); virtual void MarkReachableObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - virtual GcType GetGcType() const { + + virtual bool IsConcurrent() const OVERRIDE; + + virtual GcType GetGcType() const OVERRIDE { return kGcTypeFull; } @@ -131,7 +133,7 @@ class MarkSweep : public GarbageCollector { void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Update and mark references from immune spaces. + // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep. 
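The OVERRIDE and FINAL annotations threaded through the collector headers above are cheap insurance: without them, a base-class signature change silently turns a subclass override into an unrelated virtual. A toy sketch of the failure mode they catch, not the real collector hierarchy:

    #include "base/macros.h"  // supplies the OVERRIDE macro used below

    struct Collector {
      virtual ~Collector() {}
      virtual void MarkingPhase() {}
    };

    struct StickyCollector : Collector {
      // If Collector::MarkingPhase() is renamed or its signature changes,
      // this declaration stops compiling instead of silently shadowing it.
      void MarkingPhase() OVERRIDE {}
    };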
virtual void UpdateAndMarkModUnion() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -140,7 +142,8 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Sweeps unmarked objects to complete the garbage collection. + // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps + // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap. virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Sweeps unmarked objects to complete the garbage collection. @@ -232,7 +235,7 @@ class MarkSweep : public GarbageCollector { EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Mark the vm thread roots. - virtual void MarkThreadRoots(Thread* self) + void MarkThreadRoots(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h index 3b788f4..44ae9e9 100644 --- a/runtime/gc/collector/partial_mark_sweep.h +++ b/runtime/gc/collector/partial_mark_sweep.h @@ -26,7 +26,8 @@ namespace collector { class PartialMarkSweep : public MarkSweep { public: - virtual GcType GetGcType() const { + // Virtual as overridden by StickyMarkSweep. + virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -35,8 +36,9 @@ class PartialMarkSweep : public MarkSweep { protected: // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial - // collections, ie the Zygote space. Also mark this space is immune. - virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by + // StickyMarkSweep. + virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep); diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index fe8c253..a4c9dea 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -133,13 +133,15 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref immune_end_(nullptr), is_large_object_space_immune_(false), to_space_(nullptr), + to_space_live_bitmap_(nullptr), from_space_(nullptr), self_(nullptr), generational_(generational), last_gc_to_space_end_(nullptr), bytes_promoted_(0), whole_heap_collection_(true), - whole_heap_collection_interval_counter_(0) { + whole_heap_collection_interval_counter_(0), + saved_bytes_(0) { } void SemiSpace::InitializePhase() { @@ -263,7 +265,7 @@ class SemiSpaceScanObjectVisitor { semi_space_->ScanObject(obj); } private: - SemiSpace* semi_space_; + SemiSpace* const semi_space_; }; void SemiSpace::MarkReachableObjects() { @@ -467,10 +469,10 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { // of an old generation.) size_t bytes_promoted; space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace(); - forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted); + forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr); if (forward_address == nullptr) { // If out of space, fall back to the to-space. 
- forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated); + forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr); } else { GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted); bytes_promoted_ += bytes_promoted; @@ -511,12 +513,18 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { DCHECK(forward_address != nullptr); } else { // If it's allocated after the last GC (younger), copy it to the to-space. - forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated); + forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr); } // Copy over the object and add it to the mark stack since we still need to update its // references. saved_bytes_ += CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size); + if (kUseBrooksPointer) { + obj->AssertSelfBrooksPointer(); + DCHECK_EQ(forward_address->GetBrooksPointer(), obj); + forward_address->SetBrooksPointer(forward_address); + forward_address->AssertSelfBrooksPointer(); + } if (to_space_live_bitmap_ != nullptr) { to_space_live_bitmap_->Set(forward_address); } @@ -529,6 +537,12 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { // the to-space and have their forward address updated. Objects which have been newly marked are // pushed on the mark stack. Object* SemiSpace::MarkObject(Object* obj) { + if (kUseBrooksPointer) { + // Verify all the objects have the correct forward pointer installed. + if (obj != nullptr) { + obj->AssertSelfBrooksPointer(); + } + } Object* forward_address = obj; if (obj != nullptr && !IsImmune(obj)) { if (from_space_->HasAddress(obj)) { diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index ba97376..c164c5f 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -275,7 +275,7 @@ class SemiSpace : public GarbageCollector { // When true, the generational mode (promotion and the bump pointer // space only collection) is enabled. TODO: move these to a new file // as a new garbage collector? - bool generational_; + const bool generational_; // Used for the generational mode. the end/top of the bump // pointer space at the end of the last collection. diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc index 9e3adb4..ce51ac5 100644 --- a/runtime/gc/collector/sticky_mark_sweep.cc +++ b/runtime/gc/collector/sticky_mark_sweep.cc @@ -59,11 +59,6 @@ void StickyMarkSweep::Sweep(bool swap_bitmaps) { SweepArray(GetHeap()->GetLiveStack(), false); } -void StickyMarkSweep::MarkThreadRoots(Thread* self) { - MarkRootsCheckpoint(self); -} - - } // namespace collector } // namespace gc } // namespace art diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h index b675877..98f2b59 100644 --- a/runtime/gc/collector/sticky_mark_sweep.h +++ b/runtime/gc/collector/sticky_mark_sweep.h @@ -25,9 +25,9 @@ namespace art { namespace gc { namespace collector { -class StickyMarkSweep : public PartialMarkSweep { +class StickyMarkSweep FINAL : public PartialMarkSweep { public: - GcType GetGcType() const { + GcType GetGcType() const OVERRIDE { return kGcTypeSticky; } @@ -37,21 +37,17 @@ class StickyMarkSweep : public PartialMarkSweep { protected: // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the // alloc space will be marked as immune. 
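A note on the Brooks pointer maintenance in the semi-space copy path above: the collector memcpys the whole object, so the copy initially carries the old self-referencing Brooks field; that is what the DCHECK_EQ(forward_address->GetBrooksPointer(), obj) asserts before the fix-up. A sketch under a hypothetical minimal layout (ART's real object layout differs):

    #include <cstdint>
    #include <cstring>

    struct Obj {
      Obj* brooks_;          // read-barrier indirection; must always equal 'this'
      uint8_t payload_[24];  // hypothetical body
    };

    Obj* ForwardCopy(const Obj* from, void* to_space_slot) {
      Obj* to = static_cast<Obj*>(to_space_slot);
      std::memcpy(to, from, sizeof(Obj));
      // The copied brooks_ still points at 'from' here (the DCHECK above);
      // re-install the self reference so reads through the barrier see 'to'.
      to->brooks_ = to;
      return to;
    }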
- void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void MarkReachableObjects() + void MarkReachableObjects() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - virtual void MarkThreadRoots(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void Sweep(bool swap_bitmaps) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); // Don't need to do anything special here since we scan all the cards which may have references // to the newly allocated objects. - virtual void UpdateAndMarkModUnion() { } + void UpdateAndMarkModUnion() OVERRIDE { } private: DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep); diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 3d591f0..e089ef2 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -50,11 +50,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas } mirror::Object* obj; AllocationTimer alloc_timer(this, &obj); - size_t bytes_allocated; - obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated); + size_t bytes_allocated, usable_size; + obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated, + &usable_size); if (UNLIKELY(obj == nullptr)) { bool is_current_allocator = allocator == GetCurrentAllocator(); - obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass); + obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size, + &klass); if (obj == nullptr) { bool after_is_current_allocator = allocator == GetCurrentAllocator(); if (is_current_allocator && !after_is_current_allocator) { @@ -64,9 +66,17 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas return nullptr; } } - obj->SetClass(klass); - pre_fence_visitor(obj); DCHECK_GT(bytes_allocated, 0u); + DCHECK_GT(usable_size, 0u); + obj->SetClass(klass); + if (kUseBrooksPointer) { + obj->SetBrooksPointer(obj); + obj->AssertSelfBrooksPointer(); + } + pre_fence_visitor(obj, usable_size); + if (kIsDebugBuild && Runtime::Current()->IsStarted()) { + CHECK_LE(obj->SizeOf(), usable_size); + } const size_t new_num_bytes_allocated = static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated; // TODO: Deprecate. @@ -144,7 +154,8 @@ inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass template <const bool kInstrumented, const bool kGrow> inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type, - size_t alloc_size, size_t* bytes_allocated) { + size_t alloc_size, size_t* bytes_allocated, + size_t* usable_size) { if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) { return nullptr; } @@ -156,35 +167,36 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator ret = bump_pointer_space_->AllocNonvirtual(alloc_size); if (LIKELY(ret != nullptr)) { *bytes_allocated = alloc_size; + *usable_size = alloc_size; } break; } case kAllocatorTypeRosAlloc: { if (kInstrumented && UNLIKELY(running_on_valgrind_)) { // If running on valgrind, we should be using the instrumented path. 
- ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated); + ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size); } else { DCHECK(!running_on_valgrind_); - ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated); + ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size); } break; } case kAllocatorTypeDlMalloc: { if (kInstrumented && UNLIKELY(running_on_valgrind_)) { // If running on valgrind, we should be using the instrumented path. - ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated); + ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size); } else { DCHECK(!running_on_valgrind_); - ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated); + ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size); } break; } case kAllocatorTypeNonMoving: { - ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated); + ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size); break; } case kAllocatorTypeLOS: { - ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated); + ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size); // Note that the bump pointer spaces aren't necessarily next to // the other continuous spaces like the non-moving alloc space or // the zygote space. diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 58db7a8..8d8cdd6 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -952,6 +952,7 @@ void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) { mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t alloc_size, size_t* bytes_allocated, + size_t* usable_size, mirror::Class** klass) { mirror::Object* ptr = nullptr; bool was_default_allocator = allocator == GetCurrentAllocator(); @@ -968,7 +969,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat return nullptr; } // A GC was in progress and we blocked, retry allocation now that memory has been freed. - ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated); + ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size); } // Loop through our different Gc types and try to Gc until we get enough free memory. @@ -985,13 +986,13 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat } if (gc_ran) { // Did we free sufficient memory for the allocation to succeed? - ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated); + ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size); } } // Allocations have failed after GCs; this is an exceptional state. if (ptr == nullptr) { // Try harder, growing the heap if necessary. 
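Every space Alloc above gains a usable_size out-parameter next to bytes_allocated: usable_size is what the object may legally occupy, bytes_allocated is what the space accounts for, and callers that don't care pass nullptr. A standalone sketch of the convention, borrowing the bump-pointer rounding rule; malloc stands in for the space's backing storage:

    #include <cstddef>
    #include <cstdlib>

    void* AllocRounded(size_t num_bytes, size_t* bytes_allocated,
                       size_t* usable_size) {
      const size_t kAlignment = 8;  // object alignment in the bump pointer space
      size_t rounded = (num_bytes + kAlignment - 1) & ~(kAlignment - 1);
      void* ret = std::malloc(rounded);  // stand-in for the space's storage
      if (ret != nullptr) {
        *bytes_allocated = rounded;      // required out-parameter
        if (usable_size != nullptr) {    // optional: nullptr means "don't care"
          *usable_size = rounded;
        }
      }
      return ret;
    }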
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated); + ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size); } if (ptr == nullptr) { // Most allocations should have succeeded by now, so the heap is really full, really fragmented, @@ -1008,7 +1009,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat *klass = sirt_klass.get(); return nullptr; } - ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated); + ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size); if (ptr == nullptr) { ThrowOutOfMemoryError(self, alloc_size, false); } @@ -1318,9 +1319,10 @@ void Heap::ChangeCollector(CollectorType collector_type) { } // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size. -class ZygoteCompactingCollector : public collector::SemiSpace { +class ZygoteCompactingCollector FINAL : public collector::SemiSpace { public: - explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector") { + explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, "zygote collector"), + bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) { } void BuildBins(space::ContinuousSpace* space) { @@ -1382,7 +1384,7 @@ class ZygoteCompactingCollector : public collector::SemiSpace { // No available space in the bins, place it in the target space instead (grows the zygote // space). size_t bytes_allocated; - forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated); + forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr); if (to_space_live_bitmap_ != nullptr) { to_space_live_bitmap_->Set(forward_address); } else { @@ -1402,6 +1404,12 @@ class ZygoteCompactingCollector : public collector::SemiSpace { } // Copy the object over to its new location. memcpy(reinterpret_cast<void*>(forward_address), obj, object_size); + if (kUseBrooksPointer) { + obj->AssertSelfBrooksPointer(); + DCHECK_EQ(forward_address->GetBrooksPointer(), obj); + forward_address->SetBrooksPointer(forward_address); + forward_address->AssertSelfBrooksPointer(); + } return forward_address; } }; diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 5d44ee1..5d3232f 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -151,18 +151,24 @@ class Heap { ~Heap(); // Allocates and initializes storage for an object instance. 
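On the PreFenceVisitor plumbing (invoked as pre_fence_visitor(obj, usable_size) in heap-inl.h above and defaulted to VoidFunctor on the AllocObject declarations that follow): the functor runs after the class and Brooks pointer are installed but before the fence that publishes the allocation, which is how an array can get its length set race-free. A hedged sketch of such a visitor; AsArray() and SetLength() are assumed mirror helpers:

    #include "mirror/array.h"       // assumed ART headers for this sketch
    #include "mirror/object-inl.h"

    class SetLengthVisitor {
     public:
      explicit SetLengthVisitor(int32_t length) : length_(length) {}
      void operator()(art::mirror::Object* obj, size_t usable_size) const {
        // No other thread can observe obj yet, so no barriers are needed here.
        art::mirror::Array* array = obj->AsArray();
        array->SetLength(length_);
        (void)usable_size;  // a visitor may also use this to size the object
      }
     private:
      const int32_t length_;
    };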
- template <bool kInstrumented> - mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes) + template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor> + mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes, + const PreFenceVisitor& pre_fence_visitor = VoidFunctor()) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentAllocator()); + GetCurrentAllocator(), + pre_fence_visitor); } - template <bool kInstrumented> - mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes) + + template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor> + mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes, + const PreFenceVisitor& pre_fence_visitor = VoidFunctor()) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentNonMovingAllocator()); + GetCurrentNonMovingAllocator(), + pre_fence_visitor); } + template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor> ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator( Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator, @@ -570,7 +576,8 @@ class Heap { // Handles Allocate()'s slow allocation path with GC involved after // an initial allocation attempt failed. mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes, - size_t* bytes_allocated, mirror::Class** klass) + size_t* bytes_allocated, size_t* usable_size, + mirror::Class** klass) LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -583,7 +590,8 @@ class Heap { // that the switch statement is constant optimized in the entrypoints. template <const bool kInstrumented, const bool kGrow> ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type, - size_t alloc_size, size_t* bytes_allocated) + size_t alloc_size, size_t* bytes_allocated, + size_t* usable_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc index 4b86339..07e5088 100644 --- a/runtime/gc/heap_test.cc +++ b/runtime/gc/heap_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "common_test.h" +#include "common_runtime_test.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/space_bitmap-inl.h" #include "mirror/class-inl.h" @@ -25,7 +25,7 @@ namespace art { namespace gc { -class HeapTest : public CommonTest {}; +class HeapTest : public CommonRuntimeTest {}; TEST_F(HeapTest, ClearGrowthLimit) { Heap* heap = Runtime::Current()->GetHeap(); diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h index 74a0274..70ab64b 100644 --- a/runtime/gc/space/bump_pointer_space-inl.h +++ b/runtime/gc/space/bump_pointer_space-inl.h @@ -23,6 +23,19 @@ namespace art { namespace gc { namespace space { +inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) { + num_bytes = RoundUp(num_bytes, kAlignment); + mirror::Object* ret = AllocNonvirtual(num_bytes); + if (LIKELY(ret != nullptr)) { + *bytes_allocated = num_bytes; + if (usable_size != nullptr) { + *usable_size = num_bytes; + } + } + return ret; +} + inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) { DCHECK(IsAligned<kAlignment>(num_bytes)); byte* old_end; @@ -49,6 +62,15 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) { return ret; } +inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t num_bytes = obj->SizeOf(); + if (usable_size != nullptr) { + *usable_size = RoundUp(num_bytes, kAlignment); + } + return num_bytes; +} + } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc index f3f594f..43674ea 100644 --- a/runtime/gc/space/bump_pointer_space.cc +++ b/runtime/gc/space/bump_pointer_space.cc @@ -58,19 +58,6 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map) num_blocks_(0) { } -mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) { - num_bytes = RoundUp(num_bytes, kAlignment); - mirror::Object* ret = AllocNonvirtual(num_bytes); - if (LIKELY(ret != nullptr)) { - *bytes_allocated = num_bytes; - } - return ret; -} - -size_t BumpPointerSpace::AllocationSize(mirror::Object* obj) { - return AllocationSizeNonvirtual(obj); -} - void BumpPointerSpace::Clear() { // Release the pages back to the operating system. CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed"; @@ -185,8 +172,9 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) { } } -bool BumpPointerSpace::IsEmpty() const { - return Begin() == End(); +accounting::SpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() { + LOG(FATAL) << "Unimplemented"; + return nullptr; } uint64_t BumpPointerSpace::GetBytesAllocated() { diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index d7e6f5b..476b833 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -29,12 +29,13 @@ namespace collector { namespace space { -// A bump pointer space is a space where objects may be allocated and garbage collected. -class BumpPointerSpace : public ContinuousMemMapAllocSpace { +// A bump pointer space allocates by incrementing a pointer, it doesn't provide a free +// implementation as its intended to be evacuated. 
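The BumpPointerSpace code above reduces allocation to RoundUp plus a pointer bump, which is why Free()/FreeList() return 0 and the space is reclaimed only by evacuation. A self-contained sketch of the lock-free bump at the heart of AllocNonvirtualWithoutAccounting, with std::atomic standing in for ART's atomics:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    uint8_t* BumpAlloc(std::atomic<uint8_t*>* end, uint8_t* limit, size_t num_bytes) {
      uint8_t* old_end;
      uint8_t* new_end;
      do {
        old_end = end->load(std::memory_order_relaxed);
        new_end = old_end + num_bytes;  // num_bytes already rounded to kAlignment
        if (new_end > limit) {
          return nullptr;  // space exhausted: caller falls back or triggers a GC
        }
        // Reserve [old_end, new_end) with one compare-and-swap on the end pointer.
      } while (!end->compare_exchange_weak(old_end, new_end, std::memory_order_relaxed));
      return old_end;
    }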
+class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { public: typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg); - SpaceType GetType() const { + SpaceType GetType() const OVERRIDE { return kSpaceTypeBumpPointerSpace; } @@ -44,25 +45,28 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace { static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin); // Allocate num_bytes, returns nullptr if the space is full. - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated); + mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE; mirror::Object* AllocNonvirtual(size_t num_bytes); mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes); // Return the storage space required by obj. - virtual size_t AllocationSize(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocationSizeNonvirtual(obj, usable_size); + } // NOPS unless we support free lists. - virtual size_t Free(Thread*, mirror::Object*) { + size_t Free(Thread*, mirror::Object*) OVERRIDE { return 0; } - virtual size_t FreeList(Thread*, size_t, mirror::Object**) { + + size_t FreeList(Thread*, size_t, mirror::Object**) OVERRIDE { return 0; } - size_t AllocationSizeNonvirtual(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return obj->SizeOf(); - } + size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Removes the fork time growth limit on capacity, allowing the application to allocate up to the // maximum reserved size of the heap. @@ -80,16 +84,16 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace { return GetMemMap()->Size(); } - accounting::SpaceBitmap* GetLiveBitmap() const { + accounting::SpaceBitmap* GetLiveBitmap() const OVERRIDE { return nullptr; } - accounting::SpaceBitmap* GetMarkBitmap() const { + accounting::SpaceBitmap* GetMarkBitmap() const OVERRIDE { return nullptr; } // Clear the memory and reset the pointer to the start of the space. - void Clear() LOCKS_EXCLUDED(block_lock_); + void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_); void Dump(std::ostream& os) const; @@ -99,7 +103,10 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace { uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsEmpty() const; + bool IsEmpty() const { + return Begin() == End(); + } + bool Contains(const mirror::Object* obj) const { const byte* byte_obj = reinterpret_cast<const byte*>(obj); @@ -116,7 +123,7 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace { // Allocate a new TLAB, returns false if the allocation failed. bool AllocNewTlab(Thread* self, size_t bytes); - virtual BumpPointerSpace* AsBumpPointerSpace() { + BumpPointerSpace* AsBumpPointerSpace() OVERRIDE { return this; } @@ -124,6 +131,8 @@ class BumpPointerSpace : public ContinuousMemMapAllocSpace { void Walk(ObjectCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + accounting::SpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE; + // Object alignment within the space. 
static constexpr size_t kAlignment = 8; diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h index c14a4e1..02d8b54 100644 --- a/runtime/gc/space/dlmalloc_space-inl.h +++ b/runtime/gc/space/dlmalloc_space-inl.h @@ -18,6 +18,7 @@ #define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_ #include "dlmalloc_space.h" +#include "gc/allocator/dlmalloc.h" #include "thread.h" namespace art { @@ -25,11 +26,12 @@ namespace gc { namespace space { inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes, - size_t* bytes_allocated) { + size_t* bytes_allocated, + size_t* usable_size) { mirror::Object* obj; { MutexLock mu(self, lock_); - obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated); + obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size); } if (LIKELY(obj != NULL)) { // Zero freshly allocated memory, done while not holding the space's lock. @@ -38,15 +40,25 @@ inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_b return obj; } +inline size_t DlMallocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) { + void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj)); + size_t size = mspace_usable_size(obj_ptr); + if (usable_size != nullptr) { + *usable_size = size; + } + return size + kChunkOverhead; +} + inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes, - size_t* bytes_allocated) { + size_t* bytes_allocated, + size_t* usable_size) { mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_for_alloc_, num_bytes)); if (LIKELY(result != NULL)) { if (kDebugSpaces) { CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result) << ") not in bounds of allocation space " << *this; } - size_t allocation_size = AllocationSizeNonvirtual(result); + size_t allocation_size = AllocationSizeNonvirtual(result, usable_size); DCHECK(bytes_allocated != NULL); *bytes_allocated = allocation_size; } diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 1493019..caedaaf 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -25,15 +25,15 @@ #include "thread.h" #include "thread_list.h" #include "utils.h" - -#include <valgrind.h> -#include <memcheck/memcheck.h> +#include "valgrind_malloc_space-inl.h" namespace art { namespace gc { namespace space { -static const bool kPrefetchDuringDlMallocFreeList = true; +static constexpr bool kPrefetchDuringDlMallocFreeList = true; + +template class ValgrindMallocSpace<DlMallocSpace, void*>; DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end, byte* limit, size_t growth_limit) @@ -119,11 +119,8 @@ void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t ini return msp; } -mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { - return AllocNonvirtual(self, num_bytes, bytes_allocated); -} - -mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) { +mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, + size_t* bytes_allocated, size_t* usable_size) { mirror::Object* result; { MutexLock mu(self, lock_); @@ -131,7 +128,7 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, s size_t max_allowed = Capacity(); mspace_set_footprint_limit(mspace_, max_allowed); // Try 
the allocation. - result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated); + result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size); // Shrink back down as small as possible. size_t footprint = mspace_footprint(mspace_); mspace_set_footprint_limit(mspace_, footprint); @@ -145,7 +142,8 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, s return result; } -MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end, +MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map, + void* allocator, byte* begin, byte* end, byte* limit, size_t growth_limit) { return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit); } @@ -156,7 +154,7 @@ size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) { CHECK(ptr != NULL); CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this; } - const size_t bytes_freed = AllocationSizeNonvirtual(ptr); + const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr); if (kRecentFreeCount > 0) { RegisterRecentFree(ptr); } @@ -176,7 +174,7 @@ size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p // The head of chunk for the allocation is sizeof(size_t) behind the allocation. __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t)); } - bytes_freed += AllocationSizeNonvirtual(ptr); + bytes_freed += AllocationSizeNonvirtual(ptr, nullptr); } if (kRecentFreeCount > 0) { @@ -228,10 +226,6 @@ extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) { return dlmalloc_space->MoreCore(increment); } -size_t DlMallocSpace::AllocationSize(mirror::Object* obj) { - return AllocationSizeNonvirtual(obj); -} - size_t DlMallocSpace::Trim() { MutexLock mu(Thread::Current(), lock_); // Trim to release memory at the end of the space. diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index 4507c36..6ea10ad 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -17,7 +17,6 @@ #ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_ #define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_ -#include "gc/allocator/dlmalloc.h" #include "malloc_space.h" #include "space.h" @@ -30,7 +29,8 @@ namespace collector { namespace space { -// An alloc space is a space where objects may be allocated and garbage collected. +// An alloc space is a space where objects may be allocated and garbage collected. Not final as may +// be overridden by a ValgrindMallocSpace. class DlMallocSpace : public MallocSpace { public: // Create a DlMallocSpace from an existing mem_map. @@ -45,22 +45,40 @@ class DlMallocSpace : public MallocSpace { static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit, size_t capacity, byte* requested_begin); - virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, - size_t* bytes_allocated) LOCKS_EXCLUDED(lock_); - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated); - virtual size_t AllocationSize(mirror::Object* obj); - virtual size_t Free(Thread* self, mirror::Object* ptr) + // Virtual to allow ValgrindMallocSpace to intercept. + virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_); + // Virtual to allow ValgrindMallocSpace to intercept. 
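
The accounting in DlMallocSpace::AllocationSizeNonvirtual above, the usable payload reported by the allocator plus a one-word boundary tag (kChunkOverhead), can be reproduced with any malloc that exposes usable sizes. A small stand-alone sketch, using glibc/bionic's malloc_usable_size() as a stand-in for dlmalloc's mspace_usable_size(); the overhead constant mirrors ART's choice and is not something this allocator guarantees:

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <malloc.h>

int main() {
  constexpr size_t kChunkOverhead = sizeof(size_t);  // boundary tag, as in ART
  void* obj = malloc(100);
  size_t usable = malloc_usable_size(obj);     // >= the 100 bytes requested
  size_t accounted = usable + kChunkOverhead;  // what AllocationSize would report
  printf("requested=100 usable=%zu accounted=%zu\n", usable, accounted);
  free(obj);
  return 0;
}
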
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) { + return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size); + } + // Virtual to allow ValgrindMallocSpace to intercept. + virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE { + return AllocationSizeNonvirtual(obj, usable_size); + } + // Virtual to allow ValgrindMallocSpace to intercept. + virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE + LOCKS_EXCLUDED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) + // Virtual to allow ValgrindMallocSpace to intercept. + virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE + LOCKS_EXCLUDED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated); - - size_t AllocationSizeNonvirtual(mirror::Object* obj) { - void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj)); - return mspace_usable_size(obj_ptr) + kChunkOverhead; + // DlMallocSpaces don't have thread local state. + void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE { + } + void RevokeAllThreadLocalBuffers() OVERRIDE { } + // Faster non-virtual allocation path. + mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) LOCKS_EXCLUDED(lock_); + + // Faster non-virtual allocation size path. + size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size); + #ifndef NDEBUG // Override only in the debug build. void CheckMoreCoreForPrecondition(); @@ -70,39 +88,37 @@ class DlMallocSpace : public MallocSpace { return mspace_; } - size_t Trim(); + size_t Trim() OVERRIDE; // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be // in use, indicated by num_bytes equaling zero. - void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_); + void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); // Returns the number of bytes that the space has currently obtained from the system. This is // greater or equal to the amount of live data in the space. - size_t GetFootprint(); + size_t GetFootprint() OVERRIDE; // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore. - size_t GetFootprintLimit(); + size_t GetFootprintLimit() OVERRIDE; // Set the maximum number of bytes that the heap is allowed to obtain from the system via // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow. - void SetFootprintLimit(size_t limit); + void SetFootprintLimit(size_t limit) OVERRIDE; MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end, byte* limit, size_t growth_limit); - uint64_t GetBytesAllocated(); - uint64_t GetObjectsAllocated(); + uint64_t GetBytesAllocated() OVERRIDE; + uint64_t GetObjectsAllocated() OVERRIDE; - // Returns the class of a recently freed object. 
- mirror::Class* FindRecentFreedObject(const mirror::Object* obj); + void Clear() OVERRIDE; - virtual void Clear(); - - virtual bool IsDlMallocSpace() const { + bool IsDlMallocSpace() const OVERRIDE { return true; } - virtual DlMallocSpace* AsDlMallocSpace() { + + DlMallocSpace* AsDlMallocSpace() OVERRIDE { return this; } @@ -111,10 +127,12 @@ class DlMallocSpace : public MallocSpace { byte* limit, size_t growth_limit); private: - mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated) + mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) EXCLUSIVE_LOCKS_REQUIRED(lock_); - void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, bool /*low_memory_mode*/) { + void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, + bool /*low_memory_mode*/) OVERRIDE { return CreateMspace(base, morecore_start, initial_size); } static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size); @@ -122,11 +140,11 @@ class DlMallocSpace : public MallocSpace { // The boundary tag overhead. static const size_t kChunkOverhead = kWordSize; - // Underlying malloc space + // Underlying malloc space. void* const mspace_; - // A mspace pointer used for allocation. Equals to what mspace_ - // points to or nullptr after InvalidateAllocator() is called. + // An mspace pointer used for allocation. Equals mspace_ or nullptr after InvalidateAllocator() + // is called. void* mspace_for_alloc_; friend class collector::MarkSweep; diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 12c5451..76c4d25 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -133,6 +133,11 @@ void ImageSpace::VerifyImageAllocations() { mirror::Object* obj = reinterpret_cast<mirror::Object*>(current); CHECK(live_bitmap_->Test(obj)); CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class"; + if (kUseBrooksPointer) { + CHECK(obj->GetBrooksPointer() == obj) + << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj) + << " brooks_ptr=" << reinterpret_cast<void*>(obj->GetBrooksPointer()); + } current += RoundUp(obj->SizeOf(), kObjectAlignment); } } diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index 987a655..1ca132e 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -57,7 +57,7 @@ LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) { } mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, - size_t* bytes_allocated) { + size_t* bytes_allocated, size_t* usable_size) { std::string error_msg; MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes, PROT_READ | PROT_WRITE, true, &error_msg); @@ -72,6 +72,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t allocation_size = mem_map->Size(); DCHECK(bytes_allocated != NULL); *bytes_allocated = allocation_size; + if (usable_size != nullptr) { + *usable_size = allocation_size; + } num_bytes_allocated_ += allocation_size; total_bytes_allocated_ += allocation_size; ++num_objects_allocated_; @@ -92,9 +95,9 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) { return allocation_size; } -size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj) { +size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* 
usable_size) { MutexLock mu(Thread::Current(), lock_); - MemMaps::iterator found = mem_maps_.find(obj); + auto found = mem_maps_.find(obj); CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live"; return found->second->Size(); } @@ -112,7 +115,7 @@ size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object* void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) { MutexLock mu(Thread::Current(), lock_); - for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) { + for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) { MemMap* mem_map = it->second; callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg); callback(NULL, NULL, 0, arg); @@ -244,14 +247,19 @@ bool FreeListSpace::Contains(const mirror::Object* obj) const { return mem_map_->HasAddress(obj); } -size_t FreeListSpace::AllocationSize(mirror::Object* obj) { +size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) { AllocationHeader* header = GetAllocationHeader(obj); DCHECK(Contains(obj)); DCHECK(!header->IsFree()); - return header->AllocationSize(); + size_t alloc_size = header->AllocationSize(); + if (usable_size != nullptr) { + *usable_size = alloc_size - sizeof(AllocationHeader); + } + return alloc_size; } -mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { +mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) { MutexLock mu(self, lock_); size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment); AllocationHeader temp; @@ -280,13 +288,15 @@ mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* byt new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_); free_end_ -= allocation_size; } else { - return NULL; + return nullptr; } } - DCHECK(bytes_allocated != NULL); + DCHECK(bytes_allocated != nullptr); *bytes_allocated = allocation_size; - + if (usable_size != nullptr) { + *usable_size = allocation_size - sizeof(AllocationHeader); + } // Need to do these inside of the lock. ++num_objects_allocated_; ++total_objects_allocated_; diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index 5274c8d..b1b0c3c 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -32,20 +32,20 @@ namespace space { // Abstraction implemented by all large object spaces. class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { public: - virtual SpaceType GetType() const { + SpaceType GetType() const OVERRIDE { return kSpaceTypeLargeObjectSpace; } - virtual void SwapBitmaps(); - virtual void CopyLiveToMarked(); + void SwapBitmaps(); + void CopyLiveToMarked(); virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0; virtual ~LargeObjectSpace() {} - uint64_t GetBytesAllocated() { + uint64_t GetBytesAllocated() OVERRIDE { return num_bytes_allocated_; } - uint64_t GetObjectsAllocated() { + uint64_t GetObjectsAllocated() OVERRIDE { return num_objects_allocated_; } @@ -57,17 +57,23 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { return total_objects_allocated_; } - size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs); + size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE; - virtual bool IsAllocSpace() const { + // LargeObjectSpaces don't have thread local state. 
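
The FreeListSpace arithmetic above deserves spelling out: each block is prefixed by an AllocationHeader, so the space charges RoundUp(request + sizeof(AllocationHeader), kAlignment) while the caller's usable_size is that amount minus the header. A self-contained check of those invariants; AllocationHeaderToy and the alignment value are simplified stand-ins, not the ART definitions:

#include <cassert>
#include <cstddef>

struct AllocationHeaderToy {  // stand-in; the real header also links free blocks
  size_t alloc_size;
  size_t prev_free;
};

constexpr size_t kAlignment = 16;  // illustrative, not ART's actual value

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

int main() {
  const size_t num_bytes = 100;  // the caller's request
  const size_t allocation_size =
      RoundUp(num_bytes + sizeof(AllocationHeaderToy), kAlignment);
  const size_t usable_size = allocation_size - sizeof(AllocationHeaderToy);
  assert(usable_size >= num_bytes);           // the request always fits
  assert(allocation_size % kAlignment == 0);  // blocks stay aligned
  return 0;
}
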
+ void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE { + } + void RevokeAllThreadLocalBuffers() OVERRIDE { + } + + bool IsAllocSpace() const OVERRIDE { return true; } - virtual AllocSpace* AsAllocSpace() { + AllocSpace* AsAllocSpace() OVERRIDE { return this; } - virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); + void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); protected: explicit LargeObjectSpace(const std::string& name); @@ -85,17 +91,18 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { }; // A discontinuous large object space implemented by individual mmap/munmap calls. -class LargeObjectMapSpace : public LargeObjectSpace { +class LargeObjectMapSpace FINAL : public LargeObjectSpace { public: // Creates a large object space. Allocations into the large object space use memory maps instead // of malloc. static LargeObjectMapSpace* Create(const std::string& name); // Return the storage space required by obj. - size_t AllocationSize(mirror::Object* obj); - mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated); + size_t AllocationSize(mirror::Object* obj, size_t* usable_size); + mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size); size_t Free(Thread* self, mirror::Object* ptr); - void Walk(DlMallocSpace::WalkCallback, void* arg) LOCKS_EXCLUDED(lock_); + void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); // TODO: disabling thread safety analysis as this may be called when we already hold lock_. bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS; @@ -113,16 +120,18 @@ class LargeObjectMapSpace : public LargeObjectSpace { }; // A continuous large object space with a free-list to handle holes. -class FreeListSpace : public LargeObjectSpace { +class FreeListSpace FINAL : public LargeObjectSpace { public: virtual ~FreeListSpace(); static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity); - size_t AllocationSize(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_); - mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated); - size_t Free(Thread* self, mirror::Object* obj); - bool Contains(const mirror::Object* obj) const; - void Walk(DlMallocSpace::WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_); + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE + EXCLUSIVE_LOCKS_REQUIRED(lock_); + mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE; + size_t Free(Thread* self, mirror::Object* obj) OVERRIDE; + bool Contains(const mirror::Object* obj) const OVERRIDE; + void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); // Address at which the space begins. 
byte* Begin() const { diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc index 845b9e3..8a6636d 100644 --- a/runtime/gc/space/large_object_space_test.cc +++ b/runtime/gc/space/large_object_space_test.cc @@ -45,9 +45,10 @@ void LargeObjectSpaceTest::LargeObjectTest() { while (requests.size() < num_allocations) { size_t request_size = test_rand(&rand_seed) % max_allocation_size; size_t allocation_size = 0; - mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size); + mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size, + nullptr); ASSERT_TRUE(obj != nullptr); - ASSERT_EQ(allocation_size, los->AllocationSize(obj)); + ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr)); ASSERT_GE(allocation_size, request_size); // Fill in our magic value. byte magic = (request_size & 0xFF) | 1; @@ -78,7 +79,7 @@ void LargeObjectSpaceTest::LargeObjectTest() { size_t bytes_allocated = 0; // Checks that the coalescing works. - mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated); + mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr); EXPECT_TRUE(obj != nullptr); los->Free(Thread::Current(), obj); diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h index f17bcd2..8e34fd0 100644 --- a/runtime/gc/space/malloc_space.h +++ b/runtime/gc/space/malloc_space.h @@ -52,13 +52,15 @@ class MallocSpace : public ContinuousMemMapAllocSpace { return kSpaceTypeMallocSpace; } - // Allocate num_bytes without allowing the underlying space to grow. - virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, - size_t* bytes_allocated) = 0; // Allocate num_bytes allowing the underlying space to grow. - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0; - // Return the storage space required by obj. - virtual size_t AllocationSize(mirror::Object* obj) = 0; + virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, + size_t* bytes_allocated, size_t* usable_size) = 0; + // Allocate num_bytes without allowing the underlying space to grow. + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) = 0; + // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the + // amount of the storage space that may be used by obj. + virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0; virtual size_t Free(Thread* self, mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) @@ -132,9 +134,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace { static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size, size_t* growth_limit, size_t* capacity, byte* requested_begin); - // When true the low memory mode argument specifies that the heap - // wishes the created allocator to be more aggressive in releasing - // unused pages. + // When true the low memory mode argument specifies that the heap wishes the created allocator to + // be more aggressive in releasing unused pages. 
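
Taken together, the new signatures set one convention for every space: bytes_allocated receives what the space charges, usable_size (when non-null) receives what the caller may actually touch, and request <= usable_size <= bytes_allocated, which is exactly the ordering the space tests below assert with EXPECT_LE. A hedged sketch of a caller against a simplified interface; ToyAllocSpace and the byte counts are invented for illustration:

#include <cassert>
#include <cstddef>

class ToyAllocSpace {
 public:
  virtual ~ToyAllocSpace() {}
  // usable_size may be nullptr when the caller only needs the charged size.
  virtual void* Alloc(size_t num_bytes, size_t* bytes_allocated,
                      size_t* usable_size) = 0;
};

void AllocateAndCheck(ToyAllocSpace* space) {
  size_t bytes_allocated = 0;
  size_t usable_size = 0;
  void* obj = space->Alloc(100, &bytes_allocated, &usable_size);
  if (obj != nullptr) {
    assert(100 <= usable_size);              // the request fits in the payload
    assert(usable_size <= bytes_allocated);  // payload never exceeds the charge
  }
}
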
virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, bool low_memory_mode) = 0; @@ -173,82 +174,6 @@ class MallocSpace : public ContinuousMemMapAllocSpace { DISALLOW_COPY_AND_ASSIGN(MallocSpace); }; -// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and -// after each allocation. 8 bytes provides long/double alignment. -static constexpr size_t kValgrindRedZoneBytes = 8; - -// A specialization of DlMallocSpace/RosAllocSpace that provides information to valgrind wrt allocations. -template <typename BaseMallocSpaceType, typename AllocatorType> -class ValgrindMallocSpace : public BaseMallocSpaceType { - public: - virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) { - void* obj_with_rdz = BaseMallocSpaceType::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes, - bytes_allocated); - if (obj_with_rdz == NULL) { - return NULL; - } - mirror::Object* result = reinterpret_cast<mirror::Object*>( - reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes); - // Make redzones as no access. - VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes); - VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes); - return result; - } - - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { - void* obj_with_rdz = BaseMallocSpaceType::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes, - bytes_allocated); - if (obj_with_rdz == NULL) { - return NULL; - } - mirror::Object* result = reinterpret_cast<mirror::Object*>( - reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes); - // Make redzones as no access. - VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes); - VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes); - return result; - } - - virtual size_t AllocationSize(mirror::Object* obj) { - size_t result = BaseMallocSpaceType::AllocationSize(reinterpret_cast<mirror::Object*>( - reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes)); - return result - 2 * kValgrindRedZoneBytes; - } - - virtual size_t Free(Thread* self, mirror::Object* ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - void* obj_after_rdz = reinterpret_cast<void*>(ptr); - void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes; - // Make redzones undefined. 
- size_t allocation_size = BaseMallocSpaceType::AllocationSize( - reinterpret_cast<mirror::Object*>(obj_with_rdz)); - VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size); - size_t freed = BaseMallocSpaceType::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz)); - return freed - 2 * kValgrindRedZoneBytes; - } - - virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - size_t freed = 0; - for (size_t i = 0; i < num_ptrs; i++) { - freed += Free(self, ptrs[i]); - } - return freed; - } - - ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator, byte* begin, - byte* end, byte* limit, size_t growth_limit, size_t initial_size) : - BaseMallocSpaceType(name, mem_map, allocator, begin, end, limit, growth_limit) { - VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size); - } - - virtual ~ValgrindMallocSpace() { - } - - private: - DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace); -}; - } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h index 5de4265..2627c85 100644 --- a/runtime/gc/space/rosalloc_space-inl.h +++ b/runtime/gc/space/rosalloc_space-inl.h @@ -25,20 +25,32 @@ namespace art { namespace gc { namespace space { -inline mirror::Object* RosAllocSpace::AllocNonvirtual(Thread* self, size_t num_bytes, - size_t* bytes_allocated) { - mirror::Object* obj; - obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated); - // RosAlloc zeroes memory internally. - return obj; +inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) { + void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj)); + // obj is a valid object. Use its class in the header to get the size. + // Don't use verification since the object may be dead if we are sweeping. 
+ size_t size = obj->SizeOf<kVerifyNone>(); + size_t size_by_size = rosalloc_->UsableSize(size); + if (kIsDebugBuild) { + size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr); + if (size_by_size != size_by_ptr) { + LOG(INFO) << "Found a bad sized obj of size " << size + << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec + << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr; + } + DCHECK_EQ(size_by_size, size_by_ptr); + } + if (usable_size != nullptr) { + *usable_size = size_by_size; + } + return size_by_size; } -inline mirror::Object* RosAllocSpace::AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, - size_t* bytes_allocated) { +inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes, + size_t* bytes_allocated, size_t* usable_size) { size_t rosalloc_size = 0; mirror::Object* result = reinterpret_cast<mirror::Object*>( - rosalloc_for_alloc_->Alloc(self, num_bytes, - &rosalloc_size)); + rosalloc_for_alloc_->Alloc(self, num_bytes, &rosalloc_size)); if (LIKELY(result != NULL)) { if (kDebugSpaces) { CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result) @@ -46,6 +58,10 @@ inline mirror::Object* RosAllocSpace::AllocWithoutGrowthLocked(Thread* self, siz } DCHECK(bytes_allocated != NULL); *bytes_allocated = rosalloc_size; + DCHECK_EQ(rosalloc_size, rosalloc_->UsableSize(result)); + if (usable_size != nullptr) { + *usable_size = rosalloc_size; + } } return result; } diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index cc6c1d9..fe8421d 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -26,15 +26,15 @@ #include "thread.h" #include "thread_list.h" #include "utils.h" - -#include <valgrind.h> -#include <memcheck/memcheck.h> +#include "valgrind_malloc_space-inl.h" namespace art { namespace gc { namespace space { -static const bool kPrefetchDuringRosAllocFreeList = true; +static constexpr bool kPrefetchDuringRosAllocFreeList = true; + +template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>; RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map, art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end, @@ -45,9 +45,9 @@ RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map, } RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name, - size_t starting_size, - size_t initial_size, size_t growth_limit, - size_t capacity, bool low_memory_mode) { + size_t starting_size, size_t initial_size, + size_t growth_limit, size_t capacity, + bool low_memory_mode) { DCHECK(mem_map != nullptr); allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size, low_memory_mode); @@ -63,19 +63,18 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin } // Everything is set so record in immutable structure and leave - RosAllocSpace* space; byte* begin = mem_map->Begin(); if (RUNNING_ON_VALGRIND > 0) { - space = new ValgrindMallocSpace<RosAllocSpace, art::gc::allocator::RosAlloc*>( + return new ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>( name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size); } else { - space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit); + return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit); } - return space; } -RosAllocSpace* RosAllocSpace::Create(const std::string& name, 
size_t initial_size, size_t growth_limit, - size_t capacity, byte* requested_begin, bool low_memory_mode) { +RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, + size_t growth_limit, size_t capacity, byte* requested_begin, + bool low_memory_mode) { uint64_t start_time = 0; if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { start_time = NanoTime(); @@ -129,11 +128,8 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_ return rosalloc; } -mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { - return AllocNonvirtual(self, num_bytes, bytes_allocated); -} - -mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) { +mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, + size_t* bytes_allocated, size_t* usable_size) { mirror::Object* result; { MutexLock mu(self, lock_); @@ -141,7 +137,7 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, s size_t max_allowed = Capacity(); rosalloc_->SetFootprintLimit(max_allowed); // Try the allocation. - result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated); + result = AllocCommon(self, num_bytes, bytes_allocated, usable_size); // Shrink back down as small as possible. size_t footprint = rosalloc_->Footprint(); rosalloc_->SetFootprintLimit(footprint); @@ -163,7 +159,7 @@ size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) { CHECK(ptr != NULL); CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this; } - const size_t bytes_freed = AllocationSizeNonvirtual(ptr); + const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr); if (kRecentFreeCount > 0) { MutexLock mu(self, lock_); RegisterRecentFree(ptr); @@ -183,7 +179,7 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) { __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead])); } - bytes_freed += AllocationSizeNonvirtual(ptr); + bytes_freed += AllocationSizeNonvirtual(ptr, nullptr); } if (kRecentFreeCount > 0) { @@ -220,10 +216,6 @@ extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intpt return rosalloc_space->MoreCore(increment); } -size_t RosAllocSpace::AllocationSize(mirror::Object* obj) { - return AllocationSizeNonvirtual(obj); -} - size_t RosAllocSpace::Trim() { { MutexLock mu(Thread::Current(), lock_); diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h index 72e84f6..bd32196 100644 --- a/runtime/gc/space/rosalloc_space.h +++ b/runtime/gc/space/rosalloc_space.h @@ -30,7 +30,8 @@ namespace collector { namespace space { -// An alloc space is a space where objects may be allocated and garbage collected. +// An alloc space implemented using a runs-of-slots memory allocator. Not final as may be +// overridden by a ValgrindMallocSpace. class RosAllocSpace : public MallocSpace { public: // Create a RosAllocSpace with the requested sizes. 
The requested @@ -44,53 +45,46 @@ class RosAllocSpace : public MallocSpace { size_t growth_limit, size_t capacity, bool low_memory_mode); - virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, - size_t* bytes_allocated) LOCKS_EXCLUDED(lock_); - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated); - virtual size_t AllocationSize(mirror::Object* obj); - virtual size_t Free(Thread* self, mirror::Object* ptr) + mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_); + mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE { + return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size); + } + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE { + return AllocationSizeNonvirtual(obj, usable_size); + } + size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) + size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated); - - size_t AllocationSizeNonvirtual(mirror::Object* obj) - NO_THREAD_SAFETY_ANALYSIS { - // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held. - void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj)); - // obj is a valid object. Use its class in the header to get the size. - // Don't use verification since the object may be dead if we are sweeping. - size_t size = obj->SizeOf<kVerifyNone>(); - size_t size_by_size = rosalloc_->UsableSize(size); - if (kIsDebugBuild) { - size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr); - if (size_by_size != size_by_ptr) { - LOG(INFO) << "Found a bad sized obj of size " << size - << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec - << " size_by_size=" << size_by_size << " size_by_ptr=" << size_by_ptr; - } - DCHECK_EQ(size_by_size, size_by_ptr); - } - return size_by_size; + mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) { + // RosAlloc zeroes memory internally. + return AllocCommon(self, num_bytes, bytes_allocated, usable_size); } - art::gc::allocator::RosAlloc* GetRosAlloc() { + // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held. 
+ size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) + NO_THREAD_SAFETY_ANALYSIS; + + allocator::RosAlloc* GetRosAlloc() const { return rosalloc_; } - size_t Trim(); - void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_); - size_t GetFootprint(); - size_t GetFootprintLimit(); - void SetFootprintLimit(size_t limit); + size_t Trim() OVERRIDE; + void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + size_t GetFootprint() OVERRIDE; + size_t GetFootprintLimit() OVERRIDE; + void SetFootprintLimit(size_t limit) OVERRIDE; - virtual void Clear(); + void Clear() OVERRIDE; MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end, byte* limit, size_t growth_limit); - uint64_t GetBytesAllocated(); - uint64_t GetObjectsAllocated(); + uint64_t GetBytesAllocated() OVERRIDE; + uint64_t GetObjectsAllocated() OVERRIDE; void RevokeThreadLocalBuffers(Thread* thread); void RevokeAllThreadLocalBuffers(); @@ -98,10 +92,11 @@ class RosAllocSpace : public MallocSpace { // Returns the class of a recently freed object. mirror::Class* FindRecentFreedObject(const mirror::Object* obj); - virtual bool IsRosAllocSpace() const { + bool IsRosAllocSpace() const OVERRIDE { return true; } - virtual RosAllocSpace* AsRosAllocSpace() { + + RosAllocSpace* AsRosAllocSpace() OVERRIDE { return this; } @@ -114,9 +109,11 @@ class RosAllocSpace : public MallocSpace { byte* begin, byte* end, byte* limit, size_t growth_limit); private: - mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated); + mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size); - void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, bool low_memory_mode) { + void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, + bool low_memory_mode) OVERRIDE { return CreateRosAlloc(base, morecore_start, initial_size, low_memory_mode); } static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size, @@ -127,11 +124,11 @@ class RosAllocSpace : public MallocSpace { LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); // Underlying rosalloc. - art::gc::allocator::RosAlloc* const rosalloc_; + allocator::RosAlloc* const rosalloc_; - // A rosalloc pointer used for allocation. Equals to what rosalloc_ - // points to or nullptr after InvalidateAllocator() is called. - art::gc::allocator::RosAlloc* rosalloc_for_alloc_; + // The rosalloc pointer used for allocation. Equal to rosalloc_ or nullptr after + // InvalidateAllocator() is called. 
+ allocator::RosAlloc* rosalloc_for_alloc_; friend class collector::MarkSweep; diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc index 32a00bc..4af65a9 100644 --- a/runtime/gc/space/space.cc +++ b/runtime/gc/space/space.cc @@ -37,6 +37,36 @@ std::ostream& operator<<(std::ostream& os, const Space& space) { return os; } +DlMallocSpace* Space::AsDlMallocSpace() { + LOG(FATAL) << "Unreachable"; + return nullptr; +} + +RosAllocSpace* Space::AsRosAllocSpace() { + LOG(FATAL) << "Unreachable"; + return nullptr; +} + +ZygoteSpace* Space::AsZygoteSpace() { + LOG(FATAL) << "Unreachable"; + return nullptr; +} + +BumpPointerSpace* Space::AsBumpPointerSpace() { + LOG(FATAL) << "Unreachable"; + return nullptr; +} + +AllocSpace* Space::AsAllocSpace() { + LOG(FATAL) << "Unimplemented"; + return nullptr; +} + +ContinuousMemMapAllocSpace* Space::AsContinuousMemMapAllocSpace() { + LOG(FATAL) << "Unimplemented"; + return nullptr; +} + DiscontinuousSpace::DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy) : Space(name, gc_retention_policy), diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index 98e6f65..0f8f38a 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -115,35 +115,24 @@ class Space { virtual bool IsDlMallocSpace() const { return false; } - virtual DlMallocSpace* AsDlMallocSpace() { - LOG(FATAL) << "Unreachable"; - return nullptr; - } + virtual DlMallocSpace* AsDlMallocSpace(); + virtual bool IsRosAllocSpace() const { return false; } - virtual RosAllocSpace* AsRosAllocSpace() { - LOG(FATAL) << "Unreachable"; - return nullptr; - } + virtual RosAllocSpace* AsRosAllocSpace(); - // Is this the space allocated into by the Zygote and no-longer in use? + // Is this the space allocated into by the Zygote and no longer in use for allocation? bool IsZygoteSpace() const { return GetType() == kSpaceTypeZygoteSpace; } - virtual ZygoteSpace* AsZygoteSpace() { - LOG(FATAL) << "Unreachable"; - return nullptr; - } + virtual ZygoteSpace* AsZygoteSpace(); // Is this space a bump pointer space? bool IsBumpPointerSpace() const { return GetType() == kSpaceTypeBumpPointerSpace; } - virtual BumpPointerSpace* AsBumpPointerSpace() { - LOG(FATAL) << "Unreachable"; - return nullptr; - } + virtual BumpPointerSpace* AsBumpPointerSpace(); // Does this space hold large objects and implement the large object space abstraction? bool IsLargeObjectSpace() const { @@ -164,18 +153,12 @@ class Space { virtual bool IsAllocSpace() const { return false; } - virtual AllocSpace* AsAllocSpace() { - LOG(FATAL) << "Unimplemented"; - return nullptr; - } + virtual AllocSpace* AsAllocSpace(); virtual bool IsContinuousMemMapAllocSpace() const { return false; } - virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { - LOG(FATAL) << "Unimplemented"; - return nullptr; - } + virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace(); virtual ~Space() {} @@ -220,10 +203,11 @@ class AllocSpace { // Allocate num_bytes without allowing growth. If the allocation // succeeds, the output parameter bytes_allocated will be set to the // actually allocated bytes which is >= num_bytes. - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0; + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) = 0; // Return the storage space required by obj.
- virtual size_t AllocationSize(mirror::Object* obj) = 0; + virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0; // Returns how many bytes were freed. virtual size_t Free(Thread* self, mirror::Object* ptr) = 0; @@ -231,15 +215,13 @@ class AllocSpace { // Returns how many bytes were freed. virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0; - // Revoke any sort of thread-local buffers that are used to speed up - // allocations for the given thread, if the alloc space - // implementation uses any. No-op by default. - virtual void RevokeThreadLocalBuffers(Thread* /*thread*/) {} + // Revoke any sort of thread-local buffers that are used to speed up allocations for the given + // thread, if the alloc space implementation uses any. + virtual void RevokeThreadLocalBuffers(Thread* thread) = 0; - // Revoke any sort of thread-local buffers that are used to speed up - // allocations for all the threads, if the alloc space - // implementation uses any. No-op by default. - virtual void RevokeAllThreadLocalBuffers() {} + // Revoke any sort of thread-local buffers that are used to speed up allocations for all the + // threads, if the alloc space implementation uses any. + virtual void RevokeAllThreadLocalBuffers() = 0; protected: AllocSpace() {} @@ -393,17 +375,17 @@ class MemMapSpace : public ContinuousSpace { // Used by the heap compaction interface to enable copying from one type of alloc space to another. class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { public: - virtual bool IsAllocSpace() const { + bool IsAllocSpace() const OVERRIDE { return true; } - virtual AllocSpace* AsAllocSpace() { + AllocSpace* AsAllocSpace() OVERRIDE { return this; } - virtual bool IsContinuousMemMapAllocSpace() const { + bool IsContinuousMemMapAllocSpace() const OVERRIDE { return true; } - virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { + ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { return this; } @@ -414,22 +396,19 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. void SwapBitmaps(); - virtual void Clear() { - LOG(FATAL) << "Unimplemented"; - } + // Free all memory associated with this space. 
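
The Is*/As* pairs being reshuffled here implement checked downcasts without RTTI: the Space base definitions (now out-of-line in space.cc) abort, and each concrete space overrides its own As* accessor to return this. A compact model of the idiom, with toy class names and abort() standing in for LOG(FATAL):

#include <cstdio>
#include <cstdlib>

class ToyBumpPointerSpace;

class ToySpace {
 public:
  virtual ~ToySpace() {}
  virtual bool IsBumpPointerSpace() const { return false; }
  virtual ToyBumpPointerSpace* AsBumpPointerSpace() {
    std::fprintf(stderr, "Unreachable\n");  // LOG(FATAL) in ART
    std::abort();
  }
};

class ToyBumpPointerSpace : public ToySpace {
 public:
  bool IsBumpPointerSpace() const override { return true; }
  ToyBumpPointerSpace* AsBumpPointerSpace() override { return this; }
};

void Visit(ToySpace* space) {
  if (space->IsBumpPointerSpace()) {  // query first...
    ToyBumpPointerSpace* bps = space->AsBumpPointerSpace();  // ...then cast
    (void)bps;  // use the concrete interface here
  }
}

Moving the fatal default bodies out of the header also keeps them from being emitted in every translation unit that includes space.h.
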
+ virtual void Clear() = 0; - virtual accounting::SpaceBitmap* GetLiveBitmap() const { + accounting::SpaceBitmap* GetLiveBitmap() const { return live_bitmap_.get(); } - virtual accounting::SpaceBitmap* GetMarkBitmap() const { + + accounting::SpaceBitmap* GetMarkBitmap() const { return mark_bitmap_.get(); } - virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); - virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { - LOG(FATAL) << "Unimplemented"; - return nullptr; - } + void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); + virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() = 0; protected: UniquePtr<accounting::SpaceBitmap> live_bitmap_; diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index 093967e..cb036f8 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -19,19 +19,19 @@ #include "zygote_space.h" -#include "common_test.h" +#include <stdint.h> + +#include "common_runtime_test.h" #include "globals.h" #include "UniquePtr.h" #include "mirror/array-inl.h" #include "mirror/object-inl.h" -#include <stdint.h> - namespace art { namespace gc { namespace space { -class SpaceTest : public CommonTest { +class SpaceTest : public CommonRuntimeTest { public: void AddSpace(ContinuousSpace* space) { // For RosAlloc, revoke the thread local runs before moving onto a @@ -49,6 +49,9 @@ class SpaceTest : public CommonTest { null_loader); EXPECT_TRUE(byte_array_class != nullptr); o->SetClass(byte_array_class); + if (kUseBrooksPointer) { + o->SetBrooksPointer(o.get()); + } mirror::Array* arr = o->AsArray<kVerifyNone>(); size_t header_size = SizeOfZeroLengthByteArray(); int32_t length = size - header_size; @@ -120,7 +123,7 @@ void SpaceTest::InitTestBody(CreateSpaceFn create_space) { // allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that // the GC works with the ZygoteSpace. void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) { - size_t dummy = 0; + size_t dummy; MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr)); ASSERT_TRUE(space != nullptr); @@ -130,47 +133,60 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) { ScopedObjectAccess soa(self); // Succeeds, fits without adjusting the footprint limit. - SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy)); + size_t ptr1_bytes_allocated, ptr1_usable_size; + SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &ptr1_bytes_allocated, + &ptr1_usable_size)); EXPECT_TRUE(ptr1.get() != nullptr); + EXPECT_LE(1U * MB, ptr1_bytes_allocated); + EXPECT_LE(1U * MB, ptr1_usable_size); + EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated); InstallClass(ptr1, 1 * MB); // Fails, requires a higher footprint limit. - mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy); + mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr2 == nullptr); // Succeeds, adjusts the footprint. 
- size_t ptr3_bytes_allocated; - SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated)); + size_t ptr3_bytes_allocated, ptr3_usable_size; + SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated, + &ptr3_usable_size)); EXPECT_TRUE(ptr3.get() != nullptr); EXPECT_LE(8U * MB, ptr3_bytes_allocated); + EXPECT_LE(8U * MB, ptr3_usable_size); + EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated); InstallClass(ptr3, 8 * MB); // Fails, requires a higher footprint limit. - mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy); + mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr4 == nullptr); // Also fails, requires a higher allowed footprint. - mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy); + mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr5 == nullptr); // Release some memory. - size_t free3 = space->AllocationSize(ptr3.get()); + size_t free3 = space->AllocationSize(ptr3.get(), nullptr); EXPECT_EQ(free3, ptr3_bytes_allocated); EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr))); EXPECT_LE(8U * MB, free3); // Succeeds, now that memory has been freed. - SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy)); + size_t ptr6_bytes_allocated, ptr6_usable_size; + SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &ptr6_bytes_allocated, + &ptr6_usable_size)); EXPECT_TRUE(ptr6.get() != nullptr); + EXPECT_LE(9U * MB, ptr6_bytes_allocated); + EXPECT_LE(9U * MB, ptr6_usable_size); + EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated); InstallClass(ptr6, 9 * MB); // Final clean up. - size_t free1 = space->AllocationSize(ptr1.get()); + size_t free1 = space->AllocationSize(ptr1.get(), nullptr); space->Free(self, ptr1.reset(nullptr)); EXPECT_LE(1U * MB, free1); // Make sure that the zygote space isn't directly at the start of the space. - space->Alloc(self, 1U * MB, &dummy); + EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr); gc::Heap* heap = Runtime::Current()->GetHeap(); space::Space* old_space = space; @@ -186,22 +202,28 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) { AddSpace(space); // Succeeds, fits without adjusting the footprint limit. - ptr1.reset(space->Alloc(self, 1 * MB, &dummy)); + ptr1.reset(space->Alloc(self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)); EXPECT_TRUE(ptr1.get() != nullptr); + EXPECT_LE(1U * MB, ptr1_bytes_allocated); + EXPECT_LE(1U * MB, ptr1_usable_size); + EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated); InstallClass(ptr1, 1 * MB); // Fails, requires a higher footprint limit. - ptr2 = space->Alloc(self, 8 * MB, &dummy); + ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr2 == nullptr); // Succeeds, adjusts the footprint. - ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &dummy)); + ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)); EXPECT_TRUE(ptr3.get() != nullptr); + EXPECT_LE(2U * MB, ptr3_bytes_allocated); + EXPECT_LE(2U * MB, ptr3_usable_size); + EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated); InstallClass(ptr3, 2 * MB); space->Free(self, ptr3.reset(nullptr)); // Final clean up. 
- free1 = space->AllocationSize(ptr1.get()); + free1 = space->AllocationSize(ptr1.get(), nullptr); space->Free(self, ptr1.reset(nullptr)); EXPECT_LE(1U * MB, free1); } @@ -217,42 +239,55 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) { AddSpace(space); // Succeeds, fits without adjusting the footprint limit. - SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy)); + size_t ptr1_bytes_allocated, ptr1_usable_size; + SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &ptr1_bytes_allocated, + &ptr1_usable_size)); EXPECT_TRUE(ptr1.get() != nullptr); + EXPECT_LE(1U * MB, ptr1_bytes_allocated); + EXPECT_LE(1U * MB, ptr1_usable_size); + EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated); InstallClass(ptr1, 1 * MB); // Fails, requires a higher footprint limit. - mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy); + mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr2 == nullptr); // Succeeds, adjusts the footprint. - size_t ptr3_bytes_allocated; - SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated)); + size_t ptr3_bytes_allocated, ptr3_usable_size; + SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated, + &ptr3_usable_size)); EXPECT_TRUE(ptr3.get() != nullptr); EXPECT_LE(8U * MB, ptr3_bytes_allocated); + EXPECT_LE(8U * MB, ptr3_usable_size); + EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated); InstallClass(ptr3, 8 * MB); // Fails, requires a higher footprint limit. - mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy); + mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr4 == nullptr); // Also fails, requires a higher allowed footprint. - mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy); + mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr); EXPECT_TRUE(ptr5 == nullptr); // Release some memory. - size_t free3 = space->AllocationSize(ptr3.get()); + size_t free3 = space->AllocationSize(ptr3.get(), nullptr); EXPECT_EQ(free3, ptr3_bytes_allocated); space->Free(self, ptr3.reset(nullptr)); EXPECT_LE(8U * MB, free3); // Succeeds, now that memory has been freed. - SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy)); + size_t ptr6_bytes_allocated, ptr6_usable_size; + SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &ptr6_bytes_allocated, + &ptr6_usable_size)); EXPECT_TRUE(ptr6.get() != nullptr); + EXPECT_LE(9U * MB, ptr6_bytes_allocated); + EXPECT_LE(9U * MB, ptr6_usable_size); + EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated); InstallClass(ptr6, 9 * MB); // Final clean up. - size_t free1 = space->AllocationSize(ptr1.get()); + size_t free1 = space->AllocationSize(ptr1.get(), nullptr); space->Free(self, ptr1.reset(nullptr)); EXPECT_LE(1U * MB, free1); } @@ -269,14 +304,17 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) { // Succeeds, fits without adjusting the max allowed footprint. 
mirror::Object* lots_of_objects[1024]; for (size_t i = 0; i < arraysize(lots_of_objects); i++) { - size_t allocation_size = 0; + size_t allocation_size, usable_size; size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray(); - lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size); + lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size, + &usable_size); EXPECT_TRUE(lots_of_objects[i] != nullptr); SirtRef<mirror::Object> obj(self, lots_of_objects[i]); InstallClass(obj, size_of_zero_length_byte_array); lots_of_objects[i] = obj.get(); - EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i])); + size_t computed_usable_size; + EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size)); + EXPECT_EQ(usable_size, computed_usable_size); } // Release memory and check pointers are nullptr. @@ -287,13 +325,15 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) { // Succeeds, fits by adjusting the max allowed footprint. for (size_t i = 0; i < arraysize(lots_of_objects); i++) { - size_t allocation_size = 0; - lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size); + size_t allocation_size, usable_size; + lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size, &usable_size); EXPECT_TRUE(lots_of_objects[i] != nullptr); SirtRef<mirror::Object> obj(self, lots_of_objects[i]); InstallClass(obj, 1024); lots_of_objects[i] = obj.get(); - EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i])); + size_t computed_usable_size; + EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size)); + EXPECT_EQ(usable_size, computed_usable_size); } // Release memory and check pointers are nullptr @@ -354,16 +394,16 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t SirtRef<mirror::Object> object(self, nullptr); size_t bytes_allocated = 0; if (round <= 1) { - object.reset(space->Alloc(self, alloc_size, &bytes_allocated)); + object.reset(space->Alloc(self, alloc_size, &bytes_allocated, nullptr)); } else { - object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated)); + object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated, nullptr)); } footprint = space->GetFootprint(); EXPECT_GE(space->Size(), footprint); // invariant if (object.get() != nullptr) { // allocation succeeded InstallClass(object, alloc_size); lots_of_objects[i] = object.get(); - size_t allocation_size = space->AllocationSize(object.get()); + size_t allocation_size = space->AllocationSize(object.get(), nullptr); EXPECT_EQ(bytes_allocated, allocation_size); if (object_size > 0) { EXPECT_GE(allocation_size, static_cast<size_t>(object_size)); @@ -418,7 +458,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t if (object == nullptr) { continue; } - size_t allocation_size = space->AllocationSize(object); + size_t allocation_size = space->AllocationSize(object, nullptr); if (object_size > 0) { EXPECT_GE(allocation_size, static_cast<size_t>(object_size)); } else { @@ -447,9 +487,10 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4); size_t bytes_allocated = 0; if (round <= 1) { - large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated)); + large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated, nullptr)); 
  } else {
-    large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated));
+    large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated,
+                                              nullptr));
  }
  EXPECT_TRUE(large_object.get() != nullptr);
  InstallClass(large_object, three_quarters_space);
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
new file mode 100644
index 0000000..4b0c8e3
--- /dev/null
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_
+
+#include "valgrind_malloc_space.h"
+
+#include <memcheck/memcheck.h>
+
+namespace art {
+namespace gc {
+namespace space {
+
+// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
+// after each allocation. 8 bytes provides long/double alignment.
+static constexpr size_t kValgrindRedZoneBytes = 8;
+
+template <typename S, typename A>
+mirror::Object* ValgrindMallocSpace<S, A>::AllocWithGrowth(Thread* self, size_t num_bytes,
+                                                           size_t* bytes_allocated,
+                                                           size_t* usable_size) {
+  void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+                                          bytes_allocated, usable_size);
+  if (obj_with_rdz == nullptr) {
+    return nullptr;
+  }
+  if (usable_size != nullptr) {
+    *usable_size -= 2 * kValgrindRedZoneBytes;
+  }
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(
+      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+  // Mark the redzones as no-access.
+  VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
+  VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
+  return result;
+}
+
+template <typename S, typename A>
+mirror::Object* ValgrindMallocSpace<S, A>::Alloc(Thread* self, size_t num_bytes,
+                                                 size_t* bytes_allocated,
+                                                 size_t* usable_size) {
+  void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes, bytes_allocated,
+                                usable_size);
+  if (obj_with_rdz == nullptr) {
+    return nullptr;
+  }
+  if (usable_size != nullptr) {
+    *usable_size -= 2 * kValgrindRedZoneBytes;
+  }
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(
+      reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
+  // Mark the redzones as no-access.
+ VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes); + VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes); + return result; +} + +template <typename S, typename A> +size_t ValgrindMallocSpace<S, A>::AllocationSize(mirror::Object* obj, size_t* usable_size) { + size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>( + reinterpret_cast<byte*>(obj) - kValgrindRedZoneBytes), usable_size); + if (usable_size != nullptr) { + *usable_size -= 2 * kValgrindRedZoneBytes; + } + return result - 2 * kValgrindRedZoneBytes; +} + +template <typename S, typename A> +size_t ValgrindMallocSpace<S, A>::Free(Thread* self, mirror::Object* ptr) { + void* obj_after_rdz = reinterpret_cast<void*>(ptr); + void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes; + // Make redzones undefined. + size_t allocation_size = + AllocationSize(reinterpret_cast<mirror::Object*>(obj_with_rdz), nullptr); + VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size); + size_t freed = S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz)); + return freed - 2 * kValgrindRedZoneBytes; +} + +template <typename S, typename A> +size_t ValgrindMallocSpace<S, A>::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { + size_t freed = 0; + for (size_t i = 0; i < num_ptrs; i++) { + freed += Free(self, ptrs[i]); + } + return freed; +} + +template <typename S, typename A> +ValgrindMallocSpace<S, A>::ValgrindMallocSpace(const std::string& name, MemMap* mem_map, + A allocator, byte* begin, + byte* end, byte* limit, size_t growth_limit, + size_t initial_size) : + S(name, mem_map, allocator, begin, end, limit, growth_limit) { + VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size); +} + +} // namespace space +} // namespace gc +} // namespace art + +#endif // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_ diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h new file mode 100644 index 0000000..8d00b30 --- /dev/null +++ b/runtime/gc/space/valgrind_malloc_space.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_ +#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_ + +#include "malloc_space.h" + +#include <valgrind.h> + +namespace art { +namespace gc { +namespace space { + +// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around +// allocations. 
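For orientation, the allocation layout produced by the -inl.h above looks like this (a sketch; kValgrindRedZoneBytes is the 8-byte constant defined there):

// | red zone (8B) | caller-visible object (num_bytes) | red zone (8B) |
// ^ obj_with_rdz   ^ result = obj_with_rdz + kValgrindRedZoneBytes
//
// Both red zones are VALGRIND_MAKE_MEM_NOACCESS'd, so any buffer underrun or
// overrun on the object is flagged by Valgrind, and the reported usable_size
// is shrunk by 2 * kValgrindRedZoneBytes so callers never touch the zones.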
+template <typename BaseMallocSpaceType, typename AllocatorType>
+class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
+ public:
+  mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                  size_t* usable_size) OVERRIDE;
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                        size_t* usable_size) OVERRIDE;
+
+  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
+
+  size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
+                      byte* begin, byte* end, byte* limit, size_t growth_limit,
+                      size_t initial_size);
+  virtual ~ValgrindMallocSpace() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a303765..a60ab38 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -57,6 +57,10 @@ ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map,
   return zygote_space;
 }
 
+void ZygoteSpace::Clear() {
+  LOG(FATAL) << "Unimplemented";
+}
+
 ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
     : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                  kGcRetentionPolicyFullCollect),
@@ -71,6 +75,27 @@ void ZygoteSpace::Dump(std::ostream& os) const {
      << ",name=\"" << GetName() << "\"]";
 }
 
+mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+                                   size_t* usable_size) {
+  LOG(FATAL) << "Unimplemented";
+  return nullptr;
+}
+
+size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
+size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
+size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+  LOG(FATAL) << "Unimplemented";
+  return 0;
+}
+
 void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   DCHECK(context->space->IsZygoteSpace());
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index e0035b3..8cd1a9f 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -30,7 +30,7 @@ class SpaceBitmap;
 namespace space {
 
 // A zygote space is a space which you cannot allocate into or free from.
-class ZygoteSpace : public ContinuousMemMapAllocSpace {
+class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
  public:
   // Returns the remaining storage in the out_map field.
static ZygoteSpace* Create(const std::string& name, MemMap* mem_map, @@ -39,40 +39,40 @@ class ZygoteSpace : public ContinuousMemMapAllocSpace { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void Dump(std::ostream& os) const; - virtual SpaceType GetType() const { + + SpaceType GetType() const OVERRIDE { return kSpaceTypeZygoteSpace; } - virtual ZygoteSpace* AsZygoteSpace() { + + ZygoteSpace* AsZygoteSpace() OVERRIDE { return this; } - virtual mirror::Object* AllocWithGrowth(Thread* /*self*/, size_t /*num_bytes*/, - size_t* /*bytes_allocated*/) { - LOG(FATAL) << "Unimplemented"; - return nullptr; - } - virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { - LOG(FATAL) << "Unimplemented"; - return nullptr; - } - virtual size_t AllocationSize(mirror::Object* obj) { - LOG(FATAL) << "Unimplemented"; - return 0; - } - virtual size_t Free(Thread* self, mirror::Object* ptr) { - LOG(FATAL) << "Unimplemented"; - return 0; + + mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, + size_t* usable_size) OVERRIDE; + + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE; + + size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE; + + size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE; + + // ZygoteSpaces don't have thread local state. + void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE { } - virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { - LOG(FATAL) << "Unimplemented"; - return 0; + void RevokeAllThreadLocalBuffers() OVERRIDE { } - virtual uint64_t GetBytesAllocated() { + + uint64_t GetBytesAllocated() { return Size(); } - virtual uint64_t GetObjectsAllocated() { + + uint64_t GetObjectsAllocated() { return objects_allocated_; } + void Clear(); + protected: virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { return &SweepCallback; diff --git a/runtime/globals.h b/runtime/globals.h index 8c3ae56..83e3028 100644 --- a/runtime/globals.h +++ b/runtime/globals.h @@ -19,6 +19,7 @@ #include <stddef.h> #include <stdint.h> +#include "brooks_pointer.h" namespace art { @@ -92,6 +93,12 @@ static constexpr bool kMovingMethods = false; // code, if possible. static constexpr bool kEmbedClassInCode = true; +#ifdef USE_BROOKS_POINTER +static constexpr bool kUseBrooksPointer = true; +#else +static constexpr bool kUseBrooksPointer = false; +#endif + } // namespace art #endif // ART_RUNTIME_GLOBALS_H_ diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc index 78e1992..9b42e59 100644 --- a/runtime/indirect_reference_table_test.cc +++ b/runtime/indirect_reference_table_test.cc @@ -14,15 +14,14 @@ * limitations under the License. 
 */

-#include "common_test.h"
-
 #include "indirect_reference_table.h"
+
+#include "common_runtime_test.h"
 #include "mirror/object-inl.h"

 namespace art {

-class IndirectReferenceTableTest : public CommonTest {
-};
+class IndirectReferenceTableTest : public CommonRuntimeTest {};

 static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index c328245..8987127 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -16,13 +16,13 @@

 #include "intern_table.h"

-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "mirror/object.h"
 #include "sirt_ref.h"

 namespace art {

-class InternTableTest : public CommonTest {};
+class InternTableTest : public CommonRuntimeTest {};

 TEST_F(InternTableTest, Intern) {
   ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index cb9e2e8..40d4ea3 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -372,22 +372,12 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
   void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
   ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, last_shadow_frame, method, 0, memory));
   self->PushShadowFrame(shadow_frame);
-  self->EndAssertNoThreadSuspension(old_cause);

   size_t cur_reg = num_regs - num_ins;
   if (!method->IsStatic()) {
     CHECK(receiver != NULL);
     shadow_frame->SetVRegReference(cur_reg, receiver);
     ++cur_reg;
-  } else if (UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
-    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
-    if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
-      CHECK(self->IsExceptionPending());
-      self->PopShadowFrame();
-      return;
-    }
-    CHECK(sirt_c->IsInitializing());
   }
   const char* shorty = mh.GetShorty();
   for (size_t shorty_pos = 0, arg_pos = 0; cur_reg < num_regs; ++shorty_pos, ++arg_pos, cur_reg++) {
@@ -410,6 +400,17 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
       break;
     }
   }
+  self->EndAssertNoThreadSuspension(old_cause);
+  // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
+  if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
+    if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
+      CHECK(self->IsExceptionPending());
+      self->PopShadowFrame();
+      return;
+    }
+  }
   if (LIKELY(!method->IsNative())) {
     JValue r = Execute(self, mh, code_item, *shadow_frame, JValue());
     if (result != NULL) {
@@ -418,6 +419,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
   } else {
     // We don't expect to be asked to interpret native code (which is entered via a JNI compiler
     // generated stub) except during testing and image writing.
+    // Update args to be the args in the shadow frame since the input ones could hold stale
+    // reference pointers due to moving GC.
+    args = shadow_frame->GetVRegArgs(method->IsStatic() ?
0 : 1); if (!Runtime::Current()->IsStarted()) { UnstartedRuntimeJni(self, method, receiver, args, result); } else { diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 83a1fbc..f76d50c 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -197,7 +197,8 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, } return false; } - Object* newArray = Array::Alloc<true>(self, arrayClass, length); + Object* newArray = Array::Alloc<true>(self, arrayClass, length, arrayClass->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator()); if (UNLIKELY(newArray == NULL)) { DCHECK(self->IsExceptionPending()); return false; diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index 76aa734..37fb2db 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -263,7 +263,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) // See if the override ClassLoader is set for gtests. class_loader = soa.Self()->GetClassLoaderOverride(); if (class_loader != nullptr) { - // If so, CommonTest should have set UseCompileTimeClassPath. + // If so, CommonCompilerTest should have set UseCompileTimeClassPath. CHECK(Runtime::Current()->UseCompileTimeClassPath()); return class_loader; } diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc index 63bc45c..28408d2 100644 --- a/runtime/jni_internal_test.cc +++ b/runtime/jni_internal_test.cc @@ -20,7 +20,7 @@ #include <cfloat> #include <cmath> -#include "common_test.h" +#include "common_compiler_test.h" #include "invoke_arg_array_builder.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" @@ -31,10 +31,11 @@ namespace art { -class JniInternalTest : public CommonTest { +// TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used. +class JniInternalTest : public CommonCompilerTest { protected: virtual void SetUp() { - CommonTest::SetUp(); + CommonCompilerTest::SetUp(); vm_ = Runtime::Current()->GetJavaVM(); @@ -75,7 +76,7 @@ class JniInternalTest : public CommonTest { virtual void TearDown() { CleanUpJniEnv(); - CommonTest::TearDown(); + CommonCompilerTest::TearDown(); } jclass GetPrimitiveClass(char descriptor) { @@ -2070,7 +2071,7 @@ TEST_F(JniInternalTest, DetachCurrentThread) { jint err = vm_->DetachCurrentThread(); EXPECT_EQ(JNI_ERR, err); - vm_->AttachCurrentThread(&env_, NULL); // need attached thread for CommonTest::TearDown + vm_->AttachCurrentThread(&env_, NULL); // need attached thread for CommonRuntimeTest::TearDown } } // namespace art diff --git a/runtime/leb128.h b/runtime/leb128.h index 7a7d38d..0e80fe2 100644 --- a/runtime/leb128.h +++ b/runtime/leb128.h @@ -112,6 +112,88 @@ static inline uint32_t SignedLeb128Size(int32_t data) { return (x * 37) >> 8; } +static inline uint8_t* EncodeUnsignedLeb128(uint8_t* dest, uint32_t value) { + uint8_t out = value & 0x7f; + value >>= 7; + while (value != 0) { + *dest++ = out | 0x80; + out = value & 0x7f; + value >>= 7; + } + *dest++ = out; + return dest; +} + +static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) { + uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6; + uint8_t out = value & 0x7f; + while (extra_bits != 0u) { + *dest++ = out | 0x80; + value >>= 7; + out = value & 0x7f; + extra_bits >>= 7; + } + *dest++ = out; + return dest; +} + +// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format. 
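As a worked illustration of the encoding implemented above (byte values cross-checked against the leb128_test.cc vectors added below; the wrapper function name is hypothetical):

#include <cassert>
#include <cstdint>

void Leb128WorkedExample() {
  uint8_t buf[5];
  // ULEB128 emits 7-bit groups little-endian, high bit = continuation.
  // 0x4081 has 15 significant bits, so it takes three bytes.
  uint8_t* end = EncodeUnsignedLeb128(buf, 0x4081);
  assert(end - buf == 3);
  assert(buf[0] == 0x81);  // group 0000001, continuation bit set
  assert(buf[1] == 0x81);  // group 0000001, continuation bit set
  assert(buf[2] == 0x01);  // group 0000001, final byte
  // SLEB128 stops once the remaining bits are pure sign extension; bit 6 of
  // the final byte carries the sign. -0x41 therefore needs two bytes.
  end = EncodeSignedLeb128(buf, -0x41);
  assert(end - buf == 2);
  assert(buf[0] == 0xBF);  // 0x3F (low 7 bits of -0x41) | 0x80
  assert(buf[1] == 0x7F);  // low 7 bits of -1; bit 6 set marks it negative
}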
+class Leb128EncodingVector { + public: + Leb128EncodingVector() { + } + + void Reserve(uint32_t size) { + data_.reserve(size); + } + + void PushBackUnsigned(uint32_t value) { + uint8_t out = value & 0x7f; + value >>= 7; + while (value != 0) { + data_.push_back(out | 0x80); + out = value & 0x7f; + value >>= 7; + } + data_.push_back(out); + } + + template<typename It> + void InsertBackUnsigned(It cur, It end) { + for (; cur != end; ++cur) { + PushBackUnsigned(*cur); + } + } + + void PushBackSigned(int32_t value) { + uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6; + uint8_t out = value & 0x7f; + while (extra_bits != 0u) { + data_.push_back(out | 0x80); + value >>= 7; + out = value & 0x7f; + extra_bits >>= 7; + } + data_.push_back(out); + } + + template<typename It> + void InsertBackSigned(It cur, It end) { + for (; cur != end; ++cur) { + PushBackSigned(*cur); + } + } + + const std::vector<uint8_t>& GetData() const { + return data_; + } + + private: + std::vector<uint8_t> data_; + + DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector); +}; + } // namespace art #endif // ART_RUNTIME_LEB128_H_ diff --git a/runtime/leb128_test.cc b/runtime/leb128_test.cc new file mode 100644 index 0000000..d75d5c2 --- /dev/null +++ b/runtime/leb128_test.cc @@ -0,0 +1,290 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "leb128.h" + +#include "gtest/gtest.h" +#include "base/histogram-inl.h" + +namespace art { + +struct DecodeUnsignedLeb128TestCase { + uint32_t decoded; + uint8_t leb128_data[5]; +}; + +static DecodeUnsignedLeb128TestCase uleb128_tests[] = { + {0, {0, 0, 0, 0, 0}}, + {1, {1, 0, 0, 0, 0}}, + {0x7F, {0x7F, 0, 0, 0, 0}}, + {0x80, {0x80, 1, 0, 0, 0}}, + {0x81, {0x81, 1, 0, 0, 0}}, + {0xFF, {0xFF, 1, 0, 0, 0}}, + {0x4000, {0x80, 0x80, 1, 0, 0}}, + {0x4001, {0x81, 0x80, 1, 0, 0}}, + {0x4081, {0x81, 0x81, 1, 0, 0}}, + {0x0FFFFFFF, {0xFF, 0xFF, 0xFF, 0x7F, 0}}, + {0xFFFFFFFF, {0xFF, 0xFF, 0xFF, 0xFF, 0xF}}, +}; + +struct DecodeSignedLeb128TestCase { + int32_t decoded; + uint8_t leb128_data[5]; +}; + +static DecodeSignedLeb128TestCase sleb128_tests[] = { + {0, {0, 0, 0, 0, 0}}, + {1, {1, 0, 0, 0, 0}}, + {0x3F, {0x3F, 0, 0, 0, 0}}, + {0x40, {0xC0, 0 /* sign bit */, 0, 0, 0}}, + {0x41, {0xC1, 0 /* sign bit */, 0, 0, 0}}, + {0x80, {0x80, 1, 0, 0, 0}}, + {0xFF, {0xFF, 1, 0, 0, 0}}, + {0x1FFF, {0xFF, 0x3F, 0, 0, 0}}, + {0x2000, {0x80, 0xC0, 0 /* sign bit */, 0, 0}}, + {0x2001, {0x81, 0xC0, 0 /* sign bit */, 0, 0}}, + {0x2081, {0x81, 0xC1, 0 /* sign bit */, 0, 0}}, + {0x4000, {0x80, 0x80, 1, 0, 0}}, + {0x0FFFFF, {0xFF, 0xFF, 0x3F, 0, 0}}, + {0x100000, {0x80, 0x80, 0xC0, 0 /* sign bit */, 0}}, + {0x100001, {0x81, 0x80, 0xC0, 0 /* sign bit */, 0}}, + {0x100081, {0x81, 0x81, 0xC0, 0 /* sign bit */, 0}}, + {0x104081, {0x81, 0x81, 0xC1, 0 /* sign bit */, 0}}, + {0x200000, {0x80, 0x80, 0x80, 1, 0}}, + {0x7FFFFFF, {0xFF, 0xFF, 0xFF, 0x3F, 0}}, + {0x8000000, {0x80, 0x80, 0x80, 0xC0, 0 /* sign bit */}}, + {0x8000001, {0x81, 0x80, 0x80, 0xC0, 0 /* sign bit */}}, + {0x8000081, {0x81, 0x81, 0x80, 0xC0, 0 /* sign bit */}}, + {0x8004081, {0x81, 0x81, 0x81, 0xC0, 0 /* sign bit */}}, + {0x8204081, {0x81, 0x81, 0x81, 0xC1, 0 /* sign bit */}}, + {0x0FFFFFFF, {0xFF, 0xFF, 0xFF, 0xFF, 0 /* sign bit */}}, + {0x10000000, {0x80, 0x80, 0x80, 0x80, 1}}, + {0x7FFFFFFF, {0xFF, 0xFF, 0xFF, 0xFF, 0x7}}, + {-1, {0x7F, 0, 0, 0, 0}}, + {-2, {0x7E, 0, 0, 0, 0}}, + {-0x3F, {0x41, 0, 0, 0, 0}}, + {-0x40, {0x40, 0, 0, 0, 0}}, + {-0x41, {0xBF, 0x7F, 0, 0, 0}}, + {-0x80, {0x80, 0x7F, 0, 0, 0}}, + {-0x81, {0xFF, 0x7E, 0, 0, 0}}, + {-0x00002000, {0x80, 0x40, 0, 0, 0}}, + {-0x00002001, {0xFF, 0xBF, 0x7F, 0, 0}}, + {-0x00100000, {0x80, 0x80, 0x40, 0, 0}}, + {-0x00100001, {0xFF, 0xFF, 0xBF, 0x7F, 0}}, + {-0x08000000, {0x80, 0x80, 0x80, 0x40, 0}}, + {-0x08000001, {0xFF, 0xFF, 0xFF, 0xBF, 0x7F}}, + {-0x20000000, {0x80, 0x80, 0x80, 0x80, 0x7E}}, + {(-1) << 31, {0x80, 0x80, 0x80, 0x80, 0x78}}, +}; + +TEST(Leb128Test, UnsignedSinglesVector) { + // Test individual encodings. + for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + Leb128EncodingVector builder; + builder.PushBackUnsigned(uleb128_tests[i].decoded); + EXPECT_EQ(UnsignedLeb128Size(uleb128_tests[i].decoded), builder.GetData().size()); + const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0]; + const uint8_t* encoded_data_ptr = &builder.GetData()[0]; + for (size_t j = 0; j < 5; ++j) { + if (j < builder.GetData().size()) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } else { + EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j; + } + } + EXPECT_EQ(DecodeUnsignedLeb128(&data_ptr), uleb128_tests[i].decoded) << " i = " << i; + } +} + +TEST(Leb128Test, UnsignedSingles) { + // Test individual encodings. 
+ for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + uint8_t encoded_data[5]; + uint8_t* end = EncodeUnsignedLeb128(encoded_data, uleb128_tests[i].decoded); + size_t data_size = static_cast<size_t>(end - encoded_data); + EXPECT_EQ(UnsignedLeb128Size(uleb128_tests[i].decoded), data_size); + const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < 5; ++j) { + if (j < data_size) { + EXPECT_EQ(data_ptr[j], encoded_data[j]) << " i = " << i << " j = " << j; + } else { + EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j; + } + } + EXPECT_EQ(DecodeUnsignedLeb128(&data_ptr), uleb128_tests[i].decoded) << " i = " << i; + } +} + +TEST(Leb128Test, UnsignedStreamVector) { + // Encode a number of entries. + Leb128EncodingVector builder; + for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + builder.PushBackUnsigned(uleb128_tests[i].decoded); + } + const uint8_t* encoded_data_ptr = &builder.GetData()[0]; + for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < UnsignedLeb128Size(uleb128_tests[i].decoded); ++j) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } + for (size_t j = UnsignedLeb128Size(uleb128_tests[i].decoded); j < 5; ++j) { + EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j; + } + EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), uleb128_tests[i].decoded) << " i = " << i; + } + EXPECT_EQ(builder.GetData().size(), + static_cast<size_t>(encoded_data_ptr - &builder.GetData()[0])); +} + +TEST(Leb128Test, UnsignedStream) { + // Encode a number of entries. + uint8_t encoded_data[5 * arraysize(uleb128_tests)]; + uint8_t* end = encoded_data; + for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + end = EncodeUnsignedLeb128(end, uleb128_tests[i].decoded); + } + size_t data_size = static_cast<size_t>(end - encoded_data); + const uint8_t* encoded_data_ptr = encoded_data; + for (size_t i = 0; i < arraysize(uleb128_tests); ++i) { + const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < UnsignedLeb128Size(uleb128_tests[i].decoded); ++j) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } + for (size_t j = UnsignedLeb128Size(uleb128_tests[i].decoded); j < 5; ++j) { + EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j; + } + EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), uleb128_tests[i].decoded) << " i = " << i; + } + EXPECT_EQ(data_size, static_cast<size_t>(encoded_data_ptr - encoded_data)); +} + +TEST(Leb128Test, SignedSinglesVector) { + // Test individual encodings. + for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + Leb128EncodingVector builder; + builder.PushBackSigned(sleb128_tests[i].decoded); + EXPECT_EQ(SignedLeb128Size(sleb128_tests[i].decoded), builder.GetData().size()); + const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0]; + const uint8_t* encoded_data_ptr = &builder.GetData()[0]; + for (size_t j = 0; j < 5; ++j) { + if (j < builder.GetData().size()) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } else { + EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j; + } + } + EXPECT_EQ(DecodeSignedLeb128(&data_ptr), sleb128_tests[i].decoded) << " i = " << i; + } +} + +TEST(Leb128Test, SignedSingles) { + // Test individual encodings. 
+ for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + uint8_t encoded_data[5]; + uint8_t* end = EncodeSignedLeb128(encoded_data, sleb128_tests[i].decoded); + size_t data_size = static_cast<size_t>(end - encoded_data); + EXPECT_EQ(SignedLeb128Size(sleb128_tests[i].decoded), data_size); + const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < 5; ++j) { + if (j < data_size) { + EXPECT_EQ(data_ptr[j], encoded_data[j]) << " i = " << i << " j = " << j; + } else { + EXPECT_EQ(data_ptr[j], 0U) << " i = " << i << " j = " << j; + } + } + EXPECT_EQ(DecodeSignedLeb128(&data_ptr), sleb128_tests[i].decoded) << " i = " << i; + } +} + +TEST(Leb128Test, SignedStreamVector) { + // Encode a number of entries. + Leb128EncodingVector builder; + for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + builder.PushBackSigned(sleb128_tests[i].decoded); + } + const uint8_t* encoded_data_ptr = &builder.GetData()[0]; + for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < SignedLeb128Size(sleb128_tests[i].decoded); ++j) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } + for (size_t j = SignedLeb128Size(sleb128_tests[i].decoded); j < 5; ++j) { + EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j; + } + EXPECT_EQ(DecodeSignedLeb128(&encoded_data_ptr), sleb128_tests[i].decoded) << " i = " << i; + } + EXPECT_EQ(builder.GetData().size(), + static_cast<size_t>(encoded_data_ptr - &builder.GetData()[0])); +} + +TEST(Leb128Test, SignedStream) { + // Encode a number of entries. + uint8_t encoded_data[5 * arraysize(sleb128_tests)]; + uint8_t* end = encoded_data; + for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + end = EncodeSignedLeb128(end, sleb128_tests[i].decoded); + } + size_t data_size = static_cast<size_t>(end - encoded_data); + const uint8_t* encoded_data_ptr = encoded_data; + for (size_t i = 0; i < arraysize(sleb128_tests); ++i) { + const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0]; + for (size_t j = 0; j < SignedLeb128Size(sleb128_tests[i].decoded); ++j) { + EXPECT_EQ(data_ptr[j], encoded_data_ptr[j]) << " i = " << i << " j = " << j; + } + for (size_t j = SignedLeb128Size(sleb128_tests[i].decoded); j < 5; ++j) { + EXPECT_EQ(data_ptr[j], 0) << " i = " << i << " j = " << j; + } + EXPECT_EQ(DecodeSignedLeb128(&encoded_data_ptr), sleb128_tests[i].decoded) << " i = " << i; + } + EXPECT_EQ(data_size, static_cast<size_t>(encoded_data_ptr - encoded_data)); +} + +TEST(Leb128Test, Speed) { + UniquePtr<Histogram<uint64_t> > enc_hist(new Histogram<uint64_t>("Leb128EncodeSpeedTest", 5)); + UniquePtr<Histogram<uint64_t> > dec_hist(new Histogram<uint64_t>("Leb128DecodeSpeedTest", 5)); + Leb128EncodingVector builder; + // Push back 1024 chunks of 1024 values measuring encoding speed. + uint64_t last_time = NanoTime(); + for (size_t i = 0; i < 1024; i++) { + for (size_t j = 0; j < 1024; j++) { + builder.PushBackUnsigned((i * 1024) + j); + } + uint64_t cur_time = NanoTime(); + enc_hist->AddValue(cur_time - last_time); + last_time = cur_time; + } + // Verify encoding and measure decode speed. 
+ const uint8_t* encoded_data_ptr = &builder.GetData()[0]; + last_time = NanoTime(); + for (size_t i = 0; i < 1024; i++) { + for (size_t j = 0; j < 1024; j++) { + EXPECT_EQ(DecodeUnsignedLeb128(&encoded_data_ptr), (i * 1024) + j); + } + uint64_t cur_time = NanoTime(); + dec_hist->AddValue(cur_time - last_time); + last_time = cur_time; + } + + Histogram<uint64_t>::CumulativeData enc_data; + enc_hist->CreateHistogram(&enc_data); + enc_hist->PrintConfidenceIntervals(std::cout, 0.99, enc_data); + + Histogram<uint64_t>::CumulativeData dec_data; + dec_hist->CreateHistogram(&dec_data); + dec_hist->PrintConfidenceIntervals(std::cout, 0.99, dec_data); +} + +} // namespace art diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index 90aaccd..8158bc5 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -27,6 +27,10 @@ namespace art { namespace mirror { +static inline size_t HeaderSize(size_t component_size) { + return sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); +} + template<VerifyObjectFlags kVerifyFlags> inline size_t Array::SizeOf() { // This is safe from overflow because the array was already allocated, so we know it's sane. @@ -34,7 +38,7 @@ inline size_t Array::SizeOf() { // Don't need to check this since we already check this in GetClass. int32_t component_count = GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(); - size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); + size_t header_size = HeaderSize(component_size); size_t data_size = component_count * component_size; return header_size + data_size; } @@ -46,7 +50,7 @@ static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t DCHECK_GE(component_count, 0); DCHECK(array_class->IsArrayClass()); - size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4); + size_t header_size = HeaderSize(component_size); size_t data_size = component_count * component_size; size_t size = header_size + data_size; @@ -61,13 +65,16 @@ static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t return size; } -// Used for setting the array length in the allocation code path to ensure it is guarded by a CAS. +// Used for setting the array length in the allocation code path to ensure it is guarded by a +// StoreStore fence. class SetLengthVisitor { public: explicit SetLengthVisitor(int32_t length) : length_(length) { } - void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(Object* obj, size_t usable_size) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + UNUSED(usable_size); // Avoid AsArray as object is not yet in live bitmap or allocation stack. Array* array = down_cast<Array*>(obj); // DCHECK(array->IsArrayInstance()); @@ -76,41 +83,72 @@ class SetLengthVisitor { private: const int32_t length_; + + DISALLOW_COPY_AND_ASSIGN(SetLengthVisitor); +}; + +// Similar to SetLengthVisitor, used for setting the array length to fill the usable size of an +// array. +class SetLengthToUsableSizeVisitor { + public: + SetLengthToUsableSizeVisitor(int32_t min_length, size_t header_size, size_t component_size) : + minimum_length_(min_length), header_size_(header_size), component_size_(component_size) { + } + + void operator()(Object* obj, size_t usable_size) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Avoid AsArray as object is not yet in live bitmap or allocation stack. 
+ Array* array = down_cast<Array*>(obj); + // DCHECK(array->IsArrayInstance()); + int32_t length = (usable_size - header_size_) / component_size_; + DCHECK_GE(length, minimum_length_); + byte* old_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, minimum_length_)); + byte* new_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, length)); + // Ensure space beyond original allocation is zeroed. + memset(old_end, 0, new_end - old_end); + array->SetLength(length); + } + + private: + const int32_t minimum_length_; + const size_t header_size_; + const size_t component_size_; + + DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor); }; template <bool kIsInstrumented> inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size, gc::AllocatorType allocator_type) { + size_t component_size, gc::AllocatorType allocator_type, + bool fill_usable) { + DCHECK(allocator_type != gc::kAllocatorTypeLOS); size_t size = ComputeArraySize(self, array_class, component_count, component_size); if (UNLIKELY(size == 0)) { return nullptr; } gc::Heap* heap = Runtime::Current()->GetHeap(); - SetLengthVisitor visitor(component_count); - DCHECK(allocator_type != gc::kAllocatorTypeLOS); - return down_cast<Array*>( - heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size, - allocator_type, visitor)); -} - -template <bool kIsInstrumented> -inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, - gc::AllocatorType allocator_type) { - DCHECK(array_class->IsArrayClass()); - return Alloc<kIsInstrumented>(self, array_class, component_count, array_class->GetComponentSize(), - allocator_type); -} -template <bool kIsInstrumented> -inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count) { - return Alloc<kIsInstrumented>(self, array_class, component_count, - Runtime::Current()->GetHeap()->GetCurrentAllocator()); -} - -template <bool kIsInstrumented> -inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size) { - return Alloc<kIsInstrumented>(self, array_class, component_count, component_size, - Runtime::Current()->GetHeap()->GetCurrentAllocator()); + Array* result; + if (!fill_usable) { + SetLengthVisitor visitor(component_count); + result = down_cast<Array*>( + heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size, + allocator_type, visitor)); + } else { + SetLengthToUsableSizeVisitor visitor(component_count, HeaderSize(component_size), + component_size); + result = down_cast<Array*>( + heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size, + allocator_type, visitor)); + } + if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) { + CHECK_EQ(array_class->GetComponentSize(), component_size); + if (!fill_usable) { + CHECK_EQ(result->SizeOf(), size); + } else { + CHECK_GE(result->SizeOf(), size); + } + } + return result; } template<class T> @@ -133,9 +171,17 @@ static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) { } } +template<typename T> +inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) { + DCHECK(array_class_ != NULL); + Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T), + Runtime::Current()->GetHeap()->GetCurrentAllocator()); + return down_cast<PrimitiveArray<T>*>(raw_array); +} + template<class T> -void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t 
src_pos,
-                               int32_t count) {
+inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                                       int32_t count) {
   if (UNLIKELY(count == 0)) {
     return;
   }
@@ -192,8 +238,8 @@ static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {

 template<class T>
-void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
-                               int32_t count) {
+inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
+                                      int32_t count) {
   if (UNLIKELY(count == 0)) {
     return;
   }
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 715f072..139e2d0 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -46,7 +46,9 @@ static Array* RecursiveCreateMultiArray(Thread* self,
                                         const SirtRef<mirror::IntArray>& dimensions)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   int32_t array_length = dimensions->Get(current_dimension);
-  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length));
+  SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length,
+                                                    array_class->GetComponentSize(),
+                                                    Runtime::Current()->GetHeap()->GetCurrentAllocator()));
   if (UNLIKELY(new_array.get() == nullptr)) {
     CHECK(self->IsExceptionPending());
     return nullptr;
@@ -117,13 +119,6 @@ void Array::ThrowArrayStoreException(Object* object) {
   art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
 }

-template<typename T>
-PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
-  DCHECK(array_class_ != NULL);
-  Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T));
-  return down_cast<PrimitiveArray<T>*>(raw_array);
-}
-
 template <typename T> Class* PrimitiveArray<T>::array_class_ = NULL;

 // Explicitly instantiate all the primitive array types.
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index c4f9a75..772d303 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -28,25 +28,13 @@ namespace mirror {

 class MANAGED Array : public Object {
  public:
-  // A convenience for code that doesn't know the component size, and doesn't want to have to work
-  // it out itself.
+  // Allocates an array with the given properties. If fill_usable is true, the array will be at
+  // least component_count in length; however, if there is usable space at the end of the
+  // allocation, the array will expand to fill it.
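To make the fill_usable arithmetic concrete, here is a hypothetical worked case following HeaderSize() and SetLengthToUsableSizeVisitor in array-inl.h above (the 8-byte object header assumes the default, non-Brooks-pointer build):

// Request int[2]: header = sizeof(Object) + 4 = 12 bytes for 4-byte
// components, so size = 12 + 2 * 4 = 20 bytes.
// Suppose the allocator's size bucket returns usable_size = 24. Then:
//   length = (usable_size - header) / component_size = (24 - 12) / 4 = 3
// and the caller receives an int[3]; the visitor zeroes the extra element
// before publishing the new length.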
template <bool kIsInstrumented> static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, - gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - template <bool kIsInstrumented> - static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - template <bool kIsInstrumented> - static Array* Alloc(Thread* self, Class* array_class, int32_t component_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - template <bool kIsInstrumented> - static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, - size_t component_size) + size_t component_size, gc::AllocatorType allocator_type, + bool fill_usable = false) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); static Array* CreateMultiArray(Thread* self, const SirtRef<Class>& element_class, diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc index 6bed224..fef1f9b 100644 --- a/runtime/mirror/dex_cache_test.cc +++ b/runtime/mirror/dex_cache_test.cc @@ -14,20 +14,21 @@ * limitations under the License. */ -#include "class_linker.h" -#include "common_test.h" #include "dex_cache.h" + +#include <stdio.h> + +#include "class_linker.h" +#include "common_runtime_test.h" #include "gc/heap.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "sirt_ref.h" -#include <stdio.h> - namespace art { namespace mirror { -class DexCacheTest : public CommonTest {}; +class DexCacheTest : public CommonRuntimeTest {}; TEST_F(DexCacheTest, Open) { ScopedObjectAccess soa(Thread::Current()); diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index df8104d..478cc36 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -92,6 +92,38 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) { Monitor::Wait(self, this, ms, ns, true, kTimedWaiting); } +inline Object* Object::GetBrooksPointer() { +#ifdef USE_BROOKS_POINTER + DCHECK(kUseBrooksPointer); + return GetFieldObject<Object, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Object, x_brooks_ptr_), false); +#else + LOG(FATAL) << "Unreachable"; + return nullptr; +#endif +} + +inline void Object::SetBrooksPointer(Object* brooks_pointer) { +#ifdef USE_BROOKS_POINTER + DCHECK(kUseBrooksPointer); + // We don't mark the card as this occurs as part of object allocation. Not all objects have + // backing cards, such as large objects. 
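// (For context: a Brooks pointer is a per-object forwarding pointer. As long
// as an object has not been moved, it points back to the object itself, which
// is exactly what AssertSelfBrooksPointer below verifies. A conceptual sketch
// of the read barrier this enables for a concurrent copying collector;
// illustrative only, not ART's actual barrier code:
//
//   mirror::Object* Read(mirror::Object* ref) {
//     return ref->GetBrooksPointer();  // == ref until the collector moves it
//   }
//
// The collector can then redirect every subsequent access by updating that
// single forwarding word in the old copy.)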
+  SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+      OFFSET_OF_OBJECT_MEMBER(Object, x_brooks_ptr_), brooks_pointer, false);
+#else
+  LOG(FATAL) << "Unreachable";
+#endif
+}
+
+inline void Object::AssertSelfBrooksPointer() const {
+#ifdef USE_BROOKS_POINTER
+  DCHECK(kUseBrooksPointer);
+  Object* obj = const_cast<Object*>(this);
+  DCHECK_EQ(obj, obj->GetBrooksPointer());
+#else
+  LOG(FATAL) << "Unreachable";
+#endif
+}
+
 template<VerifyObjectFlags kVerifyFlags>
 inline bool Object::VerifierInstanceOf(Class* klass) {
   DCHECK(klass != NULL);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 7487dd2..ded4e0a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -76,6 +76,10 @@ class MANAGED Object {
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

+  Object* GetBrooksPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBrooksPointer(Object* brooks_pointer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void AssertSelfBrooksPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
   // invoke-interface to detect incompatible interface types.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -260,6 +264,14 @@ class MANAGED Object {
   // Monitor and hash code information.
   uint32_t monitor_;

+#ifdef USE_BROOKS_POINTER
+  // Note: names use an 'x' prefix, and x_brooks_ptr_ is of type int rather
+  // than Object, to match the alphabetical/by-type field order on the Java side.
+  uint32_t x_brooks_ptr_;  // For the Brooks pointer.
+  uint32_t x_padding_;  // For 8-byte alignment. TODO: get rid of this.
+#endif
+
   friend class art::ImageWriter;
   friend class art::Monitor;
   friend struct art::ObjectOffsets;  // for verifying offset information
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 34fb15e..7d8da14 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -25,7 +25,7 @@
 #include "class-inl.h"
 #include "class_linker.h"
 #include "class_linker-inl.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "gc/accounting/card_table-inl.h"
@@ -40,7 +40,7 @@
 namespace art {
 namespace mirror {

-class ObjectTest : public CommonTest {
+class ObjectTest : public CommonRuntimeTest {
  protected:
   void AssertString(int32_t expected_utf16_length,
                     const char* utf8_in,
@@ -148,16 +148,52 @@ TEST_F(ObjectTest, AllocObjectArray) {
 TEST_F(ObjectTest, AllocArray) {
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass(soa.Self(), "[I");
-  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1));
-  ASSERT_TRUE(c == a->GetClass());
+  SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                                                  Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_EQ(1, a->GetLength());

   c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
-  a.reset(Array::Alloc<true>(soa.Self(), c, 1));
-  ASSERT_TRUE(c == a->GetClass());
+  a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+                             Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+  EXPECT_TRUE(c == a->GetClass());
+  EXPECT_EQ(1, a->GetLength());

   c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
-  a.reset(Array::Alloc<true>(soa.Self(),
c, 1)); - ASSERT_TRUE(c == a->GetClass()); + a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator())); + EXPECT_TRUE(c == a->GetClass()); + EXPECT_EQ(1, a->GetLength()); +} + +TEST_F(ObjectTest, AllocArray_FillUsable) { + ScopedObjectAccess soa(Thread::Current()); + Class* c = class_linker_->FindSystemClass(soa.Self(), "[B"); + SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator(), + true)); + EXPECT_TRUE(c == a->GetClass()); + EXPECT_LE(1, a->GetLength()); + + c = class_linker_->FindSystemClass(soa.Self(), "[I"); + a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator(), + true)); + EXPECT_TRUE(c == a->GetClass()); + EXPECT_LE(2, a->GetLength()); + + c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;"); + a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator(), + true)); + EXPECT_TRUE(c == a->GetClass()); + EXPECT_LE(2, a->GetLength()); + + c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;"); + a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator(), true)); + EXPECT_TRUE(c == a->GetClass()); + EXPECT_LE(2, a->GetLength()); } template<typename ArrayT> diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 5779442..3c703ba 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -72,7 +72,7 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle } gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator(); mirror::Array* result = mirror::Array::Alloc<true>(soa.Self(), array_class, length, - allocator); + array_class->GetComponentSize(), allocator); return soa.AddLocalReference<jobject>(result); } diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc index fc30aa6..a991818 100644 --- a/runtime/native/java_lang_reflect_Array.cc +++ b/runtime/native/java_lang_reflect_Array.cc @@ -50,14 +50,17 @@ static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementCl ThrowNegativeArraySizeException(length); return NULL; } - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Runtime* runtime = Runtime::Current(); + ClassLinker* class_linker = runtime->GetClassLinker(); mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), element_class); if (UNLIKELY(array_class == NULL)) { CHECK(soa.Self()->IsExceptionPending()); return NULL; } - DCHECK(array_class->IsArrayClass()); - mirror::Array* new_array = mirror::Array::Alloc<true>(soa.Self(), array_class, length); + DCHECK(array_class->IsObjectArrayClass()); + mirror::Array* new_array = mirror::Array::Alloc<true>(soa.Self(), array_class, length, + sizeof(mirror::HeapReference<mirror::Object>), + runtime->GetHeap()->GetCurrentAllocator()); return soa.AddLocalReference<jobject>(new_array); } diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc index 16fbd94..3229039 100644 --- a/runtime/reference_table_test.cc +++ b/runtime/reference_table_test.cc @@ -14,14 +14,13 @@ * limitations under the License. 
*/ -#include "common_test.h" - #include "reference_table.h" +#include "common_runtime_test.h" + namespace art { -class ReferenceTableTest : public CommonTest { -}; +class ReferenceTableTest : public CommonRuntimeTest {}; TEST_F(ReferenceTableTest, Basics) { ScopedObjectAccess soa(Thread::Current()); diff --git a/runtime/runtime_test.cc b/runtime/runtime_test.cc index d53b4a6..5b881e5 100644 --- a/runtime/runtime_test.cc +++ b/runtime/runtime_test.cc @@ -17,11 +17,11 @@ #include "runtime.h" #include "UniquePtr.h" -#include "common_test.h" +#include "common_runtime_test.h" namespace art { -class RuntimeTest : public CommonTest {}; +class RuntimeTest : public CommonRuntimeTest {}; TEST_F(RuntimeTest, ParsedOptions) { void* test_vfprintf = reinterpret_cast<void*>(0xa); diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc index c6f0e92..c1a1ad7 100644 --- a/runtime/thread_pool_test.cc +++ b/runtime/thread_pool_test.cc @@ -14,12 +14,12 @@ * limitations under the License. */ +#include "thread_pool.h" #include <string> #include "atomic.h" -#include "common_test.h" -#include "thread_pool.h" +#include "common_runtime_test.h" namespace art { @@ -49,7 +49,7 @@ class CountTask : public Task { const bool verbose_; }; -class ThreadPoolTest : public CommonTest { +class ThreadPoolTest : public CommonRuntimeTest { public: static int32_t num_threads; }; diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc index 9dc7b44..7242b81 100644 --- a/runtime/transaction_test.cc +++ b/runtime/transaction_test.cc @@ -14,17 +14,17 @@ * limitations under the License. */ -#include "common_test.h" +#include "transaction.h" + +#include "common_runtime_test.h" #include "invoke_arg_array_builder.h" #include "mirror/array-inl.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" -#include "transaction.h" namespace art { -class TransactionTest : public CommonTest { -}; +class TransactionTest : public CommonRuntimeTest {}; TEST_F(TransactionTest, Object_class) { ScopedObjectAccess soa(Thread::Current()); @@ -86,7 +86,10 @@ TEST_F(TransactionTest, Array_length) { // Allocate an array during transaction. SirtRef<mirror::Array> sirt_obj(soa.Self(), - mirror::Array::Alloc<false>(soa.Self(), sirt_klass.get(), kArraySize)); + mirror::Array::Alloc<false>(soa.Self(), sirt_klass.get(), + kArraySize, + sirt_klass->GetComponentSize(), + Runtime::Current()->GetHeap()->GetCurrentAllocator())); ASSERT_TRUE(sirt_obj.get() != nullptr); ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get()); Runtime::Current()->ExitTransactionMode(); diff --git a/runtime/utils.cc b/runtime/utils.cc index 68d8417..df1ab94 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -1232,7 +1232,7 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) { execv(program, &args[0]); - *error_msg = StringPrintf("Failed to execv(%s): %s", command_line.c_str(), strerror(errno)); + PLOG(FATAL) << "Failed to execv(" << command_line << ")"; return false; } else { if (pid == -1) { diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc index 0d237e2..d804f6a 100644 --- a/runtime/utils_test.cc +++ b/runtime/utils_test.cc @@ -14,7 +14,9 @@ * limitations under the License. 
 */

-#include "common_test.h"
+#include "utils.h"
+
+#include "common_runtime_test.h"
 #include "mirror/array.h"
 #include "mirror/array-inl.h"
 #include "mirror/object-inl.h"
@@ -22,15 +24,13 @@
 #include "mirror/string.h"
 #include "scoped_thread_state_change.h"
 #include "sirt_ref.h"
-#include "utils.h"

 namespace art {

 std::string PrettyArguments(const char* signature);
 std::string PrettyReturnType(const char* signature);

-class UtilsTest : public CommonTest {
-};
+class UtilsTest : public CommonRuntimeTest {};

 TEST_F(UtilsTest, PrettyDescriptor_ArrayReferences) {
   EXPECT_EQ("java.lang.Class[]", PrettyDescriptor("[Ljava/lang/Class;"));
@@ -362,9 +362,7 @@ TEST_F(UtilsTest, ExecSuccess) {
   EXPECT_EQ(0U, error_msg.size()) << error_msg;
 }

-// TODO: Disabled due to hang tearing down CommonTest.
-// Renable after splitting into RuntimeTest and CompilerTest.
-TEST_F(UtilsTest, DISABLED_ExecError) {
+TEST_F(UtilsTest, ExecError) {
   std::vector<std::string> command;
   command.push_back("bogus");
   std::string error_msg;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index ffa2455..9dca7f5 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -14,18 +14,19 @@
  * limitations under the License.
  */

+#include "method_verifier.h"
+
 #include <stdio.h>

 #include "UniquePtr.h"
 #include "class_linker.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "dex_file.h"
-#include "method_verifier.h"

 namespace art {
 namespace verifier {

-class MethodVerifierTest : public CommonTest {
+class MethodVerifierTest : public CommonRuntimeTest {
  protected:
   void VerifyClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index dc320be..1a64c00 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -15,16 +15,17 @@
  */

 #include "reg_type.h"
-#include "reg_type_cache-inl.h"
-#include "base/casts.h"
-#include "common_test.h"

 #include <set>

+#include "base/casts.h"
+#include "common_runtime_test.h"
+#include "reg_type_cache-inl.h"
+
 namespace art {
 namespace verifier {

-class RegTypeTest : public CommonTest {};
+class RegTypeTest : public CommonRuntimeTest {};

 TEST_F(RegTypeTest, ConstLoHi) {
   // Tests creating primitive types.
@@ -335,7 +336,7 @@ TEST_F(RegTypeTest, Primitives) {
 }


-class RegTypeReferenceTest : public CommonTest {};
+class RegTypeReferenceTest : public CommonRuntimeTest {};

 TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
   // Tests matching precisions. A reference type that was created precise doesn't
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 16394b0..0bf6767 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -22,12 +22,12 @@
 #include <zlib.h>

 #include "UniquePtr.h"
-#include "common_test.h"
+#include "common_runtime_test.h"
 #include "os.h"

 namespace art {

-class ZipArchiveTest : public CommonTest {};
+class ZipArchiveTest : public CommonRuntimeTest {};

 TEST_F(ZipArchiveTest, FindAndExtract) {
   std::string error_msg;