Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk | 1
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc | 6
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 8
-rw-r--r--  runtime/arch/arm64/registers_arm64.h | 5
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 8
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/base/mutex.h | 1
-rw-r--r--  runtime/class_linker.cc | 5
-rw-r--r--  runtime/debugger.cc | 3
-rw-r--r--  runtime/elf_file.cc | 35
-rw-r--r--  runtime/entrypoints/quick/quick_invoke_entrypoints.cc | 248
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 305
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h | 19
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 3
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 73
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 57
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 2
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 15
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 6
-rw-r--r--  runtime/gc/heap-inl.h | 18
-rw-r--r--  runtime/gc/heap.cc | 38
-rw-r--r--  runtime/gc/heap.h | 17
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 6
-rw-r--r--  runtime/gc/space/space_test.h | 9
-rw-r--r--  runtime/globals.h | 10
-rw-r--r--  runtime/instruction_set.h | 14
-rw-r--r--  runtime/mirror/class-inl.h | 11
-rw-r--r--  runtime/mirror/object-inl.h | 37
-rw-r--r--  runtime/mirror/object.cc | 28
-rw-r--r--  runtime/mirror/object.h | 2
-rw-r--r--  runtime/monitor.cc | 42
-rw-r--r--  runtime/monitor.h | 8
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 5
-rw-r--r--  runtime/object_callbacks.h | 2
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 74
-rw-r--r--  runtime/quick/inline_method_analyser.h | 8
-rw-r--r--  runtime/read_barrier-inl.h | 47
-rw-r--r--  runtime/read_barrier.h | 31
-rw-r--r--  runtime/read_barrier_c.h | 38
-rw-r--r--  runtime/runtime.cc | 13
-rw-r--r--  runtime/runtime.h | 2
-rw-r--r--  runtime/sirt_ref-inl.h | 13
-rw-r--r--  runtime/sirt_ref.h | 16
-rw-r--r--  runtime/stack_indirect_reference_table.h | 10
-rw-r--r--  runtime/thread.cc | 3
-rw-r--r--  runtime/thread_list.cc | 4
47 files changed, 883 insertions, 427 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index cf7f895..9df69f0 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -182,7 +182,6 @@ LIBART_COMMON_SRC_FILES += \
entrypoints/quick/quick_field_entrypoints.cc \
entrypoints/quick/quick_fillarray_entrypoints.cc \
entrypoints/quick/quick_instrumentation_entrypoints.cc \
- entrypoints/quick/quick_invoke_entrypoints.cc \
entrypoints/quick/quick_jni_entrypoints.cc \
entrypoints/quick/quick_lock_entrypoints.cc \
entrypoints/quick/quick_math_entrypoints.cc \
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index aaba598..3bbec71 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -35,7 +35,7 @@ namespace art {
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
-extern "C" void art_quick_test_suspend();
+extern "C" void art_quick_implicit_suspend();
// Get the size of a thumb2 instruction in bytes.
static uint32_t GetInstructionSize(uint8_t* pc) {
@@ -142,7 +142,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
if (found) {
LOG(DEBUG) << "suspend check match";
// This is a suspend check. Arrange for the signal handler to return to
- // art_quick_test_suspend. Also set LR so that after the suspend check it
+ // art_quick_implicit_suspend. Also set LR so that after the suspend check it
// will resume the instruction (current PC + 2). PC points to the
// ldr r0,[r0,#0] instruction (r0 will be 0, set by the trigger).
@@ -151,7 +151,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
LOG(DEBUG) << "arm lr: " << std::hex << sc->arm_lr;
LOG(DEBUG) << "arm pc: " << std::hex << sc->arm_pc;
sc->arm_lr = sc->arm_pc + 3; // +2 + 1 (for thumb)
- sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+ sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_implicit_suspend);
// Now remove the suspend trigger that caused this fault.
Thread::Current()->RemoveSuspendTrigger();
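
The mechanism here: managed code periodically loads from a thread-local trigger page; when a suspend is requested the runtime arms that page, the load faults, and the SIGSEGV handler rewrites the saved PC to enter the suspend stub, setting LR so the thread resumes just past the 2-byte Thumb probe afterwards. A minimal C++ sketch of that redirection on 32-bit ARM/Linux; implicit_suspend_stub is an illustrative stand-in, and the real handler additionally verifies the faulting instruction and clears the trigger:

    #include <csignal>
    #include <cstdint>
    #include <ucontext.h>

    extern "C" void implicit_suspend_stub();  // illustrative assembly stub

    static void SuspendFaultHandler(int /*sig*/, siginfo_t* /*info*/, void* context) {
      ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
      // On 32-bit ARM/Linux, uc_mcontext exposes the saved core registers.
      // Resume address: faulting PC + 2 (Thumb probe), +1 to stay in Thumb mode.
      uc->uc_mcontext.arm_lr = uc->uc_mcontext.arm_pc + 3;
      // Divert execution into the suspend stub on signal return.
      uc->uc_mcontext.arm_pc = reinterpret_cast<uintptr_t>(implicit_suspend_stub);
    }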
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 71dcd7f..4903732 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -888,6 +888,14 @@ ENTRY art_quick_test_suspend
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
+ENTRY art_quick_implicit_suspend
+ mov r0, rSELF
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves for stack crawl
+ mov r1, sp
+ bl artTestSuspendFromCode @ (Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_implicit_suspend
+
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The
diff --git a/runtime/arch/arm64/registers_arm64.h b/runtime/arch/arm64/registers_arm64.h
index ca904bc..43c0ad6 100644
--- a/runtime/arch/arm64/registers_arm64.h
+++ b/runtime/arch/arm64/registers_arm64.h
@@ -63,8 +63,8 @@ enum Register {
LR = 30,
SP = 31, // SP is X31 and overlaps with XZR but we encode it as a
// special register, due to the different instruction semantics.
- XZR = 32, // FIXME This needs to be reconciled with the JNI assembler.
- kNumberOfCoreRegisters = 32,
+ XZR = 32,
+ kNumberOfCoreRegisters = 33,
kNoRegister = -1,
};
std::ostream& operator<<(std::ostream& os, const Register& rhs);
@@ -103,7 +103,6 @@ enum WRegister {
W29 = 29,
W30 = 30,
W31 = 31,
- WSP = 31,
WZR = 31,
kNumberOfWRegisters = 32,
kNoWRegister = -1,
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4bde8b7..336a0cc 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -532,12 +532,12 @@ DEFINE_FUNCTION art_quick_lock_object
movl %ecx, %eax // restore eax
jmp .Lretry_lock
.Lalready_thin:
- cmpw %ax, %dx // do we hold the lock already?
+ cmpw %cx, %dx // do we hold the lock already?
jne .Lslow_lock
- addl LITERAL(65536), %eax // increment recursion count
- test LITERAL(0xC0000000), %eax // overflowed if either of top two bits are set
+ addl LITERAL(65536), %ecx // increment recursion count
+ test LITERAL(0xC0000000), %ecx // overflowed if either of top two bits are set
jne .Lslow_lock // count overflowed so go slow
- movl %eax, LOCK_WORD_OFFSET(%ecx) // update lockword, cmpxchg not necessary as we hold lock
+ movl %ecx, LOCK_WORD_OFFSET(%eax) // update lockword, cmpxchg not necessary as we hold lock
ret
.Lslow_lock:
SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
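
The fix above untangles a register mix-up in the thin-lock fast path: after the change %eax consistently holds the object and %ecx the lock word, so the owner check compares the right thread id, the recursion count is bumped in the lock word itself, and the store goes back to the object's lock word field. In ART's thin lock word the owner thread id sits in the low 16 bits and the recursion count above it, with the top two bits reserved; a C++ sketch of the same arithmetic (TryRecursiveThinLock is illustrative):

    #include <cstdint>

    // Returns true if the lock word was updated on the fast path; false
    // means the caller must take the slow path (not the owner, or the
    // recursion count would spill into the reserved top two bits).
    bool TryRecursiveThinLock(uint32_t* lock_word, uint16_t self_thread_id) {
      uint32_t word = *lock_word;
      if (static_cast<uint16_t>(word) != self_thread_id) {
        return false;                              // cmpw %cx, %dx; jne .Lslow_lock
      }
      uint32_t incremented = word + (1u << 16);    // addl $65536, %ecx
      if ((incremented & 0xC0000000u) != 0) {
        return false;                              // test $0xC0000000, %ecx
      }
      *lock_word = incremented;  // plain store: we already hold the lock
      return true;
    }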
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 8ef407d..62f3593 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_ASM_SUPPORT_H_
#define ART_RUNTIME_ASM_SUPPORT_H_
-#include "read_barrier.h"
+#include "read_barrier_c.h"
// Value loaded into rSUSPEND for quick. When this value is counted down to zero we do a suspend
// check.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 4b881f6..b50c098 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -76,6 +76,7 @@ enum LockLevel {
kClassLinkerClassesLock,
kBreakpointLock,
kMonitorLock,
+ kMonitorListLock,
kThreadListLock,
kBreakpointInvokeLock,
kDeoptimizationLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6c5406e..78b7cc0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -202,7 +202,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
- heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass))));
+ heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor())));
CHECK(java_lang_Class.get() != NULL);
mirror::Class::SetClassClass(java_lang_Class.get());
java_lang_Class->SetClass(java_lang_Class.get());
@@ -1180,7 +1180,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
SirtRef<mirror::DexCache> dex_cache(
self, down_cast<mirror::DexCache*>(
- heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
+ heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize(),
+ VoidFunctor())));
if (dex_cache.get() == NULL) {
return NULL;
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 2872a02..514ad4c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1217,7 +1217,8 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
return JDWP::ERR_INVALID_LENGTH;
}
- const char* descriptor = ClassHelper(dst->GetClass()).GetDescriptor();
+ ClassHelper ch(dst->GetClass());
+ const char* descriptor = ch.GetDescriptor();
JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor + 1);
if (IsPrimitiveTag(tag)) {
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 0c8a4f0..01ca60f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -22,6 +22,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "utils.h"
+#include "instruction_set.h"
namespace art {
@@ -773,6 +774,40 @@ size_t ElfFile::GetLoadedSize() const {
bool ElfFile::Load(bool executable, std::string* error_msg) {
CHECK(program_header_only_) << file_->GetPath();
+
+ if (executable) {
+ InstructionSet elf_ISA = kNone;
+ switch (GetHeader().e_machine) {
+ case EM_ARM: {
+ elf_ISA = kArm;
+ break;
+ }
+ case EM_AARCH64: {
+ elf_ISA = kArm64;
+ break;
+ }
+ case EM_386: {
+ elf_ISA = kX86;
+ break;
+ }
+ case EM_X86_64: {
+ elf_ISA = kX86_64;
+ break;
+ }
+ case EM_MIPS: {
+ elf_ISA = kMips;
+ break;
+ }
+ }
+
+ if (elf_ISA != kRuntimeISA) {
+ std::ostringstream oss;
+ oss << "Expected ISA " << kRuntimeISA << " but found " << elf_ISA;
+ *error_msg = oss.str();
+ return false;
+ }
+ }
+
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf32_Phdr& program_header = GetProgramHeader(i);
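
The new block refuses to load an oat/ELF file built for a different architecture: e_machine from the ELF header is mapped to an InstructionSet and compared with kRuntimeISA (defined in instruction_set.h later in this patch). The same check can be expressed against a raw ELF header; a hedged standalone sketch, with EM_AARCH64 spelled numerically since older elf.h headers lack the constant:

    #include <elf.h>

    #ifndef EM_AARCH64
    #define EM_AARCH64 183  // not yet defined in older elf.h
    #endif

    // Illustrative stand-in for the patch's switch: maps e_machine to a
    // printable ISA name, with "none" meaning unknown/unsupported.
    const char* MachineToIsaName(Elf32_Half e_machine) {
      switch (e_machine) {
        case EM_ARM:     return "arm";
        case EM_AARCH64: return "arm64";
        case EM_386:     return "x86";
        case EM_X86_64:  return "x86_64";
        case EM_MIPS:    return "mips";
        default:         return "none";
      }
    }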
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
deleted file mode 100644
index e024a90..0000000
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "callee_save_frame.h"
-#include "dex_instruction-inl.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-
-namespace art {
-
-// Determine target of interface dispatch. This object is known non-null.
-extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method;
- if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
- method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
- if (UNLIKELY(method == NULL)) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
- ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
- caller_method);
- return 0; // Failure.
- }
- } else {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
- DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
- // Determine method index from calling dex instruction.
-#if defined(__arm__)
- // On entry the stack pointed by sp is:
- // | argN | |
- // | ... | |
- // | arg4 | |
- // | arg3 spill | | Caller's frame
- // | arg2 spill | |
- // | arg1 spill | |
- // | Method* | ---
- // | LR |
- // | ... | callee saves
- // | R3 | arg3
- // | R2 | arg2
- // | R1 | arg1
- // | R0 |
- // | Method* | <- sp
- DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
- uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
- uintptr_t caller_pc = regs[10];
-#elif defined(__i386__)
- // On entry the stack pointed by sp is:
- // | argN | |
- // | ... | |
- // | arg4 | |
- // | arg3 spill | | Caller's frame
- // | arg2 spill | |
- // | arg1 spill | |
- // | Method* | ---
- // | Return |
- // | EBP,ESI,EDI | callee saves
- // | EBX | arg3
- // | EDX | arg2
- // | ECX | arg1
- // | EAX/Method* | <- sp
- DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
- uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
- uintptr_t caller_pc = regs[7];
-#elif defined(__mips__)
- // On entry the stack pointed by sp is:
- // | argN | |
- // | ... | |
- // | arg4 | |
- // | arg3 spill | | Caller's frame
- // | arg2 spill | |
- // | arg1 spill | |
- // | Method* | ---
- // | RA |
- // | ... | callee saves
- // | A3 | arg3
- // | A2 | arg2
- // | A1 | arg1
- // | A0/Method* | <- sp
- DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
- uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
- uintptr_t caller_pc = regs[15];
-#else
- UNIMPLEMENTED(FATAL);
- uintptr_t caller_pc = 0;
-#endif
- uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
- const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
- CHECK_LT(dex_pc, code->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
- instr_code == Instruction::INVOKE_INTERFACE_RANGE)
- << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
- uint32_t dex_method_idx;
- if (instr_code == Instruction::INVOKE_INTERFACE) {
- dex_method_idx = instr->VRegB_35c();
- } else {
- DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
- dex_method_idx = instr->VRegB_3rc();
- }
- method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method, self);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- return 0; // Failure.
- }
- }
- const void* code = method->GetEntryPointFromQuickCompiledCode();
-
- // When we return, the caller will branch to this address, so it had better not be 0!
- if (kIsDebugBuild && UNLIKELY(code == nullptr)) {
- MethodHelper mh(method);
- LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
- << " location: " << mh.GetDexFile().GetLocation();
- }
-#ifdef __LP64__
- UNIMPLEMENTED(FATAL);
- return 0;
-#else
- uint32_t method_uint = reinterpret_cast<uint32_t>(method);
- uint64_t code_uint = reinterpret_cast<uint32_t>(code);
- uint64_t result = ((code_uint << 32) | method_uint);
- return result;
-#endif
-}
-
-template<InvokeType type, bool access_check>
-uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, mirror::ArtMethod** sp) {
- mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
- type);
- if (UNLIKELY(method == NULL)) {
- FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
- method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- return 0; // failure
- }
- }
- DCHECK(!self->IsExceptionPending());
- const void* code = method->GetEntryPointFromQuickCompiledCode();
-
- // When we return, the caller will branch to this address, so it had better not be 0!
- if (kIsDebugBuild && UNLIKELY(code == NULL)) {
- MethodHelper mh(method);
- LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
- << " location: " << mh.GetDexFile().GetLocation();
- }
-#ifdef __LP64__
- UNIMPLEMENTED(FATAL);
- return 0;
-#else
- uint32_t method_uint = reinterpret_cast<uint32_t>(method);
- uint64_t code_uint = reinterpret_cast<uint32_t>(code);
- uint64_t result = ((code_uint << 32) | method_uint);
- return result;
-#endif
-}
-
-// Explicit template declarations of artInvokeCommon for all invoke types.
-#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
- uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx, \
- mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
- Thread* self, mirror::ArtMethod** sp)
-
-#define EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(_type) \
- EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, false); \
- EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, true)
-
-EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kStatic);
-EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kDirect);
-EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kVirtual);
-EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kSuper);
-EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(kInterface);
-
-#undef EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL
-#undef EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL
-
-// See comments in runtime_support_asm.S
-extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
-}
-
-
-extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
-}
-
-extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
-}
-
-extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
-}
-
-extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 20432c6..fcbcac2 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -23,6 +23,7 @@
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
@@ -618,6 +619,7 @@ void BuildQuickArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
+ soa_->Env()->DeleteLocalRef(pair.first);
}
}
@@ -708,6 +710,7 @@ void RememberForGcArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
pair.second->Assign(soa_->Decode<mirror::Object*>(pair.first));
+ soa_->Env()->DeleteLocalRef(pair.first);
}
}
@@ -1493,6 +1496,22 @@ void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
extern "C" void* artFindNativeMethod();
+uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
+ if (lock != nullptr) {
+ return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
+ } else {
+ return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
+ }
+}
+
+void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
+ if (lock != nullptr) {
+ JniMethodEndSynchronized(cookie, lock, self);
+ } else {
+ JniMethodEnd(cookie, self);
+ }
+}
+
/*
* Initializes an alloca region assumed to be directly below sp for a native call:
* Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
@@ -1552,6 +1571,15 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
if (nativeCode == nullptr) {
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
+
+ // End JNI, as the assembly will move to deliver the exception.
+ jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
+ if (mh.GetShorty()[0] == 'L') {
+ artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
+ } else {
+ artQuickGenericJniEndJNINonRef(self, cookie, lock);
+ }
+
return -1;
}
// Note that the native code pointer will be automatically set by artFindNativeMethod().
@@ -1577,33 +1605,21 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
mirror::ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
+ jobject lock = nullptr;
+ if (called->IsSynchronized()) {
+ StackIndirectReferenceTable* table =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ lock = reinterpret_cast<jobject>(table->GetStackReference(0));
+ }
+
MethodHelper mh(called);
char return_shorty_char = mh.GetShorty()[0];
if (return_shorty_char == 'L') {
- // the only special ending call
- if (called->IsSynchronized()) {
- StackIndirectReferenceTable* table =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
- jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
-
- return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(result.l, cookie, tmp,
- self));
- } else {
- return reinterpret_cast<uint64_t>(JniMethodEndWithReference(result.l, cookie, self));
- }
+ return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
} else {
- if (called->IsSynchronized()) {
- StackIndirectReferenceTable* table =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
- jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
-
- JniMethodEndSynchronized(cookie, tmp, self);
- } else {
- JniMethodEnd(cookie, self);
- }
+ artQuickGenericJniEndJNINonRef(self, cookie, lock);
switch (return_shorty_char) {
case 'F': // Fall-through.
@@ -1630,4 +1646,249 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
}
}
+template<InvokeType type, bool access_check>
+static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self, mirror::ArtMethod** sp);
+
+template<InvokeType type, bool access_check>
+static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self, mirror::ArtMethod** sp) {
+ mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
+ type);
+ if (UNLIKELY(method == nullptr)) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ uint32_t shorty_len;
+ const char* shorty =
+ dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
+ {
+ // Remember the args in case a GC happens in FindMethodFromCode.
+ ScopedObjectAccessUnchecked soa(self->GetJniEnv());
+ RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
+ visitor.VisitArguments();
+ method = FindMethodFromCode<type, access_check>(method_idx, this_object, caller_method, self);
+ visitor.FixupReferences();
+ }
+
+ if (UNLIKELY(method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return 0; // failure
+ }
+ }
+ DCHECK(!self->IsExceptionPending());
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
+ << MethodHelper(method).GetDexFile().GetLocation();
+#ifdef __LP64__
+ UNIMPLEMENTED(FATAL);
+ return 0;
+#else
+ uint32_t method_uint = reinterpret_cast<uint32_t>(method);
+ uint64_t code_uint = reinterpret_cast<uint32_t>(code);
+ uint64_t result = ((code_uint << 32) | method_uint);
+ return result;
+#endif
+}
+
+// Explicit artInvokeCommon template function declarations to please analysis tool.
+#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
+ template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
+ uint64_t artInvokeCommon<type, access_check>(uint32_t method_idx, \
+ mirror::Object* this_object, \
+ mirror::ArtMethod* caller_method, \
+ Thread* self, mirror::ArtMethod** sp) \
+
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, false);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kVirtual, true);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, false);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kInterface, true);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, false);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kDirect, true);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, false);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kStatic, true);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, false);
+EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true);
+#undef EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL
+
+
+// See comments in runtime_support_asm.S
+extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon<kInterface, true>(method_idx, this_object, caller_method, self, sp);
+}
+
+
+extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method, self, sp);
+}
+
+extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method, self, sp);
+}
+
+extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method, self, sp);
+}
+
+extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self,
+ mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method, self, sp);
+}
+
+// Determine target of interface dispatch. This object is known non-null.
+extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
+ mirror::Object* this_object,
+ mirror::ArtMethod* caller_method,
+ Thread* self, mirror::ArtMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method;
+ if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
+ method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
+ if (UNLIKELY(method == NULL)) {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(interface_method, this_object,
+ caller_method);
+ return 0; // Failure.
+ }
+ } else {
+ FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+ DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
+ // Determine method index from calling dex instruction.
+#if defined(__arm__)
+ // On entry the stack pointed by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | LR |
+ // | ... | callee saves
+ // | R3 | arg3
+ // | R2 | arg2
+ // | R1 | arg1
+ // | R0 |
+ // | Method* | <- sp
+ DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
+ uintptr_t caller_pc = regs[10];
+#elif defined(__i386__)
+ // On entry the stack pointed by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | Return |
+ // | EBP,ESI,EDI | callee saves
+ // | EBX | arg3
+ // | EDX | arg2
+ // | ECX | arg1
+ // | EAX/Method* | <- sp
+ DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uintptr_t caller_pc = regs[7];
+#elif defined(__mips__)
+ // On entry the stack pointed by sp is:
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | RA |
+ // | ... | callee saves
+ // | A3 | arg3
+ // | A2 | arg2
+ // | A1 | arg1
+ // | A0/Method* | <- sp
+ DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
+ uintptr_t caller_pc = regs[15];
+#else
+ UNIMPLEMENTED(FATAL);
+ uintptr_t caller_pc = 0;
+#endif
+ uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
+ const DexFile::CodeItem* code = MethodHelper(caller_method).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
+ instr_code == Instruction::INVOKE_INTERFACE_RANGE)
+ << "Unexpected call into interface trampoline: " << instr->DumpString(NULL);
+ uint32_t dex_method_idx;
+ if (instr_code == Instruction::INVOKE_INTERFACE) {
+ dex_method_idx = instr->VRegB_35c();
+ } else {
+ DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
+ dex_method_idx = instr->VRegB_3rc();
+ }
+
+ const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ uint32_t shorty_len;
+ const char* shorty =
+ dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
+ {
+ // Remember the args in case a GC happens in FindMethodFromCode.
+ ScopedObjectAccessUnchecked soa(self->GetJniEnv());
+ RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa);
+ visitor.VisitArguments();
+ method = FindMethodFromCode<kInterface, false>(dex_method_idx, this_object, caller_method,
+ self);
+ visitor.FixupReferences();
+ }
+
+ if (UNLIKELY(method == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ return 0; // Failure.
+ }
+ }
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ DCHECK(code != nullptr) << "Code was NULL in method: " << PrettyMethod(method) << " location: "
+ << MethodHelper(method).GetDexFile().GetLocation();
+#ifdef __LP64__
+ UNIMPLEMENTED(FATAL);
+ return 0;
+#else
+ uint32_t method_uint = reinterpret_cast<uint32_t>(method);
+ uint64_t code_uint = reinterpret_cast<uint32_t>(code);
+ uint64_t result = ((code_uint << 32) | method_uint);
+ return result;
+#endif
+}
+
} // namespace art
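
On the 32-bit targets this file supports, each invoke trampoline hands two results back to assembly in a single 64-bit value: the resolved method's code entry point in the high word and the ArtMethod* in the low word (hence the UNIMPLEMENTED(FATAL) under __LP64__, where two pointers no longer fit). A sketch of the packing, written via uintptr_t but only meaningful with 4-byte pointers:

    #include <cstdint>

    // Pack code (high 32 bits) and method (low 32 bits) into one value,
    // so the caller receives the pair in a register pair such as
    // edx:eax on x86 or r1:r0 on ARM.
    uint64_t PackMethodAndCode(void* method, const void* code) {
      uint64_t method_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
      uint64_t code_bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code));
      return (code_bits << 32) | method_bits;
    }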
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 0fbd27c..880ff1f 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -58,6 +58,14 @@ template <typename Visitor>
void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
const Visitor& visitor) const {
DCHECK_LT(visit_begin, visit_end);
+#if 0
+ for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(i);
+ if (Test(obj)) {
+ visitor(obj);
+ }
+ }
+#else
DCHECK_LE(heap_begin_, visit_begin);
DCHECK_LE(visit_end, HeapLimit());
@@ -114,14 +122,20 @@ void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
}
// Right edge is unique.
- right_edge = bitmap_begin_[index_end];
+ // But maybe we don't have anything to do: visit_end starts in a new word...
+ if (bit_end == 0) {
+ // Do not read memory, as it could be after the end of the bitmap.
+ right_edge = 0;
+ } else {
+ right_edge = bitmap_begin_[index_end];
+ }
} else {
// Right edge = left edge.
right_edge = left_edge;
}
// Right edge handling.
- right_edge &= ((static_cast<uword>(1) << bit_end) - 1) | (static_cast<uword>(1) << bit_end);
+ right_edge &= ((static_cast<uword>(1) << bit_end) - 1);
if (right_edge != 0) {
const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
do {
@@ -131,6 +145,7 @@ void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
right_edge ^= (static_cast<uword>(1)) << shift;
} while (right_edge != 0);
}
+#endif
}
inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
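
Both fixes above concern the last word of the scanned range. The old right-edge mask ORed in (1 << bit_end), which wrongly visits the object at visit_end itself, and the word at index_end was read unconditionally even when bit_end == 0, in which case that word can lie one word past the end of the bitmap. With the exclusive mask, bit_end == 0 yields an empty mask and the word never needs to be read. A sketch of the corrected mask:

    #include <cstddef>
    #include <cstdint>
    using uword = uintptr_t;  // bitmap word type, as in space_bitmap.h

    // Bits [0, bit_end) of the final word: only objects strictly below
    // visit_end. RightEdgeMask(0) == 0, so no out-of-bounds read is needed.
    uword RightEdgeMask(size_t bit_end) {
      return (static_cast<uword>(1) << bit_end) - 1;
    }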
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index aa24b03..a88f3e4 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -123,6 +123,9 @@ class SpaceBitmap {
}
}
+ /**
+ * Visit the live objects in the range [visit_begin, visit_end).
+ */
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index ba4e2ac..68994a8 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -86,6 +86,79 @@ TEST_F(SpaceBitmapTest, ScanRange) {
}
}
+class SimpleCounter {
+ public:
+ explicit SimpleCounter(size_t* counter) : count_(counter) {}
+
+ void operator()(mirror::Object* obj) const {
+ (*count_)++;
+ }
+
+ size_t* count_;
+};
+
+class RandGen {
+ public:
+ explicit RandGen(uint32_t seed) : val_(seed) {}
+
+ uint32_t next() {
+ val_ = val_ * 48271 % 2147483647;
+ return val_;
+ }
+
+ uint32_t val_;
+};
+
+void compat_test() NO_THREAD_SAFETY_ANALYSIS {
+ byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
+ size_t heap_capacity = 16 * MB;
+
+ // Seed with 0x1234 for reproducibility.
+ RandGen r(0x1234);
+
+
+ for (int i = 0; i < 5 ; ++i) {
+ UniquePtr<SpaceBitmap> space_bitmap(SpaceBitmap::Create("test bitmap",
+ heap_begin, heap_capacity));
+
+ for (int j = 0; j < 10000; ++j) {
+ size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ bool set = r.next() % 2 == 1;
+
+ if (set) {
+ space_bitmap->Set(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+ } else {
+ space_bitmap->Clear(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+ }
+ }
+
+ for (int j = 0; j < 50; ++j) {
+ size_t count = 0;
+ SimpleCounter c(&count);
+
+ size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t remain = heap_capacity - offset;
+ size_t end = offset + ((r.next() % (remain + 1)) & ~(0x7));
+
+ space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
+ reinterpret_cast<uintptr_t>(heap_begin) + end, c);
+
+ size_t manual = 0;
+ for (uintptr_t k = offset; k < end; k += kObjectAlignment) {
+ if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
+ manual++;
+ }
+ }
+
+ EXPECT_EQ(count, manual);
+ }
+ }
+}
+
+TEST_F(SpaceBitmapTest, Visitor) {
+ compat_test();
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5f6f16..cbefa6a 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1997,12 +1997,69 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
CHECK_LE(obj_size, kLargeSizeThreshold)
<< "A run slot contains a large object " << Dump();
CHECK_EQ(SizeToIndex(obj_size), idx)
+ << PrettyTypeOf(obj) << " "
+ << "obj_size=" << obj_size << ", idx=" << idx << " "
<< "A run slot contains an object with wrong size " << Dump();
}
}
}
}
+size_t RosAlloc::ReleasePages() {
+ VLOG(heap) << "RosAlloc::ReleasePages()";
+ DCHECK(!DoesReleaseAllPages());
+ Thread* self = Thread::Current();
+ size_t reclaimed_bytes = 0;
+ size_t i = 0;
+ while (true) {
+ MutexLock mu(self, lock_);
+ // Check the page map size which might have changed due to grow/shrink.
+ size_t pm_end = page_map_size_;
+ if (i >= pm_end) {
+ // Reached the end.
+ break;
+ }
+ byte pm = page_map_[i];
+ switch (pm) {
+ case kPageMapEmpty: {
+ // The start of a free page run. Release pages.
+ FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
+ DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
+ size_t fpr_size = fpr->ByteSize(this);
+ DCHECK(IsAligned<kPageSize>(fpr_size));
+ byte* start = reinterpret_cast<byte*>(fpr);
+ if (kIsDebugBuild) {
+ // In the debug build, the first page of a free page run
+ // contains a magic number for debugging. Exclude it.
+ start = reinterpret_cast<byte*>(fpr) + kPageSize;
+ }
+ byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
+ CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
+ reclaimed_bytes += fpr_size;
+ size_t num_pages = fpr_size / kPageSize;
+ if (kIsDebugBuild) {
+ for (size_t j = i + 1; j < i + num_pages; ++j) {
+ DCHECK_EQ(page_map_[j], kPageMapEmpty);
+ }
+ }
+ i += num_pages;
+ DCHECK_LE(i, pm_end);
+ break;
+ }
+ case kPageMapLargeObject: // Fall through.
+ case kPageMapLargeObjectPart: // Fall through.
+ case kPageMapRun: // Fall through.
+ case kPageMapRunPart: // Fall through.
+ ++i;
+ break; // Skip.
+ default:
+ LOG(FATAL) << "Unreachable - page map type: " << pm;
+ break;
+ }
+ }
+ return reclaimed_bytes;
+}
+
} // namespace allocator
} // namespace gc
} // namespace art
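
ReleasePages scans the page map under lock_ and hands every empty page run back to the kernel with madvise(MADV_DONTNEED): the virtual range stays mapped, but the physical pages are reclaimed and refault zero-filled on the next touch. A standalone sketch of the release step (ReleaseRange is illustrative):

    #include <sys/mman.h>
    #include <cstddef>

    // Advise the kernel that the page-aligned range [start, start + length)
    // is no longer needed; returns the number of bytes reclaimed.
    size_t ReleaseRange(void* start, size_t length) {
      if (madvise(start, length, MADV_DONTNEED) != 0) {
        return 0;  // failed; nothing was reclaimed
      }
      return length;
    }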
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0b4b189..5d9d75c 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -539,6 +539,8 @@ class RosAlloc {
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+ // Release empty pages.
+ size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
size_t Footprint() LOCKS_EXCLUDED(lock_);
// Returns the current capacity, maximum footprint.
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 07951e0..82340f5 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -201,7 +201,7 @@ uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
// Add 1ms to prevent possible division by 0.
- return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1);
+ return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
}
} // namespace collector
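
The cast fixes a 32-bit overflow: freed_bytes_ is a 32-bit count on 32-bit targets, so freed_bytes_ * 1000 is evaluated in 32-bit arithmetic and wraps whenever more than about 4 MB was freed; only the wrapped result was then widened for the division. A two-line demonstration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t freed_bytes = 8u * 1024 * 1024;                      // 8 MiB freed
      uint64_t wrong = freed_bytes * 1000;                          // wraps in 32 bits first
      uint64_t right = static_cast<uint64_t>(freed_bytes) * 1000;   // widened before multiply
      assert(wrong != right);  // 8388608000 does not fit in 32 bits
      return 0;
    }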
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ca2d0bd..944ef8d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -76,7 +76,7 @@ static constexpr bool kCountMarkedObjects = false;
// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
-static constexpr bool kVerifyRoots = kIsDebugBuild;
+static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
@@ -466,16 +466,17 @@ void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
}
void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
- const StackVisitor* visitor) {
- reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor);
+ const StackVisitor* visitor, RootType root_type) {
+ reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, visitor, root_type);
}
-void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
+void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor,
+ RootType root_type) {
// See if the root is on any space bitmap.
- if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
+ if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
if (!large_object_space->Contains(root)) {
- LOG(ERROR) << "Found invalid root: " << root;
+ LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
if (visitor != NULL) {
LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
}
@@ -918,7 +919,7 @@ void MarkSweep::ReMarkRoots() {
kVisitRootFlagStopLoggingNewRoots |
kVisitRootFlagClearRootLog));
timings_.EndSplit();
- if (kVerifyRoots) {
+ if (kVerifyRootsMarked) {
timings_.StartSplit("(Paused)VerifyRoots");
Runtime::Current()->VisitRoots(VerifyRootMarked, this);
timings_.EndSplit();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index f1fd546..d49e427 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -249,10 +249,10 @@ class MarkSweep : public GarbageCollector {
size_t GetThreadCount(bool paused) const;
static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
- const StackVisitor *visitor);
+ const StackVisitor *visitor, RootType root_type);
- void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
- NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor,
+ RootType root_type) NO_THREAD_SAFETY_ANALYSIS;
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 25f20d6..a06f272 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,7 +65,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
bool after_is_current_allocator = allocator == GetCurrentAllocator();
if (is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
- return AllocObject<kInstrumented>(self, klass, byte_count);
+ return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
return nullptr;
}
@@ -111,7 +111,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
if (AllocatorHasAllocationStack(allocator)) {
- PushOnAllocationStack(self, obj);
+ PushOnAllocationStack(self, &obj);
}
if (kInstrumented) {
if (Dbg::IsAllocTrackingEnabled()) {
@@ -135,28 +135,34 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
if (kUseThreadLocalAllocationStack) {
- bool success = self->PushOnThreadLocalAllocationStack(obj);
+ bool success = self->PushOnThreadLocalAllocationStack(*obj);
if (UNLIKELY(!success)) {
// Slow path. Allocate a new thread-local allocation stack.
mirror::Object** start_address;
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
- success = self->PushOnThreadLocalAllocationStack(obj);
+ success = self->PushOnThreadLocalAllocationStack(*obj);
// Must succeed.
CHECK(success);
}
} else {
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
- while (!allocation_stack_->AtomicPushBack(obj)) {
+ while (!allocation_stack_->AtomicPushBack(*obj)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
}
}
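
The signature change to mirror::Object** is the point of this hunk: CollectGarbageInternal can move the freshly allocated object while the push is being retried, which would leave the caller's raw pointer dangling. The SirtRef roots the object for the duration of the collection, and the pointer is reloaded from it afterwards. The general shape of the pattern, with GcRoot, TryPush, and CollectGarbage as stand-ins:

    struct Object;                 // stand-in for mirror::Object
    bool TryPush(Object* obj);     // hypothetical fixed-capacity push
    void CollectGarbage();         // may relocate live objects

    // Stand-in for SirtRef: the collector visits registered roots and
    // rewrites obj_ in place when the referent moves.
    class GcRoot {
     public:
      explicit GcRoot(Object* obj) : obj_(obj) { /* register root */ }
      ~GcRoot() { /* unregister root */ }
      Object* get() const { return obj_; }
     private:
      Object* obj_;
    };

    void PushWithRetry(Object** obj) {
      while (!TryPush(*obj)) {
        GcRoot root(*obj);   // keep the object visible to the GC
        CollectGarbage();
        *obj = root.get();   // reload the possibly-updated address
      }
    }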
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 915e54f..e3fa834 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -914,8 +914,16 @@ void Heap::DoPendingTransitionOrTrim() {
// Transition the collector if the desired collector type is not the same as the current
// collector type.
TransitionCollector(desired_collector_type);
- // Do a heap trim if it is needed.
- Trim();
+ if (!CareAboutPauseTimes()) {
+ // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
+ // about pauses.
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll();
+ runtime->GetMonitorList()->DeflateMonitors();
+ runtime->GetThreadList()->ResumeAll();
+ // Do a heap trim if it is needed.
+ Trim();
+ }
}
void Heap::Trim() {
@@ -2663,6 +2671,10 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
}
void Heap::RequestHeapTrim() {
+ // Request a heap trim only if we do not currently care about pause times.
+ if (CareAboutPauseTimes()) {
+ return;
+ }
// GC completed and now we must decide whether to request a heap trim (advising pages back to the
// kernel) or not. Issuing a request will also cause trimming of the libc heap. As a trim scans
// a space it will hold its lock and can become a cause of jank.
@@ -2684,21 +2696,17 @@ void Heap::RequestHeapTrim() {
// as we don't hold the lock while requesting the trim).
return;
}
-
- // Request a heap trim only if we do not currently care about pause times.
- if (!CareAboutPauseTimes()) {
- {
- MutexLock mu(self, *heap_trim_request_lock_);
- if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
- // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
- // just yet.
- return;
- }
- heap_trim_request_pending_ = true;
+ {
+ MutexLock mu(self, *heap_trim_request_lock_);
+ if (last_trim_time_ + kHeapTrimWait >= NanoTime()) {
+ // We have done a heap trim in the last kHeapTrimWait nanosecs, don't request another one
+ // just yet.
+ return;
}
- // Notify the daemon thread which will actually do the heap trim.
- SignalHeapTrimDaemon(self);
+ heap_trim_request_pending_ = true;
}
+ // Notify the daemon thread which will actually do the heap trim.
+ SignalHeapTrimDaemon(self);
}
void Heap::SignalHeapTrimDaemon(Thread* self) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5879757..a8989ec 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -115,6 +115,8 @@ enum ProcessState {
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
+std::ostream& operator<<(std::ostream& os, const RootType& root_type);
+
class Heap {
public:
// If true, measure the total allocation time.
@@ -158,28 +160,28 @@ class Heap {
~Heap();
// Allocates and initializes storage for an object instance.
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentNonMovingAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
AllocatorType GetCurrentAllocator() const {
@@ -691,7 +693,8 @@ class Heap {
void SignalHeapTrimDaemon(Thread* self);
// Push an object onto the allocation stack.
- void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+ void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 012267b..5c5e7f8 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -222,6 +222,7 @@ extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intpt
}
size_t RosAllocSpace::Trim() {
+ VLOG(heap) << "RosAllocSpace::Trim() ";
{
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
@@ -229,10 +230,7 @@ size_t RosAllocSpace::Trim() {
}
// Attempt to release pages if it does not release all empty pages.
if (!rosalloc_->DoesReleaseAllPages()) {
- VLOG(heap) << "RosAllocSpace::Trim() ";
- size_t reclaimed = 0;
- InspectAllRosAlloc(DlmallocMadviseCallback, &reclaimed, false);
- return reclaimed;
+ return rosalloc_->ReleasePages();
}
return 0;
}
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 5c735df..9896a48 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -85,8 +85,13 @@ class SpaceTest : public CommonRuntimeTest {
EXPECT_GE(size, SizeOfZeroLengthByteArray());
EXPECT_TRUE(byte_array_class != nullptr);
o->SetClass(byte_array_class);
- if (kUseBrooksReadBarrier) {
- o->SetReadBarrierPointer(o);
+ if (kUseBakerOrBrooksReadBarrier) {
+ // Like the proper heap object allocation, install and verify
+ // the correct read barrier pointer.
+ if (kUseBrooksReadBarrier) {
+ o->SetReadBarrierPointer(o);
+ }
+ o->AssertReadBarrierPointer();
}
mirror::Array* arr = o->AsArray<kVerifyNone>();
size_t header_size = SizeOfZeroLengthByteArray();
diff --git a/runtime/globals.h b/runtime/globals.h
index f2d6862..7e85231 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -19,7 +19,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "read_barrier.h"
+#include "read_barrier_c.h"
namespace art {
@@ -31,9 +31,17 @@ static constexpr size_t KB = 1024;
static constexpr size_t MB = KB * KB;
static constexpr size_t GB = KB * KB * KB;
+// Runtime sizes.
static constexpr size_t kWordSize = sizeof(word);
static constexpr size_t kPointerSize = sizeof(void*);
+// Architecture-specific pointer sizes
+static constexpr size_t kArmPointerSize = 4;
+static constexpr size_t kArm64PointerSize = 8;
+static constexpr size_t kMipsPointerSize = 4;
+static constexpr size_t kX86PointerSize = 4;
+static constexpr size_t kX86_64PointerSize = 8;
+
static constexpr size_t kBitsPerByte = 8;
static constexpr size_t kBitsPerByteLog2 = 3;
static constexpr int kBitsPerWord = kWordSize * kBitsPerByte;
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index c5a4ec8..f4eecfc 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -35,6 +35,20 @@ enum InstructionSet {
};
std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
+#if defined(__arm__)
+static constexpr InstructionSet kRuntimeISA = kArm;
+#elif defined(__aarch64__)
+static constexpr InstructionSet kRuntimeISA = kArm64;
+#elif defined(__mips__)
+static constexpr InstructionSet kRuntimeISA = kMips;
+#elif defined(__i386__)
+static constexpr InstructionSet kRuntimeISA = kX86;
+#elif defined(__x86_64__)
+static constexpr InstructionSet kRuntimeISA = kX86_64;
+#else
+static constexpr InstructionSet kRuntimeISA = kNone;
+#endif
+
enum InstructionFeatures {
kHwDiv = 1 // Supports hardware divide.
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 89d9241..025e62a 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -442,7 +442,14 @@ inline void Class::SetName(String* name) {
}
inline void Class::CheckObjectAlloc() {
- DCHECK(!IsArrayClass()) << PrettyClass(this);
+ DCHECK(!IsArrayClass())
+ << PrettyClass(this)
+ << "A array shouldn't be allocated through this "
+ << "as it requires a pre-fence visitor that sets the class size.";
+ DCHECK(!IsClassClass())
+ << PrettyClass(this)
+ << "A class object shouldn't be allocated through this "
+ << "as it requires a pre-fence visitor that sets the class size.";
DCHECK(IsInstantiable()) << PrettyClass(this);
// TODO: decide whether we want this check. It currently fails during bootstrap.
// DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
@@ -454,7 +461,7 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
CheckObjectAlloc();
gc::Heap* heap = Runtime::Current()->GetHeap();
return heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
- allocator_type);
+ allocator_type, VoidFunctor());
}
inline Object* Class::AllocObject(Thread* self) {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index b6c140d..a6db387 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -26,6 +26,7 @@
#include "class.h"
#include "lock_word-inl.h"
#include "monitor.h"
+#include "read_barrier-inl.h"
#include "runtime.h"
#include "reference.h"
#include "throwable.h"
@@ -96,7 +97,7 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
inline Object* Object::GetReadBarrierPointer() {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
DCHECK(kUseBakerOrBrooksReadBarrier);
- return GetFieldObject<Object, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), false);
+ return GetFieldObject<Object, kVerifyNone, false>(OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), false);
#else
LOG(FATAL) << "Unreachable";
return nullptr;
@@ -116,21 +117,19 @@ inline void Object::SetReadBarrierPointer(Object* rb_pointer) {
}
inline void Object::AssertReadBarrierPointer() const {
-#if defined(USE_BAKER_READ_BARRIER)
- DCHECK(kUseBakerReadBarrier);
- Object* obj = const_cast<Object*>(this);
- DCHECK(obj->GetReadBarrierPointer() == nullptr)
- << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
- << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
-#elif defined(USE_BROOKS_READ_BARRIER)
- DCHECK(kUseBrooksReadBarrier);
- Object* obj = const_cast<Object*>(this);
- DCHECK_EQ(obj, obj->GetReadBarrierPointer())
- << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
- << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
-#else
- LOG(FATAL) << "Unreachable";
-#endif
+ if (kUseBakerReadBarrier) {
+ Object* obj = const_cast<Object*>(this);
+ DCHECK(obj->GetReadBarrierPointer() == nullptr)
+ << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
+ << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
+ } else if (kUseBrooksReadBarrier) {
+ Object* obj = const_cast<Object*>(this);
+ DCHECK_EQ(obj, obj->GetReadBarrierPointer())
+ << "Bad Brooks pointer: obj=" << reinterpret_cast<void*>(obj)
+ << " ptr=" << reinterpret_cast<void*>(obj->GetReadBarrierPointer());
+ } else {
+ LOG(FATAL) << "Unreachable";
+ }
}
template<VerifyObjectFlags kVerifyFlags>
@@ -470,19 +469,17 @@ inline bool Object::CasField64(MemberOffset field_offset, int64_t old_value, int
return QuasiAtomic::Cas64(old_value, new_value, addr);
}
-template<class T, VerifyObjectFlags kVerifyFlags>
+template<class T, VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
- HeapReference<T> objref = *objref_addr;
-
+ T* result = ReadBarrier::Barrier<T, kDoReadBarrier>(this, field_offset, objref_addr);
if (UNLIKELY(is_volatile)) {
QuasiAtomic::MembarLoadLoad(); // Ensure loads don't re-order.
}
- T* result = objref.AsMirrorPtr();
if (kVerifyFlags & kVerifyReads) {
VerifyObject(result);
}
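
Every reference-field load now funnels through ReadBarrier::Barrier, with a kDoReadBarrier template switch so the barrier machinery can opt out when reading its own metadata; GetReadBarrierPointer() above passes false precisely because running the barrier while loading x_rb_ptr_ would recurse. Illustrative call sites under the new signature:

    // Default read: verified and (once implemented) barrier-protected.
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    // Barrier-free read of the Baker/Brooks pointer field itself.
    mirror::Object* raw =
        obj->GetFieldObject<mirror::Object, kVerifyNone, false>(offset, false);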
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index f1485e5..d9155f5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -66,6 +66,26 @@ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* sr
return dest;
}
+// An allocation pre-fence visitor that copies the object.
+class CopyObjectVisitor {
+ public:
+ explicit CopyObjectVisitor(Thread* self, SirtRef<Object>* orig, size_t num_bytes)
+ : self_(self), orig_(orig), num_bytes_(num_bytes) {
+ }
+
+ void operator()(Object* obj, size_t usable_size) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(usable_size);
+ CopyObject(self_, obj, orig_->get(), num_bytes_);
+ }
+
+ private:
+ Thread* const self_;
+ SirtRef<Object>* const orig_;
+ const size_t num_bytes_;
+ DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
+};
+
Object* Object::Clone(Thread* self) {
CHECK(!IsClass()) << "Can't clone classes.";
// Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
@@ -74,13 +94,11 @@ Object* Object::Clone(Thread* self) {
size_t num_bytes = SizeOf();
SirtRef<Object> this_object(self, this);
Object* copy;
+ CopyObjectVisitor visitor(self, &this_object, num_bytes);
if (heap->IsMovableObject(this)) {
- copy = heap->AllocObject<true>(self, GetClass(), num_bytes);
+ copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
} else {
- copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes);
- }
- if (LIKELY(copy != nullptr)) {
- return CopyObject(self, copy, this_object.get(), num_bytes);
+ copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
}
return copy;
}
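
Running the copy inside a pre-fence visitor moves it before the allocator's publishing fence, closing the window in which a concurrent collector could observe the clone half-initialized; it also drops the separate null check, since the visitor simply never runs on a failed allocation. Conceptually:

    // Ordering sketch (conceptual, not literal code):
    //   raw = Allocate(...)        // object exists but is not yet visible
    //   visitor(raw, usable_size)  // CopyObjectVisitor fills in the payload
    //   fence / publish            // only now can other threads or GC see it
    // The old code copied after AllocObject() had already published the object.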
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 1ac23ce..f652202 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -185,7 +185,7 @@ class MANAGED LOCKABLE Object {
bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessor for Java type fields.
- template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
T* GetFieldObject(MemberOffset field_offset, bool is_volatile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index bcaf8ec..bbc7dd0 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -205,7 +205,7 @@ void Monitor::SetObject(mirror::Object* object) {
void Monitor::Lock(Thread* self) {
MutexLock mu(self, monitor_lock_);
while (true) {
- if (owner_ == NULL) { // Unowned.
+ if (owner_ == nullptr) { // Unowned.
owner_ = self;
CHECK_EQ(lock_count_, 0);
// When debugging, save the current monitor holder for future
@@ -223,15 +223,15 @@ void Monitor::Lock(Thread* self) {
uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
mirror::ArtMethod* owners_method = locking_method_;
uint32_t owners_dex_pc = locking_dex_pc_;
+ // Do this before releasing the lock so that we don't get deflated.
+ ++num_waiters_;
monitor_lock_.Unlock(self); // Let go of locks in order.
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
self->SetMonitorEnterObject(obj_);
MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
if (owner_ != NULL) { // Did the owner_ give the lock up?
- ++num_waiters_;
monitor_contenders_.Wait(self); // Still contended so wait.
- --num_waiters_;
// Woken from contention.
if (log_contention) {
uint64_t wait_ms = MilliTime() - wait_start_ms;
@@ -252,6 +252,7 @@ void Monitor::Lock(Thread* self) {
self->SetMonitorEnterObject(nullptr);
}
monitor_lock_.Lock(self); // Reacquire locks in order.
+ --num_waiters_;
}
}
@@ -431,6 +432,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
* not order sensitive as we hold the pthread mutex.
*/
AppendToWaitSet(self);
+ ++num_waiters_;
int prev_lock_count = lock_count_;
lock_count_ = 0;
owner_ = NULL;
@@ -507,6 +509,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
lock_count_ = prev_lock_count;
locking_method_ = saved_method;
locking_dex_pc_ = saved_dex_pc;
+ --num_waiters_;
RemoveFromWaitSet(self);
if (was_interrupted) {
@@ -575,8 +578,12 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
// If the lock isn't an inflated monitor, then we don't need to deflate anything.
if (lw.GetState() == LockWord::kFatLocked) {
Monitor* monitor = lw.FatLockMonitor();
- CHECK(monitor != nullptr);
+ DCHECK(monitor != nullptr);
MutexLock mu(self, monitor->monitor_lock_);
+ // Can't deflate if we have anybody waiting on the CV.
+ if (monitor->num_waiters_ > 0) {
+ return false;
+ }
Thread* owner = monitor->owner_;
if (owner != nullptr) {
// Can't deflate if we are locked and have a hash code.
@@ -587,17 +594,16 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
if (monitor->lock_count_ > LockWord::kThinLockMaxCount) {
return false;
}
- // Can't deflate if we have anybody waiting on the CV.
- if (monitor->num_waiters_ > 0) {
- return false;
- }
// Deflate to a thin lock.
- obj->SetLockWord(LockWord::FromThinLockId(owner->GetTid(), monitor->lock_count_));
+ obj->SetLockWord(LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_));
+ VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / " << monitor->lock_count_;
} else if (monitor->HasHashCode()) {
obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+ VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
} else {
// No lock and no hash, just put an empty lock word inside the object.
obj->SetLockWord(LockWord());
+ VLOG(monitor) << "Deflated" << obj << " to empty lock word";
}
// The monitor is deflated, mark the object as nullptr so that we know to delete it during the
// next GC.
@@ -1054,7 +1060,7 @@ uint32_t Monitor::GetOwnerThreadId() {
}
MonitorList::MonitorList()
- : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock"),
+ : allow_new_monitors_(true), monitor_list_lock_("MonitorList lock", kMonitorListLock),
monitor_add_condition_("MonitorList disallow condition", monitor_list_lock_) {
}
@@ -1103,6 +1109,22 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
}
}
+static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (Monitor::Deflate(reinterpret_cast<Thread*>(arg), object)) {
+ DCHECK_NE(object->GetLockWord().GetState(), LockWord::kFatLocked);
+ // If we deflated, return nullptr so that the monitor gets removed from the list.
+ return nullptr;
+ }
+ return object; // Monitor was not deflated.
+}
+
+void MonitorList::DeflateMonitors() {
+ Thread* self = Thread::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ SweepMonitorList(MonitorDeflateCallback, self);
+}
+
MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
DCHECK(obj != NULL);
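
Two changes cooperate to make deflation safe: a contender now raises num_waiters_ while still holding monitor_lock_ (so it is counted for its entire blocking window), and Deflate() tests the waiter count first, under that same lock. A condensed sketch of the predicate Deflate() evaluates, assuming the fields shown above; the real function also rewrites the lock word:

    // Condensed sketch of Monitor::Deflate()'s decision, not the full function.
    static bool CanDeflateLocked(Monitor* monitor)
        EXCLUSIVE_LOCKS_REQUIRED(monitor->monitor_lock_) {
      if (monitor->num_waiters_ > 0) {
        return false;  // Someone is on, or headed for, the condition variable.
      }
      if (monitor->owner_ != nullptr) {
        // Held: must fit in a thin lock and must not also carry a hash code.
        return !monitor->HasHashCode() &&
               monitor->lock_count_ <= LockWord::kThinLockMaxCount;
      }
      return true;  // Unheld: deflates to a hash code or an empty lock word.
    }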
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 55504b5..c459278 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -224,9 +224,11 @@ class MonitorList {
void Add(Monitor* m);
void SweepMonitorList(IsMarkedCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DisallowNewMonitors();
- void AllowNewMonitors();
+ LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
+ void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_);
+ void DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 2af5324..1008491 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -91,9 +91,8 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) {
Runtime* runtime = Runtime::Current();
CHECK(runtime->IsZygote()) << "runtime instance not started with -Xzygote";
- if (!runtime->PreZygoteFork()) {
- LOG(FATAL) << "pre-fork heap failed";
- }
+
+ runtime->PreZygoteFork();
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
Thread* self = Thread::Current();
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 89ee34e..9198c90 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -56,7 +56,7 @@ typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
__attribute__((warn_unused_result));
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
- const StackVisitor* visitor);
+ const StackVisitor* visitor, RootType root_type);
typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index a9072d8..8bd8dba 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -135,6 +135,12 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
}
}
+bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
+ const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.dex_method_index);
+ const char* method_name = ref.dex_file->GetMethodName(method_id);
+ return strncmp(method_name, "access$", strlen("access$")) == 0;
+}
+
bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_item,
InlineMethod* result) {
const Instruction* return_instruction = Instruction::At(code_item->insns_);
@@ -218,13 +224,24 @@ bool InlineMethodAnalyser::AnalyseIGetMethod(verifier::MethodVerifier* verifier,
uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
DCHECK_GE(object_reg, arg_start);
DCHECK_LT(object_reg, code_item->registers_size_);
+ uint32_t object_arg = object_reg - arg_start;
+
DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->registers_size_);
if (dst_reg != return_reg) {
return false; // Not returning the value retrieved by IGET?
}
- if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
- // TODO: Support inlining IGET on other register than "this".
+ if ((verifier->GetAccessFlags() & kAccStatic) != 0u || object_arg != 0u) {
+ // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
+ // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
+ if (!IsSyntheticAccessor(verifier->GetMethodReference())) {
+ return false;
+ }
+ }
+
+ // InlineIGetIPutData::object_arg is only 4 bits wide.
+ static constexpr uint16_t kMaxObjectArg = 15u;
+ if (object_arg > kMaxObjectArg) {
return false;
}
@@ -236,10 +253,10 @@ bool InlineMethodAnalyser::AnalyseIGetMethod(verifier::MethodVerifier* verifier,
result->opcode = kInlineOpIGet;
result->flags = kInlineSpecial;
data->op_variant = IGetVariant(opcode);
- data->object_arg = object_reg - arg_start; // Allow IGET on any register, not just "this".
- data->src_arg = 0;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0u ? 1u : 0u;
+ data->object_arg = object_arg; // Allow IGET on any register, not just "this".
+ data->src_arg = 0u;
+ data->return_arg_plus1 = 0u;
}
return true;
}
@@ -253,26 +270,45 @@ bool InlineMethodAnalyser::AnalyseIPutMethod(verifier::MethodVerifier* verifier,
const Instruction* return_instruction = instruction->Next();
Instruction::Code return_opcode = return_instruction->Opcode();
+ uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
+ uint16_t return_arg_plus1 = 0u;
if (return_opcode != Instruction::RETURN_VOID) {
- // TODO: Support returning an argument.
- // This is needed by builder classes and generated accessor setters.
- // builder.setX(value): iput value, this, fieldX; return-object this;
- // object.access$nnn(value): iput value, this, fieldX; return value;
- // Use InlineIGetIPutData::reserved to hold the information.
- return false;
+ if (return_opcode != Instruction::RETURN &&
+ return_opcode != Instruction::RETURN_OBJECT &&
+ return_opcode != Instruction::RETURN_WIDE) {
+ return false;
+ }
+ // Returning an argument.
+ uint32_t return_reg = return_instruction->VRegA_11x();
+ DCHECK_GE(return_reg, arg_start);
+ DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1u : return_reg,
+ code_item->registers_size_);
+ return_arg_plus1 = return_reg - arg_start + 1u;
}
uint32_t src_reg = instruction->VRegA_22c();
uint32_t object_reg = instruction->VRegB_22c();
uint32_t field_idx = instruction->VRegC_22c();
- uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_;
DCHECK_GE(object_reg, arg_start);
DCHECK_LT(object_reg, code_item->registers_size_);
DCHECK_GE(src_reg, arg_start);
DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->registers_size_);
+ uint32_t object_arg = object_reg - arg_start;
+ uint32_t src_arg = src_reg - arg_start;
+
+ if ((verifier->GetAccessFlags() & kAccStatic) != 0u || object_arg != 0u) {
+ // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
+ // Allow synthetic accessors. We don't care about losing their stack frame in NPE.
+ if (!IsSyntheticAccessor(verifier->GetMethodReference())) {
+ return false;
+ }
+ }
- if ((verifier->GetAccessFlags() & kAccStatic) != 0 || object_reg != arg_start) {
- // TODO: Support inlining IPUT on other register than "this".
+ // InlineIGetIPutData::object_arg/src_arg/return_arg_plus1 are each only 4 bits wide.
+ static constexpr uint16_t kMaxObjectArg = 15u;
+ static constexpr uint16_t kMaxSrcArg = 15u;
+ static constexpr uint16_t kMaxReturnArgPlus1 = 15u;
+ if (object_arg > kMaxObjectArg || src_arg > kMaxSrcArg || return_arg_plus1 > kMaxReturnArgPlus1) {
return false;
}
@@ -284,10 +320,10 @@ bool InlineMethodAnalyser::AnalyseIPutMethod(verifier::MethodVerifier* verifier,
result->opcode = kInlineOpIPut;
result->flags = kInlineSpecial;
data->op_variant = IPutVariant(opcode);
- data->object_arg = object_reg - arg_start; // Allow IPUT on any register, not just "this".
- data->src_arg = src_reg - arg_start;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0u ? 1u : 0u;
+ data->object_arg = object_arg; // Allow IPUT on any register, not just "this".
+ data->src_arg = src_arg;
+ data->return_arg_plus1 = return_arg_plus1;
}
return true;
}
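
return_arg_plus1 biases the returned argument's index by one so that zero can keep meaning return-void: builder.setX(value) { iput ...; return-object this; } encodes 1 (argument 0, "this"), while a synthetic access$nnn(obj, value) that returns value encodes value's argument slot plus one. A decoding sketch against the field layout in the header change below (the helper name is illustrative):

    // Illustrative decoder for the return_arg_plus1 encoding.
    bool GetReturnedArg(const InlineIGetIPutData& data, uint16_t* arg_out) {
      if (data.return_arg_plus1 == 0u) {
        return false;  // The inlined setter is return-void.
      }
      *arg_out = data.return_arg_plus1 - 1u;  // 0 is "this" for instance methods.
      return true;
    }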
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 8e1a408..ddee89b 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -21,6 +21,7 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_instruction.h"
+#include "method_reference.h"
/*
* NOTE: This code is part of the quick compiler. It lives in the runtime
@@ -98,10 +99,10 @@ struct InlineIGetIPutData {
// opcode-Instruction::IPUT for IPUTs. This is because the runtime
// doesn't know the OpSize enumeration.
uint16_t op_variant : 3;
+ uint16_t method_is_static : 1;
uint16_t object_arg : 4;
uint16_t src_arg : 4; // iput only
- uint16_t method_is_static : 1;
- uint16_t reserved : 4;
+ uint16_t return_arg_plus1 : 4; // iput only, method argument to return + 1, 0 = return void.
uint16_t field_idx;
uint32_t is_volatile : 1;
uint32_t field_offset : 31;
@@ -156,6 +157,9 @@ class InlineMethodAnalyser {
return opcode - Instruction::IPUT;
}
+ // Determines whether the method is a synthetic accessor (method name starts with "access$").
+ static bool IsSyntheticAccessor(MethodReference ref);
+
private:
static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
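
Repurposing the reserved bits keeps the packed layout intact: 3 + 1 + 4 + 4 + 4 bits still fill exactly one uint16_t, which is why the analyser caps object_arg, src_arg, and return_arg_plus1 at 15. A compile-time guard one could add, assuming a C++11 toolchain:

    static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t),
                  "InlineIGetIPutData bit-fields must still pack into 8 bytes");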
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
new file mode 100644
index 0000000..ea2f830
--- /dev/null
+++ b/runtime/read_barrier-inl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
+#define ART_RUNTIME_READ_BARRIER_INL_H_
+
+#include "read_barrier.h"
+
+#include "mirror/object_reference.h"
+
+namespace art {
+
+template <typename MirrorType, bool kDoReadBarrier>
+inline MirrorType* ReadBarrier::Barrier(
+ mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
+ // Unused for now.
+ UNUSED(obj);
+ UNUSED(offset);
+ UNUSED(ref_addr);
+ if (kDoReadBarrier && kUseBakerReadBarrier) {
+ // To be implemented.
+ return ref_addr->AsMirrorPtr();
+ } else if (kDoReadBarrier && kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref_addr->AsMirrorPtr();
+ } else {
+ // No read barrier.
+ return ref_addr->AsMirrorPtr();
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_READ_BARRIER_INL_H_
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index ba0d830..6f59004 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -17,21 +17,28 @@
#ifndef ART_RUNTIME_READ_BARRIER_H_
#define ART_RUNTIME_READ_BARRIER_H_
-// This is in a separate file (from globals.h) because asm_support.h
-// (a C header, not C++) can't include globals.h.
+#include "base/mutex.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "read_barrier_c.h"
-// Uncomment one of the following two and the two fields in
-// Object.java (libcore) to enable baker or brooks pointers.
+// This is a C++ (not C) header file, separate from read_barrier_c.h
+// which needs to be a C header file for asm_support.h.
-// #define USE_BAKER_READ_BARRIER
-// #define USE_BROOKS_READ_BARRIER
+namespace art {
+namespace mirror {
+ class Object;
+ template<typename MirrorType> class HeapReference;
+} // namespace mirror
-#if defined(USE_BAKER_READ_BARRIER) || defined(USE_BROOKS_READ_BARRIER)
-#define USE_BAKER_OR_BROOKS_READ_BARRIER
-#endif
+class ReadBarrier {
+ public:
+ template <typename MirrorType, bool kDoReadBarrier = true>
+ ALWAYS_INLINE static MirrorType* Barrier(
+ mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
-#if defined(USE_BAKER_READ_BARRIER) && defined(USE_BROOKS_READ_BARRIER)
-#error "Only one of Baker or Brooks can be enabled at a time."
-#endif
+} // namespace art
#endif // ART_RUNTIME_READ_BARRIER_H_
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
new file mode 100644
index 0000000..f4af61f
--- /dev/null
+++ b/runtime/read_barrier_c.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_READ_BARRIER_C_H_
+#define ART_RUNTIME_READ_BARRIER_C_H_
+
+// This is a C (not C++) header file and is in a separate file (from
+// globals.h) because asm_support.h is a C header file and can't
+// include globals.h.
+
+// Uncomment one of the two defines below, plus the two fields in
+// Object.java (libcore), to enable Baker or Brooks pointers.
+
+// #define USE_BAKER_READ_BARRIER
+// #define USE_BROOKS_READ_BARRIER
+
+#if defined(USE_BAKER_READ_BARRIER) || defined(USE_BROOKS_READ_BARRIER)
+#define USE_BAKER_OR_BROOKS_READ_BARRIER
+#endif
+
+#if defined(USE_BAKER_READ_BARRIER) && defined(USE_BROOKS_READ_BARRIER)
+#error "Only one of Baker or Brooks can be enabled at a time."
+#endif
+
+#endif // ART_RUNTIME_READ_BARRIER_C_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3c23855..a19fa53 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -273,9 +273,8 @@ void Runtime::Abort() {
// notreached
}
-bool Runtime::PreZygoteFork() {
+void Runtime::PreZygoteFork() {
heap_->PreZygoteFork();
- return true;
}
void Runtime::CallExitHook(jint status) {
@@ -1002,7 +1001,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
uint32_t fp_spills = type == kSaveAll ? fp_all_spills : 0;
size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
__builtin_popcount(fp_spills) /* fprs */ +
- 1 /* Method* */) * kPointerSize, kStackAlignment);
+ 1 /* Method* */) * kArmPointerSize, kStackAlignment);
method->SetFrameSizeInBytes(frame_size);
method->SetCoreSpillMask(core_spills);
method->SetFpSpillMask(fp_spills);
@@ -1016,7 +1015,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
(type == kSaveAll ? all_spills : 0) | (1 << art::mips::RA);
size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
(type == kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
- kPointerSize, kStackAlignment);
+ kMipsPointerSize, kStackAlignment);
method->SetFrameSizeInBytes(frame_size);
method->SetCoreSpillMask(core_spills);
method->SetFpSpillMask(0);
@@ -1026,7 +1025,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
(1 << art::x86::kNumberOfCpuRegisters); // fake return address callee save
size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
- 1 /* Method* */) * kPointerSize, kStackAlignment);
+ 1 /* Method* */) * kX86PointerSize, kStackAlignment);
method->SetFrameSizeInBytes(frame_size);
method->SetCoreSpillMask(core_spills);
method->SetFpSpillMask(0);
@@ -1046,7 +1045,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
uint32_t fp_spills = (type == kRefsAndArgs ? fp_arg_spills : 0);
size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
__builtin_popcount(fp_spills) /* fprs */ +
- 1 /* Method* */) * kPointerSize, kStackAlignment);
+ 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
method->SetFrameSizeInBytes(frame_size);
method->SetCoreSpillMask(core_spills);
method->SetFpSpillMask(fp_spills);
@@ -1086,7 +1085,7 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_se
| (type == kSaveAll ? fp_all_spills : 0);
size_t frame_size = RoundUp((__builtin_popcount(core_spills) /* gprs */ +
__builtin_popcount(fp_spills) /* fprs */ +
- 1 /* Method* */) * kPointerSize, kStackAlignment);
+ 1 /* Method* */) * kArm64PointerSize, kStackAlignment);
method->SetFrameSizeInBytes(frame_size);
method->SetCoreSpillMask(core_spills);
method->SetFpSpillMask(fp_spills);
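
Each per-ISA branch now multiplies by that target's pointer size rather than the host's kPointerSize, which is what previously made cross-ISA callee-save frames come out wrong (an x86 host sizing an arm64 frame with 4-byte slots). Worked through once with hypothetical spill masks:

    // Hypothetical masks; kStackAlignment is 16.
    //   popcount(core_spills) = 8 GPRs, popcount(fp_spills) = 0, plus 1 Method*
    //   ARM:   RoundUp((8 + 0 + 1) * kArmPointerSize, 16)   = RoundUp(36, 16) = 48
    //   ARM64: RoundUp((8 + 0 + 1) * kArm64PointerSize, 16) = RoundUp(72, 16) = 80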
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9e6bd2a..462711e 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -355,7 +355,7 @@ class Runtime {
void SetStatsEnabled(bool new_state);
- bool PreZygoteFork();
+ void PreZygoteFork();
bool InitZygote();
void DidForkFromZygote();
diff --git a/runtime/sirt_ref-inl.h b/runtime/sirt_ref-inl.h
index 7f2d847..7de624a 100644
--- a/runtime/sirt_ref-inl.h
+++ b/runtime/sirt_ref-inl.h
@@ -23,8 +23,11 @@
namespace art {
-template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object) : self_(self), sirt_(object) {
- VerifyObject(object);
+template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object, bool should_verify)
+ : self_(self), sirt_(object) {
+ if (should_verify) {
+ VerifyObject(object);
+ }
self_->PushSirt(&sirt_);
}
@@ -33,8 +36,10 @@ template<class T> inline SirtRef<T>::~SirtRef() {
DCHECK_EQ(top_sirt, &sirt_);
}
-template<class T> inline T* SirtRef<T>::reset(T* object) {
- VerifyObject(object);
+template<class T> inline T* SirtRef<T>::reset(T* object, bool should_verify) {
+ if (should_verify) {
+ VerifyObject(object);
+ }
T* old_ref = get();
sirt_.SetReference(0, object);
return old_ref;
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 2226e17..cf23891 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -28,7 +28,7 @@ namespace art {
template<class T>
class SirtRef {
public:
- SirtRef(Thread* self, T* object);
+ SirtRef(Thread* self, T* object, bool should_verify = true);
~SirtRef();
T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -42,7 +42,8 @@ class SirtRef {
}
// Returns the old reference.
- T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* reset(T* object = nullptr, bool should_verify = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -51,6 +52,17 @@ class SirtRef {
DISALLOW_COPY_AND_ASSIGN(SirtRef);
};
+// A version of SirtRef which disables the object verification.
+template<class T>
+class SirtRefNoVerify : public SirtRef<T> {
+ public:
+ SirtRefNoVerify(Thread* self, T* object) : SirtRef<T>(self, object, false) {}
+ // Returns the old reference.
+ T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SirtRef<T>::reset(object, false);
+ }
+};
+
} // namespace art
#endif // ART_RUNTIME_SIRT_REF_H_
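
SirtRefNoVerify covers the windows where an object is live but not yet in a verifiable state, for example between a raw allocation and the pre-fence visitor's initialization. A usage sketch (the variable names are illustrative):

    // Root a not-yet-initialized object without tripping VerifyObject().
    SirtRefNoVerify<mirror::Object> ref(self, partially_initialized_obj);
    // ... finish initialization; resets through this wrapper also skip it ...
    mirror::Object* old_ref = ref.reset(nullptr);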
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
index daef3ff..6049e06 100644
--- a/runtime/stack_indirect_reference_table.h
+++ b/runtime/stack_indirect_reference_table.h
@@ -57,6 +57,16 @@ class StackIndirectReferenceTable {
return RoundUp(sirt_size, 8);
}
+ // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
+ static size_t GetAlignedSirtSizeTarget(size_t pointer_size, uint32_t num_references) {
+ // Assume that the layout is packed.
+ size_t header_size = pointer_size + sizeof(uint32_t);
+ // This assumes the layout does not change between 32-bit and 64-bit targets.
+ size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+ size_t sirt_size = header_size + data_size;
+ return RoundUp(sirt_size, 8);
+ }
+
// Link to previous SIRT or NULL.
StackIndirectReferenceTable* GetLink() const {
return link_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 29d011c..5a2410a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2070,6 +2070,9 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.class_loader_override), arg, thread_id,
kRootNativeStack);
}
+ if (tlsPtr_.monitor_enter_object != nullptr) {
+ visitor(&tlsPtr_.monitor_enter_object, arg, thread_id, kRootNativeStack);
+ }
tlsPtr_.jni_env->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
tlsPtr_.jni_env->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
SirtVisitRoots(visitor, arg, thread_id);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 7de9433..8dad419 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -823,9 +823,9 @@ class VerifyRootWrapperArg {
};
static void VerifyRootWrapperCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
- RootType /*root_type*/) {
+ RootType root_type) {
VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
- wrapperArg->callback_(*root, wrapperArg->arg_, 0, NULL);
+ wrapperArg->callback_(*root, wrapperArg->arg_, 0, NULL, root_type);
}
void ThreadList::VerifyRoots(VerifyRootCallback* callback, void* arg) const {