Diffstat (limited to 'runtime')
-rw-r--r--   runtime/base/mutex.h                    2
-rw-r--r--   runtime/gc/heap-inl.h                   9
-rw-r--r--   runtime/gc/heap.cc                     95
-rw-r--r--   runtime/gc/heap.h                      17
-rw-r--r--   runtime/mirror/class-inl.h              2
-rw-r--r--   runtime/mirror/class.h                  6
-rw-r--r--   runtime/mirror/string-inl.h             4
-rw-r--r--   runtime/runtime.cc                      6
-rw-r--r--   runtime/verifier/method_verifier.cc    73
9 files changed, 190 insertions(+), 24 deletions(-)
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 0ab148e..aa91ca1 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -61,6 +61,7 @@ enum LockLevel {
kAbortLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
+ kTransactionLogLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
kReferenceQueueFinalizerReferencesLock,
@@ -77,7 +78,6 @@ enum LockLevel {
kDexFileMethodInlinerLock,
kDexFileToMethodInlinerMapLock,
kMarkSweepMarkStackLock,
- kTransactionLogLock,
kInternTableLock,
kOatFileSecondaryLookupLock,
kDefaultMutexLevel,
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 2d54330..2ec9c86 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -174,6 +174,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
} else {
DCHECK(!Dbg::IsAllocTrackingEnabled());
}
+ if (kInstrumented) {
+ if (gc_stress_mode_) {
+ CheckGcStressMode(self, &obj);
+ }
+ } else {
+ DCHECK(!gc_stress_mode_);
+ }
// IsConcurrentGc() isn't known at compile time so we can optimize by not checking it for
// the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
@@ -391,7 +398,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co
// Zygote resulting in it being prematurely freed.
// We can only do this for primitive objects since large objects will not be within the card table
// range. This also means that we rely on SetClass not dirtying the object's card.
- return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
+ return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
template <bool kGrow>
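The hunk above widens Heap::ShouldAllocLargeObject so that sufficiently large String instances, not just primitive arrays, are placed in the large object space; the String::Alloc change further down flips the kCheckLargeObject template argument to true so string allocations actually reach this check. A minimal standalone sketch of the decision, using an illustrative ClassInfo stand-in rather than mirror::Class:

#include <cstddef>

// Sketch only: the real check lives in Heap::ShouldAllocLargeObject and queries
// mirror::Class; this struct just models the two predicates the patch combines.
struct ClassInfo {
  bool is_primitive_array;
  bool is_string;
};

// Large primitive arrays and large strings both qualify for the large object
// space once the allocation size reaches the configured threshold.
bool ShouldUseLargeObjectSpace(const ClassInfo& c, size_t byte_count,
                               size_t large_object_threshold) {
  return byte_count >= large_object_threshold &&
         (c.is_primitive_array || c.is_string);
}
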
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9a70d69..57557e2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -21,6 +21,7 @@
#include <limits>
#include <memory>
+#include <unwind.h> // For GC verification.
#include <vector>
#include "art_field-inl.h"
@@ -125,7 +126,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
bool ignore_max_footprint, bool use_tlab,
bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
- bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
+ bool verify_post_gc_rosalloc, bool gc_stress_mode,
+ bool use_homogeneous_space_compaction_for_oom,
uint64_t min_interval_homogeneous_space_compaction_by_oom)
: non_moving_space_(nullptr),
rosalloc_space_(nullptr),
@@ -170,6 +172,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
verify_pre_gc_rosalloc_(verify_pre_gc_rosalloc),
verify_pre_sweeping_rosalloc_(verify_pre_sweeping_rosalloc),
verify_post_gc_rosalloc_(verify_post_gc_rosalloc),
+ gc_stress_mode_(gc_stress_mode),
/* For GC a lot mode, we limit the allocations stacks to be kGcAlotInterval allocations. This
* causes a lot of GC since we do a GC for alloc whenever the stack is full. When heap
* verification is enabled, we limit the size of allocation stacks to speed up their
@@ -209,13 +212,17 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
blocking_gc_count_last_window_(0U),
gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
- kGcCountRateMaxBucketCount) {
+ kGcCountRateMaxBucketCount),
+ backtrace_lock_(nullptr),
+ seen_backtrace_count_(0u),
+ unique_backtrace_count_(0u) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
+ Runtime* const runtime = Runtime::Current();
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
- const bool is_zygote = Runtime::Current()->IsZygote();
+ const bool is_zygote = runtime->IsZygote();
if (!is_zygote) {
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
@@ -507,8 +514,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
- if (running_on_valgrind_) {
- Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+ instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
+ if (gc_stress_mode_) {
+ backtrace_lock_ = new Mutex("GC complete lock");
+ }
+ if (running_on_valgrind_ || gc_stress_mode_) {
+ instrumentation->InstrumentQuickAllocEntryPoints();
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
@@ -1072,6 +1083,12 @@ Heap::~Heap() {
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
delete pending_task_lock_;
+ delete backtrace_lock_;
+ if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) {
+ LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed()
+ << " total=" << seen_backtrace_count_.LoadRelaxed() +
+ unique_backtrace_count_.LoadRelaxed();
+ }
VLOG(heap) << "Finished ~Heap()";
}
@@ -3675,5 +3692,73 @@ void Heap::ClearMarkedObjects() {
}
}
+// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+class StackCrawlState {
+ public:
+ StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
+ : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
+ }
+ size_t GetFrameCount() const {
+ return frame_count_;
+ }
+ static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
+ auto* const state = reinterpret_cast<StackCrawlState*>(arg);
+ const uintptr_t ip = _Unwind_GetIP(context);
+ // The first stack frame is get_backtrace itself. Skip it.
+ if (ip != 0 && state->skip_count_ > 0) {
+ --state->skip_count_;
+ return _URC_NO_REASON;
+ }
+ // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
+ state->frames_[state->frame_count_] = ip;
+ state->frame_count_++;
+ return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
+ }
+
+ private:
+ uintptr_t* const frames_;
+ size_t frame_count_;
+ const size_t max_depth_;
+ size_t skip_count_;
+};
+
+static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
+ StackCrawlState state(frames, max_depth, 0u);
+ _Unwind_Backtrace(&StackCrawlState::Callback, &state);
+ return state.GetFrameCount();
+}
+
+void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) {
+ auto* const runtime = Runtime::Current();
+ if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
+ !runtime->IsActiveTransaction() && mirror::Class::HasJavaLangClass()) {
+ // Check if we should GC.
+ bool new_backtrace = false;
+ {
+ static constexpr size_t kMaxFrames = 16u;
+ uintptr_t backtrace[kMaxFrames];
+ const size_t frames = get_backtrace(backtrace, kMaxFrames);
+ uint64_t hash = 0;
+ for (size_t i = 0; i < frames; ++i) {
+ hash = hash * 2654435761 + backtrace[i];
+ hash += (hash >> 13) ^ (hash << 6);
+ }
+ MutexLock mu(self, *backtrace_lock_);
+ new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
+ if (new_backtrace) {
+ seen_backtraces_.insert(hash);
+ }
+ }
+ if (new_backtrace) {
+ StackHandleScope<1> hs(self);
+ auto h = hs.NewHandleWrapper(obj);
+ CollectGarbage(false);
+ unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+ } else {
+ seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1);
+ }
+ }
+}
+
} // namespace gc
} // namespace art
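For reference, a self-contained sketch of the dedup scheme Heap::CheckGcStressMode implements above: hash the raw return addresses of the allocating call stack and force a collection only the first time a given hash is seen. The hash constants come straight from the patch; the helper names and the free-standing form are assumptions for illustration, and the real code guards the set with backtrace_lock_ and runs the collection via CollectGarbage(false).

#include <cstddef>
#include <cstdint>
#include <unordered_set>

// Mix the frame addresses into a single 64-bit hash, as in CheckGcStressMode.
uint64_t HashBacktrace(const uintptr_t* frames, size_t count) {
  uint64_t hash = 0;
  for (size_t i = 0; i < count; ++i) {
    hash = hash * 2654435761u + frames[i];  // Multiplicative step (Knuth).
    hash += (hash >> 13) ^ (hash << 6);     // Extra bit mixing from the patch.
  }
  return hash;
}

// Returns true exactly once per distinct backtrace hash; a caller would trigger
// a GC on true and bump a "seen" counter on false. Locking is omitted here.
bool IsNewBacktrace(std::unordered_set<uint64_t>& seen_hashes,
                    const uintptr_t* frames, size_t count) {
  return seen_hashes.insert(HashBacktrace(frames, count)).second;
}
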
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index dac747b..81476a4 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -19,6 +19,7 @@
#include <iosfwd>
#include <string>
+#include <unordered_set>
#include <vector>
#include "allocator_type.h"
@@ -180,7 +181,8 @@ class Heap {
bool ignore_max_footprint, bool use_tlab,
bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
- bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction,
+ bool verify_post_gc_rosalloc, bool gc_stress_mode,
+ bool use_homogeneous_space_compaction,
uint64_t min_interval_homogeneous_space_compaction_by_oom);
~Heap();
@@ -887,6 +889,10 @@ class Heap {
void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+ // GC stress mode attempts to do one GC per unique backtrace.
+ void CheckGcStressMode(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -1042,6 +1048,7 @@ class Heap {
bool verify_pre_gc_rosalloc_;
bool verify_pre_sweeping_rosalloc_;
bool verify_post_gc_rosalloc_;
+ const bool gc_stress_mode_;
// RAII that temporarily disables the rosalloc verification during
// the zygote fork.
@@ -1192,6 +1199,14 @@ class Heap {
// The histogram of the number of blocking GC invocations per window duration.
Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+ // GC stress related data structures.
+ Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Debugging variables, seen backtraces vs unique backtraces.
+ Atomic<uint64_t> seen_backtrace_count_;
+ Atomic<uint64_t> unique_backtrace_count_;
+ // Stack trace hashes that we already saw.
+ std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 835b94a..0538f4b 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -757,7 +757,7 @@ inline bool Class::GetSlowPathEnabled() {
}
inline void Class::SetSlowPath(bool enabled) {
- SetFieldBoolean<false>(GetSlowPathFlagOffset(), enabled);
+ SetFieldBoolean<false, false>(GetSlowPathFlagOffset(), enabled);
}
inline void Class::InitializeClassVisitor::operator()(
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ba8a693..0453906 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1030,10 +1030,14 @@ class MANAGED Class FINAL : public Object {
}
static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(!java_lang_Class_.IsNull());
+ DCHECK(HasJavaLangClass());
return java_lang_Class_.Read();
}
+ static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return !java_lang_Class_.IsNull();
+ }
+
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void ResetClass();
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 9f6cd11..d283f58 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -162,8 +162,8 @@ inline String* String::Alloc(Thread* self, int32_t utf16_length, gc::AllocatorTy
}
gc::Heap* heap = Runtime::Current()->GetHeap();
return down_cast<String*>(
- heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, string_class, size,
- allocator_type, pre_fence_visitor));
+ heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, string_class, size,
+ allocator_type, pre_fence_visitor));
}
template <bool kIsInstrumented>
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4a2a0c9..6c55129 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -144,7 +144,10 @@ struct TraceConfig {
};
Runtime::Runtime()
- : instruction_set_(kNone),
+ : resolution_method_(nullptr),
+ imt_conflict_method_(nullptr),
+ imt_unimplemented_method_(nullptr),
+ instruction_set_(kNone),
compiler_callbacks_(nullptr),
is_zygote_(false),
must_relocate_(false),
@@ -870,6 +873,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
xgc_option.verify_pre_gc_rosalloc_,
xgc_option.verify_pre_sweeping_rosalloc_,
xgc_option.verify_post_gc_rosalloc_,
+ xgc_option.gcstress_,
runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
ATRACE_END();
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index aa54b17..6c55356 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -172,6 +172,15 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
bool allow_soft_failures,
std::string* error) {
DCHECK(class_def != nullptr);
+
+ // A class must not be abstract and final.
+ if ((class_def->access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
+ *error = "Verifier rejected class ";
+ *error += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+ *error += ": class is abstract and final.";
+ return kHardFailure;
+ }
+
const uint8_t* class_data = dex_file->GetClassData(*class_def);
if (class_data == nullptr) {
// empty class, probably a marker interface
@@ -857,14 +866,18 @@ bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_of
case Instruction::kVerifyVarArgNonZero:
// Fall-through.
case Instruction::kVerifyVarArg: {
- if (inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgNonZero && inst->VRegA() <= 0) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << inst->VRegA() << ") in "
+ // Instructions that can actually return a negative value shouldn't have this flag.
+ uint32_t v_a = dchecked_integral_cast<uint32_t>(inst->VRegA());
+ if ((inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgNonZero && v_a == 0) ||
+ v_a > Instruction::kMaxVarArgRegs) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << v_a << ") in "
"non-range invoke";
return false;
}
+
uint32_t args[Instruction::kMaxVarArgRegs];
inst->GetVarArgs(args);
- result = result && CheckVarArgRegs(inst->VRegA(), args);
+ result = result && CheckVarArgRegs(v_a, args);
break;
}
case Instruction::kVerifyVarArgRangeNonZero:
@@ -1175,10 +1188,6 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
}
bool MethodVerifier::CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
- if (vA > Instruction::kMaxVarArgRegs) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << vA << ") in non-range invoke)";
- return false;
- }
uint16_t registers_size = code_item_->registers_size_;
for (uint32_t idx = 0; idx < vA; idx++) {
if (arg[idx] >= registers_size) {
@@ -1291,13 +1300,22 @@ static bool IsPrimitiveDescriptor(char descriptor) {
bool MethodVerifier::SetTypesFromSignature() {
RegisterLine* reg_line = reg_table_.GetLine(0);
- int arg_start = code_item_->registers_size_ - code_item_->ins_size_;
+
+ // Should have been verified earlier.
+ DCHECK_GE(code_item_->registers_size_, code_item_->ins_size_);
+
+ uint32_t arg_start = code_item_->registers_size_ - code_item_->ins_size_;
size_t expected_args = code_item_->ins_size_; /* long/double count as two */
- DCHECK_GE(arg_start, 0); /* should have been verified earlier */
// Include the "this" pointer.
size_t cur_arg = 0;
if (!IsStatic()) {
+ if (expected_args == 0) {
+ // Expect at least a receiver.
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected 0 args, but method is not static";
+ return false;
+ }
+
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
@@ -1543,6 +1561,27 @@ bool MethodVerifier::CodeFlowVerifyMethod() {
return true;
}
+// Returns the index of the first final instance field of the given class, or kDexNoIndex if there
+// is no such field.
+static uint32_t GetFirstFinalInstanceFieldIndex(const DexFile& dex_file, uint16_t type_idx) {
+ const DexFile::ClassDef* class_def = dex_file.FindClassDef(type_idx);
+ DCHECK(class_def != nullptr);
+ const uint8_t* class_data = dex_file.GetClassData(*class_def);
+ DCHECK(class_data != nullptr);
+ ClassDataItemIterator it(dex_file, class_data);
+ // Skip static fields.
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ if ((it.GetFieldAccessFlags() & kAccFinal) != 0) {
+ return it.GetMemberIndex();
+ }
+ it.Next();
+ }
+ return DexFile::kDexNoIndex;
+}
+
bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
// We want the state _before_ the instruction, for the case where the dex pc we're
@@ -2766,6 +2805,17 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::RETURN_VOID_NO_BARRIER:
if (IsConstructor() && !IsStatic()) {
auto& declaring_class = GetDeclaringClass();
+ if (declaring_class.IsUnresolvedReference()) {
+ // We must iterate over the fields, even if we cannot use mirror classes to do so. Do it
+ // manually over the underlying dex file.
+ uint32_t first_index = GetFirstFinalInstanceFieldIndex(*dex_file_,
+ dex_file_->GetMethodId(dex_method_idx_).class_idx_);
+ if (first_index != DexFile::kDexNoIndex) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-no-barrier not expected for field "
+ << first_index;
+ }
+ break;
+ }
auto* klass = declaring_class.GetClass();
for (uint32_t i = 0, num_fields = klass->NumInstanceFields(); i < num_fields; ++i) {
if (klass->GetInstanceField(i)->IsFinal()) {
@@ -3780,8 +3830,9 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
} else {
const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
if (array_type.IsZero()) {
- // Null array type; this code path will fail at runtime. Infer a merge-able type from the
- // instruction type.
+ // Null array type; this code path will fail at runtime.
+ // Still check that the given value matches the instruction's type.
+ work_line_->VerifyRegisterType(this, inst->VRegA_23x(), insn_type);
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
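
Finally, as a companion to the new check at the top of MethodVerifier::VerifyClass, a minimal sketch of the structural rule it enforces: a class may not be declared both abstract and final. The flag values follow the DEX access-flag encoding; the helper name is invented for illustration.

#include <cstdint>

// DEX access flags (same values as in the JVM specification).
constexpr uint32_t kAccFinal = 0x0010;
constexpr uint32_t kAccAbstract = 0x0400;

// True when a class_def's access flags carry both bits, which VerifyClass now
// rejects as a hard failure before looking at any method.
bool IsAbstractAndFinal(uint32_t class_access_flags) {
  constexpr uint32_t kBoth = kAccAbstract | kAccFinal;
  return (class_access_flags & kBoth) == kBoth;
}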