Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arm/jni_entrypoints_arm.S        24
-rw-r--r--  runtime/arch/arm64/jni_entrypoints_arm64.S    38
-rw-r--r--  runtime/arch/mips/jni_entrypoints_mips.S      33
-rw-r--r--  runtime/check_jni.cc                          11
-rw-r--r--  runtime/gc/allocator/rosalloc.cc              48
-rw-r--r--  runtime/gc/allocator/rosalloc.h                9
-rw-r--r--  runtime/gc/space/rosalloc_space.cc            28
-rw-r--r--  runtime/indirect_reference_table.cc           13
-rw-r--r--  runtime/jni_internal.cc                       19
-rw-r--r--  runtime/jni_internal.h                        10
-rw-r--r--  runtime/mirror/art_method.cc                  21
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc     19
-rw-r--r--  runtime/native/java_lang_Thread.cc             1
-rw-r--r--  runtime/scoped_thread_state_change.h           2
-rw-r--r--  runtime/thread.cc                              4
15 files changed, 62 insertions, 218 deletions
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 4a69644..1be34ba 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -41,27 +41,3 @@ ENTRY art_jni_dlsym_lookup_stub
pop {r0, r1, r2, r3, pc} @ restore regs and return to caller to handle exception
.cfi_adjust_cfa_offset -20
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- @ save registers that may contain arguments and LR that will be crushed by a call
- push {r0-r3, lr}
- .save {r0-r3, lr}
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
- .cfi_rel_offset r3, 12
- sub sp, #12 @ 3 words of space for alignment
- mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- bl artWorkAroundAppJniBugs @ (Thread*, SP)
- add sp, #12 @ rewind stack
- mov r12, r0 @ save target address
- pop {r0-r3, lr} @ restore possibly modified argument registers
- .cfi_adjust_cfa_offset -16
- bx r12 @ tail call into JNI routine
-END art_work_around_app_jni_bugs
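For context: this stub (and its arm64 and mips counterparts below) spilled the native ABI argument registers, called a runtime helper with Thread::Current() and the stack pointer, and tail-called whatever address the helper returned. A hedged sketch of the contract the assembly implies follows; the symbol name is real, but the signature is inferred from the "(Thread*, SP)" comments in the stubs rather than taken from the runtime source:

    #include <cstdint>

    namespace art { class Thread; }

    // Inferred contract: given the current thread and the SP at the spill
    // area, return the real native entry point that RegisterNative() stashed
    // away (per the art_method.cc hunk below, in the method's gc_map_ field),
    // so the stub can branch straight into it.
    extern "C" const void* artWorkAroundAppJniBugs(art::Thread* self,
                                                   intptr_t* sp);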
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index ba783ab..c59a304 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -55,41 +55,3 @@ ENTRY art_jni_dlsym_lookup_stub
1:
ret // restore regs and return to caller to handle exception.
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- // spill regs.
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- stp d6, d7, [sp, #-16]!
- stp d4, d5, [sp, #-16]!
- stp d2, d3, [sp, #-16]!
- stp d0, d1, [sp, #-16]!
- stp x6, x7, [sp, #-16]!
- stp x4, x5, [sp, #-16]!
- stp x2, x3, [sp, #-16]!
- stp x0, x1, [sp, #-16]!
-
- mov x0, x19 // Thread::Current.
- mov x1, sp // SP.
- bl artWorkAroundAppJniBugs // (Thread*, SP).
- mov x17, x0 // save target return.
-
- // load spill regs.
- ldp x0, x1, [sp], #16
- ldp x2, x3, [sp], #16
- ldp x4, x5, [sp], #16
- ldp x6, x7, [sp], #16
- ldp d0, d1, [sp], #16
- ldp d2, d3, [sp], #16
- ldp d4, d5, [sp], #16
- ldp d6, d7, [sp], #16
- ldp x29, x30, [sp], #16
-
- //tail call into JNI routine.
- br x17
-END art_work_around_app_jni_bugs
-
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index f9ca7df..e5f4a79 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -54,36 +54,3 @@ ENTRY art_jni_dlsym_lookup_stub
jr $ra
nop
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- GENERATE_GLOBAL_POINTER
- # save registers that may contain arguments and LR that will be crushed by a call
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $a3, 24($sp)
- .cfi_rel_offset 7, 28
- sw $a2, 20($sp)
- .cfi_rel_offset 6, 28
- sw $a1, 16($sp)
- .cfi_rel_offset 5, 28
- sw $a0, 12($sp)
- .cfi_rel_offset 4, 28
- move $a0, rSELF # pass Thread::Current
- jal artWorkAroundAppJniBugs # (Thread*, $sp)
- move $a1, $sp # pass $sp
- move $t9, $v0 # save target address
- lw $a0, 12($sp)
- lw $a1, 16($sp)
- lw $a2, 20($sp)
- lw $a3, 24($sp)
- lw $ra, 28($sp)
- jr $t9 # tail call into JNI routine
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-END art_work_around_app_jni_bugs
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 960c26d..b52941b 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -765,14 +765,9 @@ class ScopedCheck {
// Verify that the current thread is (a) attached and (b) associated with
// this particular instance of JNIEnv.
if (soa_.Env() != threadEnv) {
- if (soa_.Vm()->work_around_app_jni_bugs) {
- // If we're keeping broken code limping along, we need to suppress the abort...
- LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *soa_.Self();
- } else {
- JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
- ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
- return;
- }
+ JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
+ ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
+ return;
}
// Verify that, if this thread previously made a critical "get" call, we
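The app bug this check targets, caching a JNIEnv* on one thread and using it from another, now always aborts under CheckJNI instead of being downgraded to an ERROR log. A minimal reproduction in app-side code (class and function names are hypothetical):

    #include <jni.h>

    static JNIEnv* g_cached_env = nullptr;  // BUG: a JNIEnv* is per-thread.

    extern "C" JNIEXPORT void JNICALL
    Java_com_example_Cache_init(JNIEnv* env, jclass) {
      g_cached_env = env;  // Cached on the thread that called init()...
    }

    // ...and used from a different attached thread. CheckJNI now always
    // aborts with "thread X using JNIEnv* from thread Y" here, rather than
    // logging "APP BUG DETECTED" and limping along.
    void UseFromWrongThread() {
      g_cached_env->FindClass("java/lang/String");
    }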
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index cbefa6a..0f2d6a9 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -279,7 +279,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
return nullptr;
}
-void RosAlloc::FreePages(Thread* self, void* ptr) {
+size_t RosAlloc::FreePages(Thread* self, void* ptr) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
DCHECK_LT(pm_idx, page_map_size_);
@@ -298,7 +298,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
<< static_cast<int>(pm_type) << ", ptr=" << std::hex
<< reinterpret_cast<intptr_t>(ptr);
- return;
+ return 0;
}
// Update the page map and count the number of pages.
size_t num_pages = 1;
@@ -422,6 +422,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
<< " into free_page_runs_";
}
+ return num_pages;
}
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
@@ -460,12 +461,11 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
return r;
}
-void RosAlloc::FreeInternal(Thread* self, void* ptr) {
+size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
DCHECK_LE(base_, ptr);
DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
- bool free_from_run = false;
- Run* run = NULL;
+ Run* run = nullptr;
{
MutexLock mu(self, lock_);
DCHECK_LT(pm_idx, page_map_size_);
@@ -477,16 +477,14 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
switch (page_map_[pm_idx]) {
case kPageMapEmpty:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapLargeObject:
- FreePages(self, ptr);
- return;
+ return FreePages(self, ptr) * kPageSize;
case kPageMapLargeObjectPart:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapRun:
case kPageMapRunPart: {
- free_from_run = true;
size_t pi = pm_idx;
DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
@@ -501,18 +499,18 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
}
default:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
}
}
- if (LIKELY(free_from_run)) {
- DCHECK(run != NULL);
- FreeFromRun(self, ptr, run);
- }
+ DCHECK(run != nullptr);
+ const size_t size = IndexToBracketSize(run->size_bracket_idx_);
+ FreeFromRun(self, ptr, run);
+ return size;
}
-void RosAlloc::Free(Thread* self, void* ptr) {
+size_t RosAlloc::Free(Thread* self, void* ptr) {
ReaderMutexLock rmu(self, bulk_free_lock_);
- FreeInternal(self, ptr);
+ return FreeInternal(self, ptr);
}
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
@@ -1077,13 +1075,14 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;
-void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+ size_t freed_bytes = 0;
if (false) {
// Used only to test Free() as GC uses only BulkFree().
for (size_t i = 0; i < num_ptrs; ++i) {
- FreeInternal(self, ptrs[i]);
+ freed_bytes += FreeInternal(self, ptrs[i]);
}
- return;
+ return freed_bytes;
}
WriterMutexLock wmu(self, bulk_free_lock_);
@@ -1126,14 +1125,15 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
- FreePages(self, ptr);
+ freed_bytes += FreePages(self, ptr) * kPageSize;
continue;
} else {
LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
- DCHECK(run != NULL);
+ DCHECK(run != nullptr);
// Set the bit in the bulk free bit map.
run->MarkBulkFreeBitMap(ptr);
+ freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
#ifdef HAVE_ANDROID_OS
if (!run->to_be_bulk_freed_) {
run->to_be_bulk_freed_ = true;
@@ -1171,7 +1171,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
- FreePages(self, ptr);
+ freed_bytes += FreePages(self, ptr) * kPageSize;
} else {
LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
@@ -1180,6 +1180,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
DCHECK(run != NULL);
// Set the bit in the bulk free bit map.
run->MarkBulkFreeBitMap(ptr);
+ freed_bytes += IndexToBracketSize(run->size_bracket_idx_);
#ifdef HAVE_ANDROID_OS
if (!run->to_be_bulk_freed_) {
run->to_be_bulk_freed_ = true;
@@ -1306,6 +1307,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
}
}
+ return freed_bytes;
}
std::string RosAlloc::DumpPageMap() {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 5d9d75c..0c508b7 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -493,7 +493,8 @@ class RosAlloc {
// Page-granularity alloc/free
void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Returns how many pages were freed.
+ size_t FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Allocate/free a run slot.
void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
@@ -506,7 +507,7 @@ class RosAlloc {
Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
// The internal of non-bulk Free().
- void FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
+ size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
// Allocates large objects.
void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
@@ -518,9 +519,9 @@ class RosAlloc {
~RosAlloc();
void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
LOCKS_EXCLUDED(lock_);
- void Free(Thread* self, void* ptr)
+ size_t Free(Thread* self, void* ptr)
LOCKS_EXCLUDED(bulk_free_lock_);
- void BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
+ size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
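With Free(), FreePages(), and BulkFree() now reporting how much memory they released (the bracket size for a run slot, pages times kPageSize for a large object), callers can account for freed bytes without a separate size lookup. A minimal caller-side sketch, assuming a valid RosAlloc* and Thread*; the counter is illustrative:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> bytes_in_use;  // hypothetical heap-wide counter

    size_t FreeAndAccount(art::Thread* self,
                          art::gc::allocator::RosAlloc* rosalloc, void* ptr) {
      const size_t freed = rosalloc->Free(self, ptr);  // bytes actually freed
      bytes_in_use.fetch_sub(freed, std::memory_order_relaxed);
      return freed;
    }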
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 5a7d941..a5a6da0 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -33,6 +33,10 @@ namespace gc {
namespace space {
static constexpr bool kPrefetchDuringRosAllocFreeList = true;
+static constexpr size_t kPrefetchLookAhead = 8;
+// Use this only for verification; it is not safe in general, since the class
+// of the object may have been freed.
+static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
@@ -172,27 +176,24 @@ size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
- const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
if (kRecentFreeCount > 0) {
MutexLock mu(self, lock_);
RegisterRecentFree(ptr);
}
- rosalloc_->Free(self, ptr);
- return bytes_freed;
+ return rosalloc_->Free(self, ptr);
}
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
- // Don't need the lock to calculate the size of the freed pointers.
- size_t bytes_freed = 0;
+ size_t verify_bytes = 0;
for (size_t i = 0; i < num_ptrs; i++) {
- mirror::Object* ptr = ptrs[i];
- const size_t look_ahead = 8;
- if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
- __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
+ if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
+ __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
+ }
+ if (kVerifyFreedBytes) {
+ verify_bytes += AllocationSizeNonvirtual(ptrs[i], nullptr);
}
- bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
}
if (kRecentFreeCount > 0) {
@@ -216,7 +217,10 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
CHECK_EQ(num_broken_ptrs, 0u);
}
- rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ if (kVerifyFreedBytes) {
+ CHECK_EQ(verify_bytes, bytes_freed);
+ }
return bytes_freed;
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index bbad884..987df91 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -254,20 +254,11 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int idx = ExtractIndex(iref);
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
if (GetIndirectRefKind(iref) == kSirtOrInvalid &&
Thread::Current()->SirtContains(reinterpret_cast<jobject>(iref))) {
LOG(WARNING) << "Attempt to remove local SIRT entry from IRT, ignoring";
return true;
}
- if (GetIndirectRefKind(iref) == kSirtOrInvalid && vm->work_around_app_jni_bugs) {
- mirror::Object* direct_pointer = reinterpret_cast<mirror::Object*>(iref);
- idx = Find(direct_pointer, bottomIndex, topIndex, table_);
- if (idx == -1) {
- LOG(WARNING) << "Trying to work around app JNI bugs, but didn't find " << iref << " in table!";
- return false;
- }
- }
if (idx < bottomIndex) {
// Wrong segment.
@@ -285,7 +276,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
if (idx == topIndex-1) {
// Top-most entry. Scan up and consume holes.
- if (!vm->work_around_app_jni_bugs && !CheckEntry("remove", iref, idx)) {
+ if (!CheckEntry("remove", iref, idx)) {
return false;
}
@@ -321,7 +312,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
}
- if (!vm->work_around_app_jni_bugs && !CheckEntry("remove", iref, idx)) {
+ if (!CheckEntry("remove", iref, idx)) {
return false;
}
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f7aeffd..38aeaee 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2444,23 +2444,6 @@ class JNI {
if (static_cast<JNIEnvExt*>(env)->self->SirtContains(java_object)) {
return JNILocalRefType;
}
-
- if (!static_cast<JNIEnvExt*>(env)->vm->work_around_app_jni_bugs) {
- return JNIInvalidRefType;
- }
-
- // If we're handing out direct pointers, check whether it's a direct pointer to a local
- // reference.
- {
- ScopedObjectAccess soa(env);
- if (soa.Decode<mirror::Object*>(java_object) ==
- reinterpret_cast<mirror::Object*>(java_object)) {
- mirror::Object* object = reinterpret_cast<mirror::Object*>(java_object);
- if (soa.Env()->locals.ContainsDirectPointer(object)) {
- return JNILocalRefType;
- }
- }
- }
return JNIInvalidRefType;
}
LOG(FATAL) << "IndirectRefKind[" << kind << "]";
@@ -2993,7 +2976,6 @@ JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
check_jni(false),
force_copy(false), // TODO: add a way to enable this
trace(options->jni_trace_),
- work_around_app_jni_bugs(false),
pins_lock("JNI pin table lock", kPinTableLock),
pin_table("pin table", kPinTableInitial, kPinTableMax),
globals_lock("JNI global reference table lock"),
@@ -3044,7 +3026,6 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
if (force_copy) {
os << " (with forcecopy)";
}
- os << "; workarounds are " << (work_around_app_jni_bugs ? "on" : "off");
Thread* self = Thread::Current();
{
MutexLock mu(self, pins_lock);
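One visible consequence of dropping the direct-pointer escape hatch: GetObjectRefType() no longer resolves a raw object pointer smuggled in as a jobject. A short app-side probe (names are hypothetical) showing the tightened behavior:

    #include <jni.h>

    // Under the old workaround a direct mirror::Object* could still classify
    // as JNILocalRefType; such a value now always comes back invalid.
    bool IsRecognizedReference(JNIEnv* env, jobject ref) {
      return env->GetObjectRefType(ref) != JNIInvalidRefType;
    }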
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 42796db..ec911b2 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -110,9 +110,6 @@ class JavaVMExt : public JavaVM {
// Extra diagnostics.
std::string trace;
- // Used to provide compatibility for apps that assumed direct references.
- bool work_around_app_jni_bugs;
-
// Used to hold references to pinned primitive arrays.
Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
ReferenceTable pin_table GUARDED_BY(pins_lock);
@@ -149,7 +146,7 @@ struct JNIEnvExt : public JNIEnv {
void PopFrame();
template<typename T>
- T AddLocalReference(mirror::Object* obj, bool jni_work_arounds)
+ T AddLocalReference(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Offset SegmentStateOffset();
@@ -216,7 +213,7 @@ class ScopedJniEnvLocalRefState {
};
template<typename T>
-inline T JNIEnvExt::AddLocalReference(mirror::Object* obj, bool jni_work_arounds) {
+inline T JNIEnvExt::AddLocalReference(mirror::Object* obj) {
IndirectRef ref = locals.Add(local_ref_cookie, obj);
// TODO: fix this to understand PushLocalFrame, so we can turn it on.
@@ -231,9 +228,6 @@ inline T JNIEnvExt::AddLocalReference(mirror::Object* obj, bool jni_work_arounds
}
}
- if (jni_work_arounds) {
- return reinterpret_cast<T>(obj);
- }
return reinterpret_cast<T>(ref);
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 2151fc7..f3303a8 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -351,30 +351,15 @@ bool ArtMethod::IsRegistered() {
return native_method != jni_stub;
}
-extern "C" void art_work_around_app_jni_bugs(JNIEnv*, jobject);
void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_fast) {
DCHECK(Thread::Current() == self);
CHECK(IsNative()) << PrettyMethod(this);
CHECK(!IsFastNative()) << PrettyMethod(this);
CHECK(native_method != NULL) << PrettyMethod(this);
- if (!self->GetJniEnv()->vm->work_around_app_jni_bugs) {
- if (is_fast) {
- SetAccessFlags(GetAccessFlags() | kAccFastNative);
- }
- SetNativeMethod(native_method);
- } else {
- // We've been asked to associate this method with the given native method but are working
- // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct
- // the native method to runtime support and store the target somewhere runtime support will
- // find it.
-#if defined(__i386__) || defined(__x86_64__)
- UNIMPLEMENTED(FATAL);
-#else
- SetNativeMethod(reinterpret_cast<void*>(art_work_around_app_jni_bugs));
-#endif
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_),
- reinterpret_cast<const uint8_t*>(native_method), false);
+ if (is_fast) {
+ SetAccessFlags(GetAccessFlags() | kAccFastNative);
}
+ SetNativeMethod(native_method);
}
void ArtMethod::UnregisterNative(Thread* self) {
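RegisterNative() now always binds the supplied function pointer directly; the trampoline redirection for workaround mode, along with the UNIMPLEMENTED(FATAL) hole it left on x86, is gone. The ordinary registration path from an app's JNI_OnLoad is unaffected; a routine example with hypothetical class and method names:

    #include <jni.h>

    static jint NativeAdd(JNIEnv*, jclass, jint a, jint b) { return a + b; }

    static const JNINativeMethod kMethods[] = {
        {"nativeAdd", "(II)I", reinterpret_cast<void*>(NativeAdd)},
    };

    jint JNI_OnLoad(JavaVM* vm, void*) {
      JNIEnv* env = nullptr;
      if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
        return JNI_ERR;
      }
      jclass klass = env->FindClass("com/example/Adder");
      if (klass == nullptr ||
          env->RegisterNatives(klass, kMethods,
                               sizeof(kMethods) / sizeof(kMethods[0])) != JNI_OK) {
        return JNI_ERR;
      }
      return JNI_VERSION_1_6;
    }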
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5c5eaa1..76c5866 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -164,23 +164,12 @@ static jstring VMRuntime_vmLibrary(JNIEnv* env, jobject) {
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) {
- // This is the target SDK version of the app we're about to run.
+ // This is the target SDK version of the app we're about to run. It is intended that this is a place
+ // where workarounds can be enabled.
// Note that targetSdkVersion may be CUR_DEVELOPMENT (10000).
// Note that targetSdkVersion may be 0, meaning "current".
- if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) {
- Runtime* runtime = Runtime::Current();
- JavaVMExt* vm = runtime->GetJavaVM();
- if (vm->check_jni) {
- LOG(INFO) << "CheckJNI enabled: not enabling JNI app bug workarounds.";
- } else {
- LOG(INFO) << "Turning on JNI app bug workarounds for target SDK version "
- << targetSdkVersion << "...";
-
- vm->work_around_app_jni_bugs = true;
- LOG(WARNING) << "Permenantly disabling heap compaction due to jni workarounds";
- Runtime::Current()->GetHeap()->DisableCompaction();
- }
- }
+ UNUSED(env);
+ UNUSED(targetSdkVersion);
}
static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 0b84005..86db893 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -88,6 +88,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
+ LOG(ERROR) << "Unexpected thread state: " << internal_thread_state;
return -1; // Unreachable.
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index ebc5452..404c616 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -171,7 +171,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
- return Env()->AddLocalReference<T>(obj, Vm()->work_around_app_jni_bugs);
+ return Env()->AddLocalReference<T>(obj);
}
template<typename T>
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3408dd3..998579d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1248,10 +1248,6 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
// Read from SIRT.
result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
- } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
- // Assume an invalid local reference is actually a direct pointer.
- result = reinterpret_cast<mirror::Object*>(obj);
- VerifyObject(result);
} else {
result = kInvalidIndirectRefObject;
}