summaryrefslogtreecommitdiffstats
path: root/runtime/thread.h
diff options
context:
space:
mode:
authorDave Allison <dallison@google.com>2014-03-27 15:10:22 -0700
committerDave Allison <dallison@google.com>2014-03-31 18:04:08 -0700
commitf943914730db8ad2ff03d49a2cacd31885d08fd7 (patch)
tree885a781e5f8bd852e2c1615108ae7b17576a6567 /runtime/thread.h
parentcfd5acf281b0c509f86b13d73c6a8dfa3ea9922c (diff)
downloadart-f943914730db8ad2ff03d49a2cacd31885d08fd7.zip
art-f943914730db8ad2ff03d49a2cacd31885d08fd7.tar.gz
art-f943914730db8ad2ff03d49a2cacd31885d08fd7.tar.bz2
Implement implicit stack overflow checks
This also fixes some failing run tests due to missing null pointer markers. The implementation of the implicit stack overflow checks introduces the ability to have a gap in the stack that is skipped during stack walk backs. This gap is protected against read/write and is used to trigger a SIGSEGV at function entry if the stack will overflow. Change-Id: I0c3e214c8b87dc250cf886472c6d327b5d58653e
Diffstat (limited to 'runtime/thread.h')
-rw-r--r--runtime/thread.h21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/runtime/thread.h b/runtime/thread.h
index 32875e6..63d22c5 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -112,6 +112,14 @@ class PACKED(4) Thread {
static constexpr size_t kStackOverflowReservedUsableBytes =
kStackOverflowReservedBytes - kStackOverflowSignalReservedBytes;
+ // For implicit overflow checks we reserve an extra piece of memory at the bottom
+ // of the stack (lowest memory). The higher portion of the memory
+ // is protected against reads and the lower is available for use while
+ // throwing the StackOverflow exception.
+ static constexpr size_t kStackOverflowProtectedSize = 32 * KB;
+ static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
+ kStackOverflowReservedBytes;
+
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
@@ -461,12 +469,21 @@ class PACKED(4) Thread {
void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Set the stack end to that to be used during regular execution
- void ResetDefaultStackEnd() {
+ void ResetDefaultStackEnd(bool implicit_overflow_check) {
// Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
// to throw a StackOverflowError.
- stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
+ if (implicit_overflow_check) {
+ // For implicit checks we also need to add in the protected region above the
+ // overflow region.
+ stack_end_ = stack_begin_ + kStackOverflowImplicitCheckSize;
+ } else {
+ stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
+ }
}
+ // Install the protected region for implicit stack checks.
+ void InstallImplicitProtection(bool is_main_stack);
+
bool IsHandlingStackOverflow() const {
return stack_end_ == stack_begin_;
}