summaryrefslogtreecommitdiffstats
path: root/runtime/transaction.h
diff options
context:
space:
mode:
authorSebastien Hertz <shertz@google.com>2014-01-15 10:20:56 +0100
committerSebastien Hertz <shertz@google.com>2014-02-17 11:32:15 +0100
commitd2fe10a3a34af171bf1631219cd2d6ff6b7778b5 (patch)
treeb6b7eb8eba23a5c2723518da99c03bf47b97f58a /runtime/transaction.h
parent5a3f55ad9519e87c0d3bbddaf3d8a186a887a79b (diff)
downloadart-d2fe10a3a34af171bf1631219cd2d6ff6b7778b5.zip
art-d2fe10a3a34af171bf1631219cd2d6ff6b7778b5.tar.gz
art-d2fe10a3a34af171bf1631219cd2d6ff6b7778b5.tar.bz2
Remove blacklist
Removes the class initialization blacklist and use transaction to detect and revert class initialization attempting to invoke native method. This only concerns class initialization happening at compilation time when generating an image (like boot.art for the system). In transactional mode, we log every object's field assignment and array update. Therefore we're able to abort a transaction to restore values of fields and array as they were before the transaction starts. We also log changes to the intern string table so we can restore its state prior to transaction start. Since transactional mode only happens at compilation time, we don't need to log all these changes at runtime. In order to reduce the overhead of testing if transactional mode is on/off, we templatize interfaces of mirror::Object and mirror::Array, respectively responsible for setting a field and setting an array element. For various reasons, we skip some specific fields from transaction: - Object's class and array's length must remain unchanged so garbage collector can compute object's size. - Immutable fields only set during class loading: list of fields, method, dex caches, vtables, ... as all classes have been loaded and verified before a transaction occurs. - Object's monitor for performance reasons. Before generating the image, we browse the heap to collect objects that need to be written into it. Since the heap may still hold references to unreachable objects due to aborted transactions, we trigger one collection at the end of the class preinitialization phase. Since the transaction is held by the runtime and all compilation threads share the same runtime, we need to ensure only one compilation thread has exclusive access to the runtime. To work around this issue, we force class initialization phase to run with only one thread. Note this is only done when generating image so application compilation is not impacted. This issue will be addressed in a separate CL. 
Bug: 9676614 Change-Id: I221910a9183a5ba6c2b99a277f5a5a68bc69b5f9
Diffstat (limited to 'runtime/transaction.h')
-rw-r--r--runtime/transaction.h195
1 files changed, 195 insertions, 0 deletions
diff --git a/runtime/transaction.h b/runtime/transaction.h
new file mode 100644
index 0000000..68f9540
--- /dev/null
+++ b/runtime/transaction.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_TRANSACTION_H_
+#define ART_RUNTIME_TRANSACTION_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "locks.h"
+#include "offsets.h"
+#include "primitive.h"
+#include "object_callbacks.h"
+#include "safe_map.h"
+
+#include <list>
+#include <map>
+
+namespace art {
+namespace mirror {
+class Array;
+class Object;
+class String;
+}
+class InternTable;
+
+// Transaction logs every object field write, array element write and intern
+// string table mutation performed while the runtime is in transactional mode
+// (image generation at compile time, per the commit message above), so that
+// Abort() can undo all of them and restore the pre-transaction state.
+class Transaction {
+ public:
+ Transaction();
+ ~Transaction();
+
+ // Record object field changes.
+ void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+ bool is_volatile)
+ LOCKS_EXCLUDED(log_lock_);
+ void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+ bool is_volatile)
+ LOCKS_EXCLUDED(log_lock_);
+ void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
+ mirror::Object* value, bool is_volatile)
+ LOCKS_EXCLUDED(log_lock_);
+
+ // Record array change.
+ void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
+ LOCKS_EXCLUDED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Record intern string table changes.
+ // NOTE(review): callers must already hold intern_table_lock_; presumably the
+ // value recorded is the pre-change state so Undo can reverse the mutation.
+ void RecordStrongStringInsertion(mirror::String* s, uint32_t hash_code)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+ void RecordWeakStringInsertion(mirror::String* s, uint32_t hash_code)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+ void RecordStrongStringRemoval(mirror::String* s, uint32_t hash_code)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+ void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+
+ // Abort transaction by undoing all recorded changes.
+ void Abort()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+
+ // Visits GC roots held by the logs (logged reference values and strings) so
+ // they are not missed by the garbage collector while a transaction is open.
+ void VisitRoots(RootCallback* callback, void* arg)
+ LOCKS_EXCLUDED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ // Per-object log of field writes. Undo(obj) writes the recorded values back
+ // into the object (restoring pre-transaction state, per the commit message).
+ class ObjectLog {
+ public:
+ void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
+ void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
+ void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
+
+ void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootCallback* callback, void* arg);
+
+ size_t Size() const {
+ return field_values_.size();
+ }
+
+ private:
+ // Discriminates how the 64-bit 'value' field below is interpreted.
+ enum FieldValueKind {
+ k32Bits,
+ k64Bits,
+ kReference
+ };
+ struct FieldValue {
+ // TODO use JValue instead ?
+ uint64_t value;
+ FieldValueKind kind;
+ bool is_volatile;
+ };
+
+ void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
+ const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Maps field's offset to its value.
+ std::map<uint32_t, FieldValue> field_values_;
+ };
+
+ // Per-array log of element writes (values widened to uint64_t regardless of
+ // the element type). Undo(array) writes the recorded elements back.
+ class ArrayLog {
+ public:
+ void LogValue(size_t index, uint64_t value);
+
+ void Undo(mirror::Array* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootCallback* callback, void* arg);
+
+ size_t Size() const {
+ return array_values_.size();
+ }
+
+ private:
+ void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
+ uint64_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Maps index to value.
+ // TODO use JValue instead ?
+ std::map<size_t, uint64_t> array_values_;
+ };
+
+ // One intern-table mutation: insertion or removal of a strong or weak
+ // string. Undo(intern_table) reverses the recorded operation.
+ class InternStringLog {
+ public:
+ enum StringKind {
+ kStrongString,
+ kWeakString
+ };
+ enum StringOp {
+ kInsert,
+ kRemove
+ };
+ InternStringLog(mirror::String* s, uint32_t hash_code, StringKind kind, StringOp op)
+ : str_(s), hash_code_(hash_code), string_kind_(kind), string_op_(op) {
+ }
+
+ void Undo(InternTable* intern_table) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void VisitRoots(RootCallback* callback, void* arg);
+
+ private:
+ mirror::String* str_;
+ uint32_t hash_code_;
+ StringKind string_kind_;
+ StringOp string_op_;
+ };
+
+ // Appends 'log' to intern_string_logs_ under log_lock_; shared helper for
+ // the four RecordString* methods above.
+ void LogInternedString(InternStringLog& log)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ LOCKS_EXCLUDED(log_lock_);
+
+ void UndoObjectModifications()
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UndoArrayModifications()
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void UndoInternStringTableModifications()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_);
+
+ void VisitObjectLogs(RootCallback* callback, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitArrayLogs(RootCallback* callback, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitStringLogs(RootCallback* callback, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Guards the three logs below. Lock ordering: must be taken after
+ // intern_table_lock_ (see ACQUIRED_AFTER annotation).
+ Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_);
+ std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_);
+ std::map<mirror::Array*, ArrayLog> array_logs_ GUARDED_BY(log_lock_);
+ std::list<InternStringLog> intern_string_logs_ GUARDED_BY(log_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(Transaction);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_TRANSACTION_H_