author     Ian Rogers <irogers@google.com>   2014-10-08 12:43:28 -0700
committer  Ian Rogers <irogers@google.com>   2014-10-09 16:05:58 -0700
commit     13735955f39b3b304c37d2b2840663c131262c18 (patch)
tree       0a731ac42b8230f9929172fa3e3d8051874e2b18 /runtime/mirror
parent     25b18bbdaa36ff936eb44f228f0518d4223e9d52 (diff)
stdint types all the way!
Change-Id: I4e4ef3a2002fc59ebd9097087f150eaf3f2a7e08
Diffstat (limited to 'runtime/mirror')
-rw-r--r--  runtime/mirror/array-inl.h        8
-rw-r--r--  runtime/mirror/art_method-inl.h   2
-rw-r--r--  runtime/mirror/art_method.h       4
-rw-r--r--  runtime/mirror/object-inl.h      26
-rw-r--r--  runtime/mirror/object.cc          4
5 files changed, 22 insertions, 22 deletions
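The substitution is mechanical throughout: ART's legacy byte alias becomes the standard uint8_t from <cstdint>, and the kPointerSize constant becomes sizeof(void*). A minimal sketch of the pattern; Obj, FieldAddr, and the use of offsetof below are illustrative stand-ins, not ART code:

#include <cstddef>
#include <cstdint>

struct Obj { int32_t a; int32_t b; };  // stand-in for a mirror object

// Byte-wise address arithmetic now goes through uint8_t* instead of the old byte*.
int32_t* FieldAddr(Obj* obj, size_t byte_offset) {
  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + byte_offset;
  return reinterpret_cast<int32_t*>(raw_addr);
}

int main() {
  Obj o{1, 2};
  *FieldAddr(&o, offsetof(Obj, b)) = 42;  // offsetof stands in for MemberOffset::Int32Value()
  return (o.b == 42 && sizeof(void*) >= 4) ? 0 : 1;  // sizeof(void*) replaces kPointerSize at use sites
}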
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 13b5a8b..7e1ad78 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -136,10 +136,10 @@ class SetLengthToUsableSizeVisitor {
// DCHECK(array->IsArrayInstance());
int32_t length = (usable_size - header_size_) >> component_size_shift_;
DCHECK_GE(length, minimum_length_);
- byte* old_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
- minimum_length_));
- byte* new_end = reinterpret_cast<byte*>(array->GetRawData(1U << component_size_shift_,
- length));
+ uint8_t* old_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+ minimum_length_));
+ uint8_t* new_end = reinterpret_cast<uint8_t*>(array->GetRawData(1U << component_size_shift_,
+ length));
// Ensure space beyond original allocation is zeroed.
memset(old_end, 0, new_end - old_end);
array->SetLength(length);
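For reference, a standalone sketch of what the visitor above does with the uint8_t* arithmetic: zero the bytes between the originally requested end of the array data and the new usable-size end. ZeroTail and its parameters are hypothetical stand-ins for GetRawData() and the visitor's fields:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Zero the gap between the requested element count and the usable element count.
void ZeroTail(uint8_t* data, size_t component_size,
              int32_t minimum_length, int32_t length) {
  uint8_t* old_end = data + component_size * static_cast<size_t>(minimum_length);
  uint8_t* new_end = data + component_size * static_cast<size_t>(length);
  memset(old_end, 0, static_cast<size_t>(new_end - old_end));  // space beyond original allocation
}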
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 8447616..1a65d99 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -393,7 +393,7 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
// Callee saves + handle scope + method ref + alignment
size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() + scope_size
- - kPointerSize // callee-save frame stores a whole method pointer
+ - sizeof(void*) // callee-save frame stores a whole method pointer
+ sizeof(StackReference<mirror::ArtMethod>),
kStackAlignment);
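The frame size above is rounded up to the stack alignment; a sketch of that rounding under the usual power-of-two RoundUp semantics (this mirrors ART's helper rather than quoting it, and the 16-byte alignment is an assumed example value):

#include <cassert>
#include <cstddef>

constexpr size_t RoundUpSketch(size_t x, size_t n) {  // n must be a power of two
  return (x + n - 1) & ~(n - 1);
}

int main() {
  const size_t kStackAlignmentSketch = 16;  // assumed alignment, for the example only
  assert(RoundUpSketch(52, kStackAlignmentSketch) == 64);
  assert(RoundUpSketch(64, kStackAlignmentSketch) == 64);
  return 0;
}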
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index de6ec05..939d856 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -385,11 +385,11 @@ class MANAGED ArtMethod FINAL : public Object {
size_t GetReturnPcOffsetInBytes(uint32_t frame_size_in_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
- return frame_size_in_bytes - kPointerSize;
+ return frame_size_in_bytes - sizeof(void*);
}
size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return kPointerSize;
+ return sizeof(void*);
}
void RegisterNative(Thread* self, const void* native_method, bool is_fast)
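Read literally, the two accessors above say the return PC slot sits sizeof(void*) below the end of the frame and the handle scope begins sizeof(void*) into it; a trivial standalone restatement with an arbitrary example frame size:

#include <cassert>
#include <cstddef>

size_t ReturnPcOffset(size_t frame_size_in_bytes) {
  return frame_size_in_bytes - sizeof(void*);
}

size_t HandleScopeOffset() { return sizeof(void*); }

int main() {
  const size_t frame_size = 96;  // arbitrary example value, not a real ART frame size
  assert(ReturnPcOffset(frame_size) + sizeof(void*) == frame_size);
  assert(HandleScopeOffset() == sizeof(void*));
  return 0;
}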
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 4d5f621..b89da9d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -141,7 +141,7 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
DCHECK(kUseBakerOrBrooksReadBarrier);
MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
- byte* raw_addr = reinterpret_cast<byte*>(this) + offset.SizeValue();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
@@ -602,7 +602,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offs
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
@@ -620,7 +620,7 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeWeakRelaxed(old_value, new_value);
@@ -638,7 +638,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_of
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
@@ -682,7 +682,7 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va
template<typename kSize, bool kIsVolatile>
inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
kSize* addr = reinterpret_cast<kSize*>(raw_addr);
if (kIsVolatile) {
reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
@@ -693,7 +693,7 @@ inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
template<typename kSize, bool kIsVolatile>
inline kSize Object::GetField(MemberOffset field_offset) {
- const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
if (kIsVolatile) {
return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
@@ -714,7 +714,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offs
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_value, new_value);
}
@@ -731,7 +731,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_of
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_value, new_value);
}
@@ -742,7 +742,7 @@ inline T* Object::GetFieldObject(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
T* result = ReadBarrier::Barrier<T, kReadBarrierOption>(this, field_offset, objref_addr);
if (kIsVolatile) {
@@ -782,7 +782,7 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(new_value);
}
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
if (kIsVolatile) {
// TODO: Refactor to use a SequentiallyConsistent store instead.
@@ -818,7 +818,7 @@ inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset f
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
+ return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<uint8_t*>(this) +
field_offset.Int32Value());
}
@@ -842,7 +842,7 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_
}
HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
bool success = atomic_addr->CompareExchangeWeakSequentiallyConsistent(old_ref.reference_,
@@ -874,7 +874,7 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset fiel
}
HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_value));
- byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
bool success = atomic_addr->CompareExchangeStrongSequentiallyConsistent(old_ref.reference_,
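All of the object-inl.h hunks follow one pattern: compute a byte address from the object base plus a field offset, reinterpret it as an atomic word, and operate on that. A standalone sketch of the pattern, with std::atomic standing in for ART's Atomic<> wrapper and FakeObject/CasField32 as illustrative names only:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct FakeObject { uint32_t header; uint32_t field; };  // stand-in for mirror::Object

bool CasField32(FakeObject* obj, size_t field_offset,
                uint32_t expected, uint32_t desired) {
  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + field_offset;
  // Reinterpreting raw storage as std::atomic mirrors ART's pattern; this is a
  // sketch, not portable production code.
  auto* atomic_addr = reinterpret_cast<std::atomic<uint32_t>*>(raw_addr);
  return atomic_addr->compare_exchange_strong(expected, desired,
                                              std::memory_order_seq_cst);
}

int main() {
  FakeObject o{0u, 7u};
  bool ok = CasField32(&o, offsetof(FakeObject, field), 7u, 8u);
  return (ok && o.field == 8u) ? 0 : 1;
}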
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 57069ab..9578c97 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -69,8 +69,8 @@ Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* s
size_t num_bytes) {
// Copy instance data. We assume memcpy copies by words.
// TODO: expose and use move32.
- byte* src_bytes = reinterpret_cast<byte*>(src);
- byte* dst_bytes = reinterpret_cast<byte*>(dest);
+ uint8_t* src_bytes = reinterpret_cast<uint8_t*>(src);
+ uint8_t* dst_bytes = reinterpret_cast<uint8_t*>(dest);
size_t offset = sizeof(Object);
memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
if (kUseBakerOrBrooksReadBarrier) {