// Copyright 2011 Google Inc. All Rights Reserved.

#include "space.h"

#include <sys/mman.h>

#include "UniquePtr.h"
#include "dlmalloc.h"
#include "file.h"
#include "heap.h"
#include "image.h"
#include "logging.h"
#include "object.h"
#include "os.h"
#include "runtime.h"
#include "utils.h"
namespace art {
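// DEBUG_SPACES turns on extra bounds and sanity checks in the allocation and free paths below;
// it is enabled in debug (non-NDEBUG) builds.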
#ifndef NDEBUG
#define DEBUG_SPACES 1
#endif
// Invokes a memory-management call (mprotect, madvise, ...) and aborts via PLOG(FATAL) if it
// fails. These calls return -1 and set errno on failure, so errno is left untouched here for
// PLOG to decode.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      PLOG(FATAL) << #call << " failed for " << what; \
    } \
  } while (false)
AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size,
size_t growth_limit, size_t capacity,
byte* requested_begin) {
  // Memory we commit to dlmalloc up front, before it asks for more via morecore.
  // Note: making this value large means that large allocations are unlikely to succeed, as
  // dlmalloc will ask sys_alloc for the memory, and that fails when the footprint (this value
  // plus the size of the large allocation) would exceed the footprint limit.
size_t starting_size = kPageSize;
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
VLOG(startup) << "Space::CreateAllocSpace entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
<< " capacity=" << PrettySize(capacity)
<< " requested_begin=" << reinterpret_cast<void*>(requested_begin);
}
// Sanity check arguments
if (starting_size > initial_size) {
initial_size = starting_size;
}
if (initial_size > growth_limit) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(initial_size) << ") is larger than its capacity ("
<< PrettySize(growth_limit) << ")";
return NULL;
}
if (growth_limit > capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(growth_limit) << ") is larger than the capacity ("
<< PrettySize(capacity) << ")";
return NULL;
}
  // Page-align the growth limit and capacity, which will be used to manage the mmapped storage.
growth_limit = RoundUp(growth_limit, kPageSize);
capacity = RoundUp(capacity, kPageSize);
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
capacity, PROT_READ | PROT_WRITE));
if (mem_map.get() == NULL) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return NULL;
}
void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
if (mspace == NULL) {
LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
return NULL;
}
  // Protect memory beyond the starting size. MoreCore will add r/w permissions as the space
  // grows, so everything above end must start out inaccessible.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }
  // Everything is set up, so record it in the immutable structure and return.
AllocSpace* space = new AllocSpace(name, mem_map.release(), mspace, end, growth_limit);
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
}
return space;
}
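// A typical call, as a sketch (the sizes here are illustrative, not the runtime's defaults):
//   AllocSpace* space = Space::CreateAllocSpace(
//       "alloc space", 4 * MB /* initial_size */, 16 * MB /* growth_limit */,
//       64 * MB /* capacity */, NULL /* requested_begin: let the kernel choose */);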
void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage, starting at begin with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
if (msp != NULL) {
// Do not allow morecore requests to succeed beyond the initial size of the heap
mspace_set_footprint_limit(msp, initial_size);
} else {
PLOG(ERROR) << "create_mspace_with_base failed";
}
return msp;
}
Object* AllocSpace::AllocWithoutGrowth(size_t num_bytes) {
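  // mspace_calloc zero-initializes the returned memory, which newly allocated objects rely on.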
Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
#if DEBUG_SPACES
if (result != NULL) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of heap " << *this;
}
#endif
return result;
}
Object* AllocSpace::AllocWithGrowth(size_t num_bytes) {
// Grow as much as possible within the mspace.
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
void* ptr = AllocWithoutGrowth(num_bytes);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
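  // The limit now equals the current footprint, so later allocations may reuse free space within
  // the footprint but cannot grow it further without another AllocWithGrowth call.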
// Return the new allocation or NULL.
Object* result = reinterpret_cast<Object*>(ptr);
CHECK(result == NULL || Contains(result));
return result;
}
void AllocSpace::Free(Object* ptr) {
#if DEBUG_SPACES
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
#endif
mspace_free(mspace_, ptr);
}
void AllocSpace::FreeList(size_t num_ptrs, Object** ptrs) {
#if DEBUG_SPACES
CHECK(ptrs != NULL);
size_t num_broken_ptrs = 0;
for (size_t i = 0; i < num_ptrs; i++) {
    if (!Contains(ptrs[i])) {
num_broken_ptrs++;
LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
}
}
CHECK_EQ(num_broken_ptrs, 0u);
#endif
mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
}
// Callback from dlmalloc when it needs to increase the footprint
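// (dlmalloc's MORECORE is routed to this function.) The common case is the heap's main alloc
// space, so try that first before searching every space.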
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
AllocSpace* space = Heap::GetAllocSpace();
if (LIKELY(space->GetMspace() == mspace)) {
return space->MoreCore(increment);
} else {
// Exhaustively search alloc spaces
const std::vector<Space*>& spaces = Heap::GetSpaces();
for (size_t i = 0; i < spaces.size(); i++) {
if (spaces[i]->IsAllocSpace()) {
        AllocSpace* alloc_space = spaces[i]->AsAllocSpace();
        if (mspace == alloc_space->GetMspace()) {
          return alloc_space->MoreCore(increment);
        }
}
}
LOG(FATAL) << "Unexpected call to art_heap_morecore. mspace: " << mspace
<< " increment: " << increment;
return NULL;
}
}
void* AllocSpace::MoreCore(intptr_t increment) {
byte* original_end = end_;
if (increment != 0) {
VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
byte* new_end = original_end + increment;
if (increment > 0) {
#if DEBUG_SPACES
// Should never be asked to increase the allocation beyond the capacity of the space. Enforced
// by mspace_set_footprint_limit.
CHECK_LE(new_end, Begin() + Capacity());
#endif
CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetSpaceName());
} else {
#if DEBUG_SPACES
      // Should never be asked to shrink the footprint below the start of the space (i.e. before
      // Begin()).
CHECK_GT(original_end + increment, Begin());
#endif
      // Advise the kernel that we don't need the pages, then protect them.
      // TODO: removing permissions from pages may cause a TLB shoot-down, which can be expensive
      // (the same isn't true for granting permissions, as a protected page shouldn't be in the
      // TLB). We should investigate the performance impact of simply ignoring this memory
      // protection change here and in Space::CreateAllocSpace; it's likely just a useful debug
      // feature.
size_t size = -increment;
CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetSpaceName());
CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetSpaceName());
}
// Update end_
end_ = new_end;
}
return original_end;
}
size_t AllocSpace::AllocationSize(const Object* obj) {
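  // Report the bytes usable by the caller plus dlmalloc's per-chunk bookkeeping overhead, i.e.
  // the full cost of the allocation.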
return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) + kChunkOverhead;
}
// Callback for mspace_inspect_all, which receives the start and end of each chunk and the bytes
// used. A used_bytes of zero indicates the range isn't in use, so we madvise the system that we
// don't need the backing pages.
static void DontNeed(void* start, void* end, size_t used_bytes, void* num_bytes) {
if (used_bytes == 0) {
start = reinterpret_cast<void*>(RoundUp((uintptr_t)start, kPageSize));
end = reinterpret_cast<void*>(RoundDown((uintptr_t)end, kPageSize));
if (end > start) {
// We have a page aligned region to madvise on
size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
}
}
}
void AllocSpace::Trim() {
// Trim to release memory at the end of the space
mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes we can advise the kernel we don't need.
  size_t num_bytes_released = 0;  // Note: DontNeed currently ignores this argument.
  mspace_inspect_all(mspace_, DontNeed, &num_bytes_released);
}
void AllocSpace::Walk(void (*callback)(void* start, void* end, size_t num_bytes, void* callback_arg),
                      void* arg) {
mspace_inspect_all(mspace_, callback, arg);
}
size_t AllocSpace::GetFootprintLimit() {
return mspace_footprint_limit(mspace_);
}
void AllocSpace::SetFootprintLimit(size_t new_size) {
VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
// Compare against the actual footprint, rather than the Size(), because the heap may not have
// grown all the way to the allowed size yet.
size_t current_space_size = mspace_footprint(mspace_);
if (new_size < current_space_size) {
// Don't let the space grow any more.
new_size = current_space_size;
}
mspace_set_footprint_limit(mspace_, new_size);
}
ImageSpace* Space::CreateImageSpace(const std::string& image_file_name) {
  CHECK(!image_file_name.empty());
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
}
UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
if (file.get() == NULL) {
LOG(ERROR) << "Failed to open " << image_file_name;
return NULL;
}
ImageHeader image_header;
bool success = file->ReadFully(&image_header, sizeof(image_header));
if (!success || !image_header.IsValid()) {
LOG(ERROR) << "Invalid image header " << image_file_name;
return NULL;
}
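  // The image was compiled against a fixed base address, so it must be mapped at exactly
  // image_header.GetImageBegin(); hence MAP_FIXED. The CHECK_EQ below verifies the placement.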
UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
file->Length(),
// TODO: selectively PROT_EXEC stubs
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_FIXED,
file->Fd(),
0));
if (map.get() == NULL) {
LOG(ERROR) << "Failed to map " << image_file_name;
return NULL;
}
CHECK_EQ(image_header.GetImageBegin(), map->Begin());
DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
Runtime* runtime = Runtime::Current();
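  // Publish the well-known stubs and methods stored in the image roots to the runtime.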
Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));
Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));
Object* resolution_stub_array = image_header.GetImageRoot(ImageHeader::kInstanceResolutionStubArray);
runtime->SetResolutionStubArray(
down_cast<ByteArray*>(resolution_stub_array), Runtime::kInstanceMethod);
resolution_stub_array = image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
runtime->SetResolutionStubArray(
down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
runtime->SetResolutionStubArray(
down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kSaveAll);
callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsOnly);
callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsAndArgs);
ImageSpace* space = new ImageSpace(image_file_name, map.release());
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< ") " << *space;
}
return space;
}
void ImageSpace::RecordImageAllocations(HeapBitmap* live_bitmap) const {
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
start_time = NanoTime();
}
DCHECK(!Runtime::Current()->IsStarted());
CHECK(live_bitmap != NULL);
byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
byte* end = End();
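  // Objects are packed contiguously after the image header, each aligned to kObjectAlignment,
  // so the image can be walked by striding over each object's rounded-up size.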
while (current < end) {
DCHECK_ALIGNED(current, kObjectAlignment);
const Object* obj = reinterpret_cast<const Object*>(current);
live_bitmap->Set(obj);
current += RoundUp(obj->SizeOf(), kObjectAlignment);
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
<< PrettyDuration(NanoTime() - start_time) << ")";
}
}
std::ostream& operator<<(std::ostream& os, const Space& space) {
os << (space.IsImageSpace() ? "Image" : "Alloc") << "Space["
<< "begin=" << reinterpret_cast<void*>(space.Begin())
<< ",end=" << reinterpret_cast<void*>(space.End())
<< ",size=" << PrettySize(space.Size()) << ",capacity=" << PrettySize(space.Capacity())
<< ",name=\"" << space.GetSpaceName() << "\"]";
return os;
}
} // namespace art