/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "dlmalloc_space.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "runtime.h"
#include "thread.h"
#include "utils.h"
//#include <valgrind/memcheck.h>
#include <valgrind.h>
namespace art {
namespace gc {
namespace space {
// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
do { \
int rc = call args; \
if (UNLIKELY(rc != 0)) { \
errno = rc; \
PLOG(FATAL) << # call << " failed for " << what; \
} \
} while (false)
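// Whether FreeList() prefetches the chunk headers of upcoming pointers while computing sizes.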
static const bool kPrefetchDuringDlMallocFreeList = true;
// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment.
const size_t kValgrindRedZoneBytes = 8;
// A specialization of DlMallocSpace that provides information to valgrind wrt allocations.
class ValgrindDlMallocSpace : public DlMallocSpace {
public:
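// Allocate, growing the footprint if needed; the returned object is offset past a leading red
// zone and registered with Valgrind's mempool tracking.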
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes) {
void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + (2 * kValgrindRedZoneBytes));
if (obj_with_rdz != NULL) {
//VALGRIND_MAKE_MEM_UNDEFINED();
mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
kValgrindRedZoneBytes);
VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
LOG(INFO) << "AllocWithGrowth on " << self << " = " << obj_with_rdz
<< " of size " << num_bytes;
return result;
} else {
return NULL;
}
}
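// Allocate without growing the footprint; red zones are placed around the returned object.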
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) {
void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + (2 * kValgrindRedZoneBytes));
if (obj_with_rdz != NULL) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
kValgrindRedZoneBytes);
VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
LOG(INFO) << "Alloc on " << self << " = " << obj_with_rdz
<< " of size " << num_bytes;
return result;
} else {
return NULL;
}
}
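// Report the allocation size as seen by callers, i.e. excluding the surrounding red zones.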
virtual size_t AllocationSize(const mirror::Object* obj) {
const void* obj_after_rdz = reinterpret_cast<const void*>(obj);
size_t result = DlMallocSpace::AllocationSize(
reinterpret_cast<const mirror::Object*>(reinterpret_cast<const byte*>(obj_after_rdz) -
kValgrindRedZoneBytes));
return result - (2 * kValgrindRedZoneBytes);
}
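// Translate the object pointer back to the start of the red-zoned allocation before freeing it.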
virtual size_t Free(Thread* self, mirror::Object* ptr) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
LOG(INFO) << "Free on " << self << " of " << obj_with_rdz;
size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
return freed - (2 * kValgrindRedZoneBytes);
}
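// Free a batch of objects, undoing the red-zone offset for each pointer.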
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
size_t freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
void* obj_after_rdz = reinterpret_cast<void*>(ptrs[i]);
void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
LOG(INFO) << "FreeList on " << self << " of " << obj_with_rdz;
freed += DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
}
return freed - (2 * kValgrindRedZoneBytes * num_ptrs);
}
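// Register the space with Valgrind as a memory pool so that red zones are tracked per allocation.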
ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit) :
DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
VALGRIND_CREATE_MEMPOOL(GetMspace(), kValgrindRedZoneBytes, true);
}
virtual ~ValgrindDlMallocSpace() {
VALGRIND_DESTROY_MEMPOOL(GetMspace());
}
private:
DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
};
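// Monotonically increasing index used to give each space's bitmaps a unique name.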
size_t DlMallocSpace::bitmap_index_ = 0;
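// Wraps an already-created mspace that lives inside mem_map, and creates live/mark bitmaps
// covering [Begin(), Begin() + Capacity()).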
DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit)
: MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
total_objects_allocated_(0), lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
size_t bitmap_index = bitmap_index_++;
static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
live_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
mark_bitmap_.reset(accounting::SpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), Capacity()));
DCHECK(mark_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
}
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t
growth_limit, size_t capacity, byte* requested_begin) {
// Memory we promise to dlmalloc before it asks for morecore.
// Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
// size of the large allocation) will be greater than the footprint limit.
size_t starting_size = kPageSize;
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
start_time = NanoTime();
VLOG(startup) << "Space::CreateAllocSpace entering " << name
<< " initial_size=" << PrettySize(initial_size)
<< " growth_limit=" << PrettySize(growth_limit)
<< " capacity=" << PrettySize(capacity)
<< " requested_begin=" << reinterpret_cast<void*>(requested_begin);
}
// Sanity check arguments
if (starting_size > initial_size) {
initial_size = starting_size;
}
if (initial_size > growth_limit) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(initial_size) << ") is larger than its growth limit ("
<< PrettySize(growth_limit) << ")";
return NULL;
}
if (growth_limit > capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit ("
<< PrettySize(growth_limit) << ") is larger than the capacity ("
<< PrettySize(capacity) << ")";
return NULL;
}
// Page align growth limit and capacity which will be used to manage mmapped storage
growth_limit = RoundUp(growth_limit, kPageSize);
capacity = RoundUp(capacity, kPageSize);
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
capacity, PROT_READ | PROT_WRITE));
if (mem_map.get() == NULL) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity);
return NULL;
}
void* mspace = CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
if (mspace == NULL) {
LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
return NULL;
}
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
if (capacity - initial_size > 0) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
}
// Everything is set up, so record the result in the (immutable) space object and return it.
MemMap* mem_map_ptr = mem_map.release();
DlMallocSpace* space;
if (RUNNING_ON_VALGRIND > 0) {
space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
growth_limit);
} else {
space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
}
return space;
}
void* DlMallocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
// clear errno to allow PLOG on error
errno = 0;
// create mspace using our backing storage starting at begin and with a footprint of
// morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
// morecore_start bytes of memory are exhausted, morecore will be called.
void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
if (msp != NULL) {
// Do not allow morecore requests to succeed beyond the initial size of the heap
mspace_set_footprint_limit(msp, initial_size);
} else {
PLOG(ERROR) << "create_mspace_with_base failed";
}
return msp;
}
void DlMallocSpace::SwapBitmaps() {
live_bitmap_.swap(mark_bitmap_);
// Swap names to get more descriptive diagnostics.
std::string temp_name(live_bitmap_->GetName());
live_bitmap_->SetName(mark_bitmap_->GetName());
mark_bitmap_->SetName(temp_name);
}
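// Common allocation path (caller must hold lock_). mspace_calloc returns zeroed memory, and the
// allocation counters are updated on success.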
mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
if (result != NULL) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
size_t allocation_size = AllocationSize(result);
num_bytes_allocated_ += allocation_size;
total_bytes_allocated_ += allocation_size;
++total_objects_allocated_;
++num_objects_allocated_;
}
return result;
}
mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
return AllocWithoutGrowthLocked(num_bytes);
}
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
// Grow as much as possible within the mspace.
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
mirror::Object* result = AllocWithoutGrowthLocked(num_bytes);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
// Return the new allocation or NULL.
CHECK(!kDebugSpaces || result == NULL || Contains(result));
return result;
}
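// Page-align and record the new growth limit; if the space already extends past it, pull end_ back.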
void DlMallocSpace::SetGrowthLimit(size_t growth_limit) {
growth_limit = RoundUp(growth_limit, kPageSize);
growth_limit_ = growth_limit;
if (Size() > growth_limit_) {
end_ = begin_ + growth_limit;
}
}
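// Split the space at the current (page-aligned) end: the already-used portion becomes the zygote
// space, and a new DlMallocSpace covering the remaining capacity is created and returned for
// post-zygote allocations.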
DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
DCHECK(IsAligned<kPageSize>(begin_));
DCHECK(IsAligned<kPageSize>(end_));
size_t size = RoundUp(Size(), kPageSize);
// Trim the heap so that we minimize the size of the Zygote space.
Trim();
// Trim our mem-map to free unused pages.
GetMemMap()->UnMapAtEnd(end_);
// TODO: Don't hardcode these in?
const size_t starting_size = kPageSize;
const size_t initial_size = 2 * MB;
// Remaining size is for the new alloc space.
const size_t growth_limit = growth_limit_ - size;
const size_t capacity = Capacity() - size;
VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
<< "End " << reinterpret_cast<const void*>(end_) << "\n"
<< "Size " << size << "\n"
<< "GrowthLimit " << growth_limit_ << "\n"
<< "Capacity " << Capacity();
SetGrowthLimit(RoundUp(size, kPageSize));
SetFootprintLimit(RoundUp(size, kPageSize));
// FIXME: Do we need reference counted pointers here?
// Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
VLOG(heap) << "Creating new AllocSpace: ";
VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE));
void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
if (capacity - initial_size > 0) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
}
DlMallocSpace* alloc_space =
new DlMallocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
name_ += "-zygote-transformed";
VLOG(heap) << "zygote space creation done";
return alloc_space;
}
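// Free a single object, returning the number of bytes released (usable size plus chunk overhead).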
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
MutexLock mu(self, lock_);
if (kDebugSpaces) {
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
const size_t bytes_freed = InternalAllocationSize(ptr);
num_bytes_allocated_ -= bytes_freed;
--num_objects_allocated_;
mspace_free(mspace_, ptr);
return bytes_freed;
}
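// Bulk free: sizes are summed without the lock, then the counters are updated and
// mspace_bulk_free is called while holding lock_.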
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
DCHECK(ptrs != NULL);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
mirror::Object* ptr = ptrs[i];
const size_t look_ahead = 8;
if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
// The chunk header for the allocation sits sizeof(size_t) bytes before the returned pointer.
__builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
}
bytes_freed += InternalAllocationSize(ptr);
}
if (kDebugSpaces) {
size_t num_broken_ptrs = 0;
for (size_t i = 0; i < num_ptrs; i++) {
if (!Contains(ptrs[i])) {
num_broken_ptrs++;
LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
} else {
size_t size = mspace_usable_size(ptrs[i]);
memset(ptrs[i], 0xEF, size);
}
}
CHECK_EQ(num_broken_ptrs, 0u);
}
{
MutexLock mu(self, lock_);
num_bytes_allocated_ -= bytes_freed;
num_objects_allocated_ -= num_ptrs;
mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
return bytes_freed;
}
}
// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
Heap* heap = Runtime::Current()->GetHeap();
DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
return heap->GetAllocSpace()->MoreCore(increment);
}
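// Grow or shrink the space in response to dlmalloc's footprint changes, fixing up page
// protections and end_; returns the previous end.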
void* DlMallocSpace::MoreCore(intptr_t increment) {
lock_.AssertHeld(Thread::Current());
byte* original_end = end_;
if (increment != 0) {
VLOG(heap) << "DlMallocSpace::MoreCore " << PrettySize(increment);
byte* new_end = original_end + increment;
if (increment > 0) {
// Should never be asked to increase the allocation beyond the capacity of the space. Enforced
// by mspace_set_footprint_limit.
CHECK_LE(new_end, Begin() + Capacity());
CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
} else {
// Should never be asked for a negative footprint (i.e. to shrink below Begin()).
CHECK_GT(original_end + increment, Begin());
// Advise we don't need the pages and protect them
// TODO: removing permissions from the pages may cause a TLB shoot-down, which can be
// expensive (the same isn't true for granting permissions to a page, as the protected
// page shouldn't be in the TLB). We should investigate the performance impact of simply
// ignoring the memory protection change here and in Space::CreateAllocSpace; it's
// likely just a useful debug feature.
size_t size = -increment;
CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
}
// Update end_
end_ = new_end;
}
return original_end;
}
// Virtual functions can't get inlined.
inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
kChunkOverhead;
}
size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
return InternalAllocationSize(obj);
}
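// Release unused memory at the end of the space and madvise away page-sized holes; returns the
// number of bytes reclaimed.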
size_t DlMallocSpace::Trim() {
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
mspace_trim(mspace_, 0);
// Visit space looking for page-sized holes to advise the kernel we don't need.
size_t reclaimed = 0;
mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
return reclaimed;
}
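// Visit all allocated chunks; a final callback(NULL, NULL, 0, arg) signals the end of the space.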
void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
callback(NULL, NULL, 0, arg); // Indicate end of a space.
}
size_t DlMallocSpace::GetFootprintLimit() {
MutexLock mu(Thread::Current(), lock_);
return mspace_footprint_limit(mspace_);
}
void DlMallocSpace::SetFootprintLimit(size_t new_size) {
MutexLock mu(Thread::Current(), lock_);
VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
// Compare against the actual footprint, rather than the Size(), because the heap may not have
// grown all the way to the allowed size yet.
size_t current_space_size = mspace_footprint(mspace_);
if (new_size < current_space_size) {
// Don't let the space grow any more.
new_size = current_space_size;
}
mspace_set_footprint_limit(mspace_, new_size);
}
void DlMallocSpace::Dump(std::ostream& os) const {
os << GetType()
<< " begin=" << reinterpret_cast<void*>(Begin())
<< ",end=" << reinterpret_cast<void*>(End())
<< ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
<< ",name=\"" << GetName() << "\"]";
}
} // namespace space
} // namespace gc
} // namespace art