author     yunlian <yunlian@chromium.org>        2016-02-23 15:13:53 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-02-23 23:15:38 +0000
commit     c5bd7c0d5743573f3ec7975cb1f76c4eff4c7eff (patch)
tree       52ec318cfc0f425a6827bea91d32d641ecba8e37 /chromeos/hugepage_text/hugepage_text.cc
parent     b8812fef3cb23bbb30ccfd2884aba1ee60681346 (diff)
chromeos_hugepage: Make text section 2MB aligned for hugepages.
If we do not use the option '-Wl,--section-start=.text=2001000' at link time,
the performance gain from hugepages is gone. This is probably because the
original text section is not 2MB aligned, so after the mremap the hugepage is
not preserved.

This change removes the support for anonymous hugepages, because it caused
crashes on some boards. It also removes the hugepage size limit; we want to
use compiler flags to separate the hot and cold text segments.

BUG=chromium:569963
TEST=Chrome still uses hugepages on falco with a modified kernel. On x86-alex
and peach-pit, Chrome starts normally.

Review URL: https://codereview.chromium.org/1687223004

Cr-Commit-Position: refs/heads/master@{#377107}
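
The last hunk below replaces the old fixed-size truncation in RemapHugetlbText
with head-gap alignment: round the segment start up to the next 2MB boundary,
drop the unaligned head, round the remaining length down to whole hugepages,
and cap it at kNumHugePages. The following is a minimal standalone sketch of
that arithmetic; the helper name AlignForHugepages and the sample inputs are
illustrative only, not part of the patch.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    constexpr size_t kHpageShift = 21;  // 2MB hugepages
    constexpr size_t kHpageSize = size_t{1} << kHpageShift;
    constexpr size_t kHpageMask = ~(kHpageSize - 1);
    constexpr size_t kNumHugePages = 15;

    // Returns {aligned start, hugepage-multiple size}; size 0 means "skip remap".
    std::pair<uintptr_t, size_t> AlignForHugepages(uintptr_t vaddr, size_t segsize) {
      // Distance from vaddr up to the next 2MB boundary (0 if already aligned).
      uintptr_t head_gap = (kHpageSize - vaddr % kHpageSize) % kHpageSize;
      if (segsize < head_gap)
        return {0, 0};
      // Drop the unaligned head, round down to whole hugepages, cap at the pool.
      size_t hsize = (segsize - head_gap) & kHpageMask;
      if (hsize > kHpageSize * kNumHugePages)
        hsize = kHpageSize * kNumHugePages;
      return {vaddr + head_gap, hsize};
    }

    int main() {
      // e.g. a text segment starting 4KB past a 2MB boundary, 40MB long.
      auto [addr, hsize] = AlignForHugepages(0x2001000, 40u << 20);
      std::printf("remap %#zx bytes starting at %#zx\n", hsize,
                  static_cast<size_t>(addr));
    }

With the sample inputs the sketch prints a remap of 0x1e00000 bytes (15
hugepages, the kNumHugePages cap) starting at 0x2200000, the first 2MB
boundary at or above the segment start.
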
Diffstat (limited to 'chromeos/hugepage_text/hugepage_text.cc')
-rw-r--r--  chromeos/hugepage_text/hugepage_text.cc | 82
1 file changed, 19 insertions(+), 63 deletions(-)
diff --git a/chromeos/hugepage_text/hugepage_text.cc b/chromeos/hugepage_text/hugepage_text.cc
index ccae53f..cb005fd 100644
--- a/chromeos/hugepage_text/hugepage_text.cc
+++ b/chromeos/hugepage_text/hugepage_text.cc
@@ -32,8 +32,6 @@ const int kHpageSize = (1 << kHpageShift);
const int kHpageMask = (~(kHpageSize - 1));
const int kProtection = (PROT_READ | PROT_WRITE);
-const int kMmapFlags = (MAP_ANONYMOUS | MAP_SHARED);
-const int kMmapHtlbFlags = (kMmapFlags | MAP_HUGETLB);
const int kMremapFlags = (MREMAP_MAYMOVE | MREMAP_FIXED);
// The number of hugepages we want to use to map chrome text section
@@ -41,47 +39,6 @@ const int kMremapFlags = (MREMAP_MAYMOVE | MREMAP_FIXED);
// in to a small area of the binary.
const int kNumHugePages = 15;
-// mremap syscall is always supported for small page segment on all kernels.
-// However, it is not the case for hugepage.
-// If not used carefully, mremap() a hugepage segment directly onto small page
-// text segment will cause irreversible damage to the existing text mapping
-// and cause process to segfault. This function will dynamically at run time
-// determine whether a process can safely execute mremap on a hugepage segment
-// without taking the process down.
-//
-// Inputs: none
-// Return: true if mremap on hugepage segment is supported on the host OS.
-static int HugetlbMremapSupported(void) {
- void *haddr = 0, *raddr = 0, *taddr;
- const size_t size = kHpageSize;
- int ret = 0;
-
- // use a pair of hugepage memory segments to test whether mremap() is
- // supported
- haddr = mmap(NULL, size, kProtection, kMmapHtlbFlags | MAP_NORESERVE, 0, 0);
- if (haddr == MAP_FAILED) {
- return 0;
- }
- taddr = mmap(NULL, size, kProtection, kMmapHtlbFlags | MAP_NORESERVE, 0, 0);
- if (taddr == MAP_FAILED) {
- munmap(haddr, size);
- return 0;
- }
-
- raddr = mremap(haddr, size, size, kMremapFlags, taddr);
- if (raddr != MAP_FAILED) {
- ret = 1;
- // clean up. raddr == taddr; also haddr is implicitly unmapped
- munmap(raddr, size);
- } else {
- // mremap fail, clean up both src and dst segments
- munmap(haddr, size);
- munmap(taddr, size);
- }
-
- return ret;
-}
-
// Get an anonymous mapping backed by explicit transparent hugepage
// Return NULL if such mapping can not be established.
static void* GetTransparentHugepageMapping(const size_t hsize) {
@@ -129,21 +86,10 @@ static void NoAsanAlignedMemcpy(void* dst, void* src, size_t size) {
// Effect: physical backing page changed from small page to hugepage. If there
// are error condition, the remapping operation is aborted.
static void MremapHugetlbText(void* vaddr, const size_t hsize) {
- void* haddr = MAP_FAILED;
-
- if ((reinterpret_cast<intptr_t>(vaddr) & ~kHpageMask) == 0 &&
- HugetlbMremapSupported()) {
- // Try anon hugepage from static hugepage pool only if the source address
- // is hugepage aligned, otherwise, mremap below has non-recoverable error.
- haddr = mmap(NULL, hsize, kProtection, kMmapHtlbFlags, 0, 0);
- }
-
- if (haddr == MAP_FAILED) {
- PLOG(INFO) << "static hugepage not available, trying transparent hugepage";
- haddr = GetTransparentHugepageMapping(hsize);
- if (haddr == NULL)
- return;
- }
+ DCHECK_EQ(0ul, reinterpret_cast<uintptr_t>(vaddr) & ~kHpageMask);
+ void* haddr = GetTransparentHugepageMapping(hsize);
+ if (haddr == NULL)
+ return;
// Copy text segment to hugepage mapping. We are using a non-asan memcpy,
// otherwise it would be flagged as a bunch of out of bounds reads.
@@ -167,20 +113,30 @@ static void MremapHugetlbText(void* vaddr, const size_t hsize) {
// Top level text remapping function.
//
// Inputs: vaddr, the starting virtual address to remap to hugepage
-// hsize, size of the memory segment to remap in bytes
+// segsize, size of the memory segment to remap in bytes
// Return: none
// Effect: physical backing page changed from small page to hugepage. If there
// are error condition, the remaping operation is aborted.
static void RemapHugetlbText(void* vaddr, const size_t segsize) {
- int hsize = segsize;
- if (segsize > kHpageSize * kNumHugePages)
- hsize = kHpageSize * kNumHugePages;
+ // remove unaligned head regions
+ uintptr_t head_gap =
+ (kHpageSize - reinterpret_cast<uintptr_t>(vaddr) % kHpageSize) %
+ kHpageSize;
+ uintptr_t addr = reinterpret_cast<uintptr_t>(vaddr) + head_gap;
+
+ if (segsize < head_gap)
+ return;
+
+ size_t hsize = segsize - head_gap;
hsize = hsize & kHpageMask;
+ if (hsize > kHpageSize * kNumHugePages)
+ hsize = kHpageSize * kNumHugePages;
+
if (hsize == 0)
return;
- MremapHugetlbText(vaddr, hsize);
+ MremapHugetlbText(reinterpret_cast<void*>(addr), hsize);
}
// For a given ELF program header descriptor, iterates over all segments within
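
The context line above belongs to the part of the file that walks the ELF
program headers to find the segments handed to RemapHugetlbText. That walk is
not part of this hunk; the sketch below shows one way such a walk can be
written with glibc's dl_iterate_phdr, and the PhdrCallback/RemapSegment names
are assumptions for illustration, not the file's actual functions.

    #include <link.h>
    #include <cstddef>
    #include <cstdio>

    // Stand-in for the real remap entry point (e.g. RemapHugetlbText); assumed.
    static void RemapSegment(void* vaddr, size_t segsize) {
      std::printf("would remap %zu bytes at %p onto hugepages\n", segsize, vaddr);
    }

    static int PhdrCallback(struct dl_phdr_info* info, size_t /*size*/, void*) {
      // The main executable reports an empty dlpi_name; skip shared libraries.
      if (info->dlpi_name[0] != '\0')
        return 0;
      for (int i = 0; i < info->dlpi_phnum; ++i) {
        const ElfW(Phdr)& phdr = info->dlpi_phdr[i];
        // Loadable, executable segments hold the text we want on hugepages.
        if (phdr.p_type == PT_LOAD && (phdr.p_flags & PF_X)) {
          RemapSegment(reinterpret_cast<void*>(info->dlpi_addr + phdr.p_vaddr),
                       phdr.p_memsz);
        }
      }
      return 1;  // non-zero stops iteration after the main object
    }

    void RemapExecutableText() {
      dl_iterate_phdr(PhdrCallback, nullptr);
    }

    int main() {
      RemapExecutableText();
    }

Returning non-zero from the callback stops the iteration after the main
executable, which is the only object whose text this change remaps.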