Diffstat (limited to 'linker/linker_phdr.cpp')
-rw-r--r-- | linker/linker_phdr.cpp | 512 |
1 file changed, 333 insertions, 179 deletions
diff --git a/linker/linker_phdr.cpp b/linker/linker_phdr.cpp index 3101511..0b99d20 100644 --- a/linker/linker_phdr.cpp +++ b/linker/linker_phdr.cpp @@ -31,6 +31,9 @@ #include <errno.h> #include <machine/exec.h> #include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> #include "linker.h" #include "linker_debug.h" @@ -50,7 +53,7 @@ p_vaddr -> segment's virtual address p_flags -> segment flags (e.g. readable, writable, executable) - We will ignore the p_paddr and p_align fields of Elf_Phdr for now. + We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now. The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz) ranges of virtual addresses. A few rules apply: @@ -111,7 +114,7 @@ **/ -#define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0) +#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0) #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \ MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \ MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE)) @@ -124,19 +127,16 @@ ElfReader::ElfReader(const char* name, int fd) } ElfReader::~ElfReader() { - if (fd_ != -1) { - close(fd_); - } if (phdr_mmap_ != NULL) { munmap(phdr_mmap_, phdr_size_); } } -bool ElfReader::Load() { +bool ElfReader::Load(const android_dlextinfo* extinfo) { return ReadElfHeader() && VerifyElfHeader() && ReadProgramHeader() && - ReserveAddressSpace() && + ReserveAddressSpace(extinfo) && LoadSegments() && FindPhdr(); } @@ -156,10 +156,7 @@ bool ElfReader::ReadElfHeader() { } bool ElfReader::VerifyElfHeader() { - if (header_.e_ident[EI_MAG0] != ELFMAG0 || - header_.e_ident[EI_MAG1] != ELFMAG1 || - header_.e_ident[EI_MAG2] != ELFMAG2 || - header_.e_ident[EI_MAG3] != ELFMAG3) { + if (memcmp(header_.e_ident, ELFMAG, SELFMAG) != 0) { DL_ERR("\"%s\" has bad ELF magic", name_); return false; } @@ -217,14 +214,14 @@ bool ElfReader::ReadProgramHeader() { // Like the kernel, we only accept program header tables that // are smaller than 64KiB. - if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) { + if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) { DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_); return false; } - Elf_Addr page_min = PAGE_START(header_.e_phoff); - Elf_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf_Phdr))); - Elf_Addr page_offset = PAGE_OFFSET(header_.e_phoff); + ElfW(Addr) page_min = PAGE_START(header_.e_phoff); + ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr)))); + ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff); phdr_size_ = page_max - page_min; @@ -235,7 +232,7 @@ bool ElfReader::ReadProgramHeader() { } phdr_mmap_ = mmap_result; - phdr_table_ = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset); + phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset); return true; } @@ -249,50 +246,50 @@ bool ElfReader::ReadProgramHeader() { * set to the minimum and maximum addresses of pages to be reserved, * or 0 if there is nothing to load. 
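The PFLAGS_TO_PROT macro touched above is the entire translation layer between ELF segment permission bits and mmap/mprotect protections. A minimal standalone sketch (the macros are copied verbatim; the printed comparisons are purely illustrative):

#include <elf.h>
#include <sys/mman.h>
#include <stdio.h>

#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
#define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
                           MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
                           MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))

int main() {
  // A typical r-x .text segment and a rw- .data segment.
  printf("PF_R|PF_X -> prot %d (PROT_READ|PROT_EXEC is %d)\n",
         PFLAGS_TO_PROT(PF_R | PF_X), PROT_READ | PROT_EXEC);
  printf("PF_R|PF_W -> prot %d (PROT_READ|PROT_WRITE is %d)\n",
         PFLAGS_TO_PROT(PF_R | PF_W), PROT_READ | PROT_WRITE);
  return 0;
}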
*/ -size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count, - Elf_Addr* out_min_vaddr, - Elf_Addr* out_max_vaddr) { - Elf_Addr min_vaddr = UINTPTR_MAX; - Elf_Addr max_vaddr = 0; - - bool found_pt_load = false; - for (size_t i = 0; i < phdr_count; ++i) { - const Elf_Phdr* phdr = &phdr_table[i]; - - if (phdr->p_type != PT_LOAD) { - continue; - } - found_pt_load = true; +size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count, + ElfW(Addr)* out_min_vaddr, + ElfW(Addr)* out_max_vaddr) { + ElfW(Addr) min_vaddr = UINTPTR_MAX; + ElfW(Addr) max_vaddr = 0; - if (phdr->p_vaddr < min_vaddr) { - min_vaddr = phdr->p_vaddr; - } + bool found_pt_load = false; + for (size_t i = 0; i < phdr_count; ++i) { + const ElfW(Phdr)* phdr = &phdr_table[i]; - if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) { - max_vaddr = phdr->p_vaddr + phdr->p_memsz; - } - } - if (!found_pt_load) { - min_vaddr = 0; + if (phdr->p_type != PT_LOAD) { + continue; } + found_pt_load = true; - min_vaddr = PAGE_START(min_vaddr); - max_vaddr = PAGE_END(max_vaddr); - - if (out_min_vaddr != NULL) { - *out_min_vaddr = min_vaddr; + if (phdr->p_vaddr < min_vaddr) { + min_vaddr = phdr->p_vaddr; } - if (out_max_vaddr != NULL) { - *out_max_vaddr = max_vaddr; + + if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) { + max_vaddr = phdr->p_vaddr + phdr->p_memsz; } - return max_vaddr - min_vaddr; + } + if (!found_pt_load) { + min_vaddr = 0; + } + + min_vaddr = PAGE_START(min_vaddr); + max_vaddr = PAGE_END(max_vaddr); + + if (out_min_vaddr != NULL) { + *out_min_vaddr = min_vaddr; + } + if (out_max_vaddr != NULL) { + *out_max_vaddr = max_vaddr; + } + return max_vaddr - min_vaddr; } // Reserve a virtual address range big enough to hold all loadable // segments of a program header table. This is done by creating a // private anonymous mmap() with PROT_NONE. 
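To make the address-space math concrete, here is a self-contained sketch of what phdr_table_get_load_size() computes for a made-up two-segment table. The page helpers are re-derived locally for a 4 KiB page so the sample compiles outside the linker tree, and ElfW() is taken from <link.h>; the segment sizes are arbitrary examples.

#include <elf.h>
#include <link.h>
#include <stdint.h>
#include <stdio.h>

static const uintptr_t kPageSize = 4096;
static uintptr_t page_start(uintptr_t x) { return x & ~(kPageSize - 1); }
static uintptr_t page_end(uintptr_t x)   { return page_start(x + kPageSize - 1); }

int main() {
  // Two PT_LOAD segments like a small library: text at 0x0, data at 0x2000.
  ElfW(Phdr) phdrs[2] = {};
  phdrs[0].p_type = PT_LOAD; phdrs[0].p_vaddr = 0x0;    phdrs[0].p_memsz = 0x1f40;
  phdrs[1].p_type = PT_LOAD; phdrs[1].p_vaddr = 0x2000; phdrs[1].p_memsz = 0x400;

  uintptr_t min_vaddr = UINTPTR_MAX, max_vaddr = 0;
  for (const ElfW(Phdr)& phdr : phdrs) {
    if (phdr.p_type != PT_LOAD) continue;
    if (phdr.p_vaddr < min_vaddr) min_vaddr = phdr.p_vaddr;
    if (phdr.p_vaddr + phdr.p_memsz > max_vaddr) max_vaddr = phdr.p_vaddr + phdr.p_memsz;
  }
  // Round out to whole pages, exactly as the function above does.
  min_vaddr = page_start(min_vaddr);
  max_vaddr = page_end(max_vaddr);
  printf("reserve %zu bytes starting at vaddr %#zx\n",
         (size_t)(max_vaddr - min_vaddr), (size_t)min_vaddr);  // 0x3000 bytes at 0
  return 0;
}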
-bool ElfReader::ReserveAddressSpace() { - Elf_Addr min_vaddr; +bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) { + ElfW(Addr) min_vaddr; load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr); if (load_size_ == 0) { DL_ERR("\"%s\" has no loadable segments", name_); @@ -300,11 +297,33 @@ bool ElfReader::ReserveAddressSpace() { } uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr); - int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS; - void* start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0); - if (start == MAP_FAILED) { - DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_); - return false; + void* start; + size_t reserved_size = 0; + bool reserved_hint = true; + + if (extinfo != NULL) { + if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS) { + reserved_size = extinfo->reserved_size; + reserved_hint = false; + } else if (extinfo->flags & ANDROID_DLEXT_RESERVED_ADDRESS_HINT) { + reserved_size = extinfo->reserved_size; + } + } + + if (load_size_ > reserved_size) { + if (!reserved_hint) { + DL_ERR("reserved address space %zd smaller than %zd bytes needed for \"%s\"", + reserved_size - load_size_, load_size_, name_); + return false; + } + int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS; + start = mmap(addr, load_size_, PROT_NONE, mmap_flags, -1, 0); + if (start == MAP_FAILED) { + DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_); + return false; + } + } else { + start = extinfo->reserved_addr; } load_start_ = start; @@ -314,30 +333,30 @@ bool ElfReader::ReserveAddressSpace() { bool ElfReader::LoadSegments() { for (size_t i = 0; i < phdr_num_; ++i) { - const Elf_Phdr* phdr = &phdr_table_[i]; + const ElfW(Phdr)* phdr = &phdr_table_[i]; if (phdr->p_type != PT_LOAD) { continue; } // Segment addresses in memory. - Elf_Addr seg_start = phdr->p_vaddr + load_bias_; - Elf_Addr seg_end = seg_start + phdr->p_memsz; + ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_; + ElfW(Addr) seg_end = seg_start + phdr->p_memsz; - Elf_Addr seg_page_start = PAGE_START(seg_start); - Elf_Addr seg_page_end = PAGE_END(seg_end); + ElfW(Addr) seg_page_start = PAGE_START(seg_start); + ElfW(Addr) seg_page_end = PAGE_END(seg_end); - Elf_Addr seg_file_end = seg_start + phdr->p_filesz; + ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz; // File offsets. - Elf_Addr file_start = phdr->p_offset; - Elf_Addr file_end = file_start + phdr->p_filesz; + ElfW(Addr) file_start = phdr->p_offset; + ElfW(Addr) file_end = file_start + phdr->p_filesz; - Elf_Addr file_page_start = PAGE_START(file_start); - Elf_Addr file_length = file_end - file_page_start; + ElfW(Addr) file_page_start = PAGE_START(file_start); + ElfW(Addr) file_length = file_end - file_page_start; if (file_length != 0) { - void* seg_addr = mmap((void*)seg_page_start, + void* seg_addr = mmap(reinterpret_cast<void*>(seg_page_start), file_length, PFLAGS_TO_PROT(phdr->p_flags), MAP_FIXED|MAP_PRIVATE, @@ -352,7 +371,7 @@ bool ElfReader::LoadSegments() { // if the segment is writable, and does not end on a page boundary, // zero-fill it until the page limit. if ((phdr->p_flags & PF_W) != 0 && PAGE_OFFSET(seg_file_end) > 0) { - memset((void*)seg_file_end, 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end)); + memset(reinterpret_cast<void*>(seg_file_end), 0, PAGE_SIZE - PAGE_OFFSET(seg_file_end)); } seg_file_end = PAGE_END(seg_file_end); @@ -362,7 +381,7 @@ bool ElfReader::LoadSegments() { // between them. This is done by using a private anonymous // map for all extra pages. 
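The new extinfo handling in ReserveAddressSpace() is driven from the caller's side through android_dlopen_ext(). A hedged caller-side sketch of the ANDROID_DLEXT_RESERVED_ADDRESS path; the 64 MiB reservation and the helper name are placeholders, and error handling is minimal:

#include <android/dlext.h>
#include <dlfcn.h>
#include <sys/mman.h>
#include <stdio.h>

void* load_in_reserved_region(const char* path) {
  const size_t size = 64 * 1024 * 1024;   // placeholder reservation size
  void* addr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) return NULL;

  android_dlextinfo extinfo = {};
  extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS;  // hard requirement; use
                                                   // ANDROID_DLEXT_RESERVED_ADDRESS_HINT
                                                   // to fall back to a normal mmap instead
  extinfo.reserved_addr = addr;
  extinfo.reserved_size = size;

  void* handle = android_dlopen_ext(path, RTLD_NOW, &extinfo);
  if (handle == NULL) {
    fprintf(stderr, "android_dlopen_ext(%s) failed: %s\n", path, dlerror());
    munmap(addr, size);
  }
  return handle;
}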
if (seg_page_end > seg_file_end) { - void* zeromap = mmap((void*)seg_file_end, + void* zeromap = mmap(reinterpret_cast<void*>(seg_file_end), seg_page_end - seg_file_end, PFLAGS_TO_PROT(phdr->p_flags), MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE, @@ -381,26 +400,27 @@ bool ElfReader::LoadSegments() { * with optional extra flags (i.e. really PROT_WRITE). Used by * phdr_table_protect_segments and phdr_table_unprotect_segments. */ -static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_count, - Elf_Addr load_bias, int extra_prot_flags) { - const Elf_Phdr* phdr = phdr_table; - const Elf_Phdr* phdr_limit = phdr + phdr_count; - - for (; phdr < phdr_limit; phdr++) { - if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) - continue; - - Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; - Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; - - int ret = mprotect((void*)seg_page_start, - seg_page_end - seg_page_start, - PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags); - if (ret < 0) { - return -1; - } +static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count, + ElfW(Addr) load_bias, int extra_prot_flags) { + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + + for (; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) { + continue; } - return 0; + + ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; + ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; + + int ret = mprotect(reinterpret_cast<void*>(seg_page_start), + seg_page_end - seg_page_start, + PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags); + if (ret < 0) { + return -1; + } + } + return 0; } /* Restore the original protection modes for all loadable segments. @@ -414,8 +434,8 @@ static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_cou * Return: * 0 on error, -1 on failure (error code in errno). */ -int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) { - return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0); +int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) { + return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0); } /* Change the protection of all loaded segments in memory to writable. @@ -434,50 +454,50 @@ int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, E * Return: * 0 on error, -1 on failure (error code in errno). */ -int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) { - return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE); +int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) { + return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE); } /* Used internally by phdr_table_protect_gnu_relro and * phdr_table_unprotect_gnu_relro. */ -static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phdr_count, - Elf_Addr load_bias, int prot_flags) { - const Elf_Phdr* phdr = phdr_table; - const Elf_Phdr* phdr_limit = phdr + phdr_count; - - for (phdr = phdr_table; phdr < phdr_limit; phdr++) { - if (phdr->p_type != PT_GNU_RELRO) - continue; - - /* Tricky: what happens when the relro segment does not start - * or end at page boundaries?. 
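The protect/unprotect pair above exists so the linker can temporarily open up read-only segments (for example to apply text relocations) and then lock them down again. A self-contained demonstration of the same mprotect() pattern on an anonymous stand-in page:

#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main() {
  size_t page = sysconf(_SC_PAGESIZE);
  // Stand-in for a read-only PT_LOAD segment.
  char* seg = static_cast<char*>(mmap(NULL, page, PROT_READ,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (seg == MAP_FAILED) return 1;

  // "Unprotect": original flags plus PROT_WRITE (the extra_prot_flags argument above).
  if (mprotect(seg, page, PROT_READ | PROT_WRITE) != 0) return 1;
  memcpy(seg, "patched", 8);                 // the relocation write
  // "Protect": restore the flags derived from p_flags alone.
  if (mprotect(seg, page, PROT_READ) != 0) return 1;

  printf("%s\n", seg);                       // reads fine; further writes would fault
  munmap(seg, page);
  return 0;
}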
We're going to be over-protective - * here and put every page touched by the segment as read-only. - * - * This seems to match Ian Lance Taylor's description of the - * feature at http://www.airs.com/blog/archives/189. - * - * Extract: - * Note that the current dynamic linker code will only work - * correctly if the PT_GNU_RELRO segment starts on a page - * boundary. This is because the dynamic linker rounds the - * p_vaddr field down to the previous page boundary. If - * there is anything on the page which should not be read-only, - * the program is likely to fail at runtime. So in effect the - * linker must only emit a PT_GNU_RELRO segment if it ensures - * that it starts on a page boundary. - */ - Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; - Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; - - int ret = mprotect((void*)seg_page_start, - seg_page_end - seg_page_start, - prot_flags); - if (ret < 0) { - return -1; - } +static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count, + ElfW(Addr) load_bias, int prot_flags) { + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + + for (phdr = phdr_table; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_GNU_RELRO) { + continue; } - return 0; + + // Tricky: what happens when the relro segment does not start + // or end at page boundaries? We're going to be over-protective + // here and put every page touched by the segment as read-only. + + // This seems to match Ian Lance Taylor's description of the + // feature at http://www.airs.com/blog/archives/189. + + // Extract: + // Note that the current dynamic linker code will only work + // correctly if the PT_GNU_RELRO segment starts on a page + // boundary. This is because the dynamic linker rounds the + // p_vaddr field down to the previous page boundary. If + // there is anything on the page which should not be read-only, + // the program is likely to fail at runtime. So in effect the + // linker must only emit a PT_GNU_RELRO segment if it ensures + // that it starts on a page boundary. + ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; + ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; + + int ret = mprotect(reinterpret_cast<void*>(seg_page_start), + seg_page_end - seg_page_start, + prot_flags); + if (ret < 0) { + return -1; + } + } + return 0; } /* Apply GNU relro protection if specified by the program header. This will @@ -496,10 +516,143 @@ static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phd * Return: * 0 on error, -1 on failure (error code in errno). */ -int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) { - return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ); +int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) { + return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ); +} + +/* Serialize the GNU relro segments to the given file descriptor. This can be + * performed after relocations to allow another process to later share the + * relocated segment, if it was loaded at the same address. + * + * Input: + * phdr_table -> program header table + * phdr_count -> number of entries in tables + * load_bias -> load bias + * fd -> writable file descriptor to use + * Return: + * 0 on error, -1 on failure (error code in errno). 
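The same page rounding applied to PT_GNU_RELRO above can be observed from an ordinary process with dl_iterate_phdr(). A small sketch that prints the page range each loaded object's relro segment covers; a 4 KiB page is assumed for the rounding (the linker itself uses its PAGE_START/PAGE_END macros):

#include <elf.h>
#include <link.h>
#include <stdio.h>

static int show_relro(struct dl_phdr_info* info, size_t, void*) {
  for (int i = 0; i < info->dlpi_phnum; ++i) {
    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_GNU_RELRO) continue;
    // Round down/up to page boundaries, mirroring the mprotect() range above.
    ElfW(Addr) start = (info->dlpi_addr + phdr->p_vaddr) & ~ElfW(Addr)(0xfff);
    ElfW(Addr) end = (info->dlpi_addr + phdr->p_vaddr + phdr->p_memsz + 0xfff) & ~ElfW(Addr)(0xfff);
    printf("%s: relro pages [%#lx, %#lx)\n",
           info->dlpi_name[0] ? info->dlpi_name : "(main executable)",
           (unsigned long)start, (unsigned long)end);
  }
  return 0;
}

int main() {
  dl_iterate_phdr(show_relro, NULL);
  return 0;
}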
+ */ +int phdr_table_serialize_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias, + int fd) { + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + ssize_t file_offset = 0; + + for (phdr = phdr_table; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_GNU_RELRO) { + continue; + } + + ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; + ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; + ssize_t size = seg_page_end - seg_page_start; + + ssize_t written = TEMP_FAILURE_RETRY(write(fd, reinterpret_cast<void*>(seg_page_start), size)); + if (written != size) { + return -1; + } + void* map = mmap(reinterpret_cast<void*>(seg_page_start), size, PROT_READ, + MAP_PRIVATE|MAP_FIXED, fd, file_offset); + if (map == MAP_FAILED) { + return -1; + } + file_offset += size; + } + return 0; +} + +/* Where possible, replace the GNU relro segments with mappings of the given + * file descriptor. This can be performed after relocations to allow a file + * previously created by phdr_table_serialize_gnu_relro in another process to + * replace the dirty relocated pages, saving memory, if it was loaded at the + * same address. We have to compare the data before we map over it, since some + * parts of the relro segment may not be identical due to other libraries in + * the process being loaded at different addresses. + * + * Input: + * phdr_table -> program header table + * phdr_count -> number of entries in tables + * load_bias -> load bias + * fd -> readable file descriptor to use + * Return: + * 0 on error, -1 on failure (error code in errno). + */ +int phdr_table_map_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias, + int fd) { + // Map the file at a temporary location so we can compare its contents. + struct stat file_stat; + if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) { + return -1; + } + off_t file_size = file_stat.st_size; + void* temp_mapping = NULL; + if (file_size > 0) { + temp_mapping = mmap(NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0); + if (temp_mapping == MAP_FAILED) { + return -1; + } + } + size_t file_offset = 0; + + // Iterate over the relro segments and compare/remap the pages. + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + + for (phdr = phdr_table; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_GNU_RELRO) { + continue; + } + + ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias; + ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias; + + char* file_base = static_cast<char*>(temp_mapping) + file_offset; + char* mem_base = reinterpret_cast<char*>(seg_page_start); + size_t match_offset = 0; + size_t size = seg_page_end - seg_page_start; + + if (file_size - file_offset < size) { + // File is too short to compare to this segment. The contents are likely + // different as well (it's probably for a different library version) so + // just don't bother checking. + break; + } + + while (match_offset < size) { + // Skip over dissimilar pages. + while (match_offset < size && + memcmp(mem_base + match_offset, file_base + match_offset, PAGE_SIZE) != 0) { + match_offset += PAGE_SIZE; + } + + // Count similar pages. + size_t mismatch_offset = match_offset; + while (mismatch_offset < size && + memcmp(mem_base + mismatch_offset, file_base + mismatch_offset, PAGE_SIZE) == 0) { + mismatch_offset += PAGE_SIZE; + } + + // Map over similar pages. 
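These two functions are the linker-internal halves of relro sharing. From the application side the feature is assumed to be driven through android_dlopen_ext() with a relro_fd plus companion ANDROID_DLEXT_WRITE_RELRO / ANDROID_DLEXT_USE_RELRO flags; those names come from android/dlext.h and should be treated as assumptions if they did not land together with this change. A hedged sketch of both directions:

#include <android/dlext.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <unistd.h>

// First process: load, relocate, and write the relro pages out to a file.
void* dlopen_write_relro(const char* lib, const char* relro_path) {
  int fd = open(relro_path, O_RDWR | O_CREAT | O_TRUNC, 0644);
  if (fd == -1) return NULL;
  android_dlextinfo extinfo = {};            // relro_fd/flags assumed as described above
  extinfo.flags = ANDROID_DLEXT_WRITE_RELRO;
  extinfo.relro_fd = fd;
  void* handle = android_dlopen_ext(lib, RTLD_NOW, &extinfo);
  close(fd);
  return handle;
}

// Later processes: map identical, already-relocated relro pages from that file.
void* dlopen_use_relro(const char* lib, const char* relro_path) {
  int fd = open(relro_path, O_RDONLY);
  if (fd == -1) return NULL;
  android_dlextinfo extinfo = {};
  extinfo.flags = ANDROID_DLEXT_USE_RELRO;
  extinfo.relro_fd = fd;
  void* handle = android_dlopen_ext(lib, RTLD_NOW, &extinfo);
  close(fd);
  return handle;
}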
+ if (mismatch_offset > match_offset) { + void* map = mmap(mem_base + match_offset, mismatch_offset - match_offset, + PROT_READ, MAP_PRIVATE|MAP_FIXED, fd, match_offset); + if (map == MAP_FAILED) { + munmap(temp_mapping, file_size); + return -1; + } + } + + match_offset = mismatch_offset; + } + + // Add to the base file offset in case there are multiple relro segments. + file_offset += size; + } + munmap(temp_mapping, file_size); + return 0; } + #if defined(__arm__) # ifndef PT_ARM_EXIDX @@ -519,23 +672,24 @@ int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, * Return: * 0 on error, -1 on failure (_no_ error code in errno) */ -int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count, - Elf_Addr load_bias, - Elf_Addr** arm_exidx, unsigned* arm_exidx_count) { - const Elf_Phdr* phdr = phdr_table; - const Elf_Phdr* phdr_limit = phdr + phdr_count; - - for (phdr = phdr_table; phdr < phdr_limit; phdr++) { - if (phdr->p_type != PT_ARM_EXIDX) - continue; - - *arm_exidx = (Elf_Addr*)(load_bias + phdr->p_vaddr); - *arm_exidx_count = (unsigned)(phdr->p_memsz / 8); - return 0; +int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count, + ElfW(Addr) load_bias, + ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) { + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + + for (phdr = phdr_table; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_ARM_EXIDX) { + continue; } - *arm_exidx = NULL; - *arm_exidx_count = 0; - return -1; + + *arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr); + *arm_exidx_count = (unsigned)(phdr->p_memsz / 8); + return 0; + } + *arm_exidx = NULL; + *arm_exidx_count = 0; + return -1; } #endif @@ -553,40 +707,40 @@ int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count, * Return: * void */ -void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count, - Elf_Addr load_bias, - Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags) { - const Elf_Phdr* phdr = phdr_table; - const Elf_Phdr* phdr_limit = phdr + phdr_count; - - for (phdr = phdr_table; phdr < phdr_limit; phdr++) { - if (phdr->p_type != PT_DYNAMIC) { - continue; - } - - *dynamic = reinterpret_cast<Elf_Dyn*>(load_bias + phdr->p_vaddr); - if (dynamic_count) { - *dynamic_count = (unsigned)(phdr->p_memsz / 8); - } - if (dynamic_flags) { - *dynamic_flags = phdr->p_flags; - } - return; +void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count, + ElfW(Addr) load_bias, + ElfW(Dyn)** dynamic, size_t* dynamic_count, ElfW(Word)* dynamic_flags) { + const ElfW(Phdr)* phdr = phdr_table; + const ElfW(Phdr)* phdr_limit = phdr + phdr_count; + + for (phdr = phdr_table; phdr < phdr_limit; phdr++) { + if (phdr->p_type != PT_DYNAMIC) { + continue; } - *dynamic = NULL; + + *dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr); if (dynamic_count) { - *dynamic_count = 0; + *dynamic_count = (unsigned)(phdr->p_memsz / 8); } + if (dynamic_flags) { + *dynamic_flags = phdr->p_flags; + } + return; + } + *dynamic = NULL; + if (dynamic_count) { + *dynamic_count = 0; + } } // Returns the address of the program header table as it appears in the loaded // segments in memory. This is in contrast with 'phdr_table_' which // is temporary and will be released before the library is relocated. 
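Once phdr_table_get_dynamic_section() has located the loaded .dynamic array, consumers walk it until DT_NULL. A small sketch of that consumption, applying the load bias to DT_STRTAB the same way the linker does; dump_soname and its parameters are illustrative, not linker API:

#include <elf.h>
#include <link.h>
#include <stdio.h>

void dump_soname(const ElfW(Dyn)* dynamic, ElfW(Addr) load_bias) {
  const char* strtab = NULL;
  // First pass: locate the string table, rebasing its link-time address.
  for (const ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_STRTAB) {
      strtab = reinterpret_cast<const char*>(load_bias + d->d_un.d_ptr);
    }
  }
  // Second pass: DT_SONAME is an offset into that string table.
  for (const ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_SONAME && strtab != NULL) {
      printf("soname: %s\n", strtab + d->d_un.d_val);
    }
  }
}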
bool ElfReader::FindPhdr() { - const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_; + const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_; // If there is a PT_PHDR, use it directly. - for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { + for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { if (phdr->p_type == PT_PHDR) { return CheckPhdr(load_bias_ + phdr->p_vaddr); } @@ -595,13 +749,13 @@ bool ElfReader::FindPhdr() { // Otherwise, check the first loadable segment. If its file offset // is 0, it starts with the ELF header, and we can trivially find the // loaded program header from it. - for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { + for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { if (phdr->p_type == PT_LOAD) { if (phdr->p_offset == 0) { - Elf_Addr elf_addr = load_bias_ + phdr->p_vaddr; - const Elf_Ehdr* ehdr = (const Elf_Ehdr*)(void*)elf_addr; - Elf_Addr offset = ehdr->e_phoff; - return CheckPhdr((Elf_Addr)ehdr + offset); + ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr; + const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(elf_addr); + ElfW(Addr) offset = ehdr->e_phoff; + return CheckPhdr((ElfW(Addr))ehdr + offset); } break; } @@ -614,17 +768,17 @@ bool ElfReader::FindPhdr() { // Ensures that our program header is actually within a loadable // segment. This should help catch badly-formed ELF files that // would cause the linker to crash later when trying to access it. -bool ElfReader::CheckPhdr(Elf_Addr loaded) { - const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_; - Elf_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf_Phdr)); - for (Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { +bool ElfReader::CheckPhdr(ElfW(Addr) loaded) { + const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_; + ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr))); + for (ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) { if (phdr->p_type != PT_LOAD) { continue; } - Elf_Addr seg_start = phdr->p_vaddr + load_bias_; - Elf_Addr seg_end = phdr->p_filesz + seg_start; + ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_; + ElfW(Addr) seg_end = phdr->p_filesz + seg_start; if (seg_start <= loaded && loaded_end <= seg_end) { - loaded_phdr_ = reinterpret_cast<const Elf_Phdr*>(loaded); + loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded); return true; } } |
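CheckPhdr() is a pure bounds test: the candidate program header table must sit entirely inside the file-backed part of some PT_LOAD segment, so a malformed binary cannot point the linker at unmapped or unrelated memory. A standalone restatement with illustrative names (phdr_in_load_segment is not part of the linker):

#include <elf.h>
#include <link.h>
#include <stddef.h>

bool phdr_in_load_segment(const ElfW(Phdr)* phdr_table, size_t phdr_count,
                          ElfW(Addr) load_bias, ElfW(Addr) loaded) {
  ElfW(Addr) loaded_end = loaded + phdr_count * sizeof(ElfW(Phdr));
  for (size_t i = 0; i < phdr_count; ++i) {
    const ElfW(Phdr)* phdr = &phdr_table[i];
    if (phdr->p_type != PT_LOAD) continue;
    ElfW(Addr) seg_start = load_bias + phdr->p_vaddr;
    ElfW(Addr) seg_end = seg_start + phdr->p_filesz;  // only the file-backed part counts
    if (seg_start <= loaded && loaded_end <= seg_end) return true;
  }
  return false;
}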