author     Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2016-12-12 14:59:47 +0100
committer  Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2016-12-12 14:59:47 +0100
commit     73049dc117da1f669fa7cee4004e69b3493a58c9
tree       37dafd236bb27b1c23f59c7ad41655212f7c0902
parent     385816ce486622d72a8e56443b53ff8404d4ae3b
parent     92595081575c82ace07201a3ea32004eba968c0b
Merge branch 'cm-13.0' of https://github.com/CyanogenMod/android_bionic into replicant-6.0 (replicant-6.0, replicant-6.0-beta-0001, replicant-6.0-alpha-0006)
-rw-r--r--  libc/arch-arm/cortex-a9/bionic/memcpy_base.S  |    2
-rw-r--r--  libc/arch-arm64/generic/bionic/strrchr.S      |  159
-rw-r--r--  libc/arch-arm64/kryo/bionic/memcpy_base.S     |    4
-rw-r--r--  libc/dns/net/getaddrinfo.c                    |   10
-rw-r--r--  libc/dns/net/hosts_cache.c                    |  524
-rw-r--r--  libc/dns/net/hosts_cache.h                    |   23
-rw-r--r--  libc/dns/net/sethostent.c                     |    7
-rw-r--r--  libc/tools/zoneinfo/ZoneCompactor.java        |  192
-rwxr-xr-x  libc/tools/zoneinfo/update-tzdata.py          |  262
-rw-r--r--  libc/upstream-openbsd/lib/libc/gen/fnmatch.c  |  114
-rw-r--r--  libc/zoneinfo/tzdata                          |  bin 494261 -> 491711 bytes
-rw-r--r--  linker/linker.cpp                             |   10
12 files changed, 685 insertions, 622 deletions
diff --git a/libc/arch-arm/cortex-a9/bionic/memcpy_base.S b/libc/arch-arm/cortex-a9/bionic/memcpy_base.S index 6ab5a69..966b9b3 100644 --- a/libc/arch-arm/cortex-a9/bionic/memcpy_base.S +++ b/libc/arch-arm/cortex-a9/bionic/memcpy_base.S @@ -44,7 +44,7 @@ ENTRY_PRIVATE(MEMCPY_BASE) /* check if buffers are aligned. If so, run arm-only version */ eor r3, r0, r1 ands r3, r3, #0x3 - beq __memcpy_base_aligned + beq MEMCPY_BASE_ALIGNED /* Check the upper size limit for Neon unaligned memory access in memcpy */ cmp r2, #224 diff --git a/libc/arch-arm64/generic/bionic/strrchr.S b/libc/arch-arm64/generic/bionic/strrchr.S index 46b5031..409bc71 100644 --- a/libc/arch-arm64/generic/bionic/strrchr.S +++ b/libc/arch-arm64/generic/bionic/strrchr.S @@ -25,147 +25,30 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - -/* Assumptions: - * - * ARMv8-a, AArch64 - * Neon Available. - */ + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ #include <private/bionic_asm.h> -/* Arguments and results. */ -#define srcin x0 -#define chrin w1 - -#define result x0 - -#define src x2 -#define tmp1 x3 -#define wtmp2 w4 -#define tmp3 x5 -#define src_match x6 -#define src_offset x7 -#define const_m1 x8 -#define tmp4 x9 -#define nul_match x10 -#define chr_match x11 - -#define vrepchr v0 -#define vdata1 v1 -#define vdata2 v2 -#define vhas_nul1 v3 -#define vhas_nul2 v4 -#define vhas_chr1 v5 -#define vhas_chr2 v6 -#define vrepmask_0 v7 -#define vrepmask_c v16 -#define vend1 v17 -#define vend2 v18 - -/* Core algorithm. - - For each 32-byte hunk we calculate a 64-bit syndrome value, with - two bits per byte (LSB is always in bits 0 and 1, for both big - and little-endian systems). For each tuple, bit 0 is set iff - the relevant byte matched the requested character; bit 1 is set - iff the relevant byte matched the NUL end of string (we trigger - off bit0 for the special case of looking for NUL). Since the bits - in the syndrome reflect exactly the order in which things occur - in the original string a count_trailing_zeros() operation will - identify exactly which byte is causing the termination, and why. */ - -/* Locals and temporaries. */ - +/* + * Find the last occurrence of a character in a string. + * + * Parameters: + * x0 - str + * x1 - c + * Returns: + * x0 - address of last occurrence of 'c' or 0 + */ ENTRY(strrchr) - /* Magic constant 0x40100401 to allow us to identify which lane - matches the requested byte. Magic constant 0x80200802 used - similarly for NUL termination. */ - mov wtmp2, #0x0401 - movk wtmp2, #0x4010, lsl #16 - dup vrepchr.16b, chrin - bic src, srcin, #31 /* Work with aligned 32-byte hunks. */ - dup vrepmask_c.4s, wtmp2 - mov src_offset, #0 - ands tmp1, srcin, #31 - add vrepmask_0.4s, vrepmask_c.4s, vrepmask_c.4s /* equiv: lsl #1 */ - b.eq .Laligned - - /* Input string is not 32-byte aligned. Rather than forcing - the padding bytes to a safe value, we calculate the syndrome - for all the bytes, but then mask off those bits of the - syndrome that are related to the padding. 
*/ - ld1 {vdata1.16b, vdata2.16b}, [src], #32 - neg tmp1, tmp1 - cmeq vhas_nul1.16b, vdata1.16b, #0 - cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b - cmeq vhas_nul2.16b, vdata2.16b, #0 - cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b - and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b - and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b - and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b - and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b - addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128 - addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128 - addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b // 128->64 - addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64 - mov nul_match, vhas_nul1.2d[0] - lsl tmp1, tmp1, #1 - mov const_m1, #~0 - mov chr_match, vhas_chr1.2d[0] - lsr tmp3, const_m1, tmp1 - - bic nul_match, nul_match, tmp3 // Mask padding bits. - bic chr_match, chr_match, tmp3 // Mask padding bits. - cbnz nul_match, .Ltail - -.Lloop: - cmp chr_match, #0 - csel src_match, src, src_match, ne - csel src_offset, chr_match, src_offset, ne -.Laligned: - ld1 {vdata1.16b, vdata2.16b}, [src], #32 - cmeq vhas_nul1.16b, vdata1.16b, #0 - cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b - cmeq vhas_nul2.16b, vdata2.16b, #0 - cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b - addp vend1.16b, vhas_nul1.16b, vhas_nul2.16b // 256->128 - and vhas_chr1.16b, vhas_chr1.16b, vrepmask_c.16b - and vhas_chr2.16b, vhas_chr2.16b, vrepmask_c.16b - addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128 - addp vend1.16b, vend1.16b, vend1.16b // 128->64 - addp vhas_chr1.16b, vhas_chr1.16b, vhas_chr1.16b // 128->64 - mov nul_match, vend1.2d[0] - mov chr_match, vhas_chr1.2d[0] - cbz nul_match, .Lloop - - and vhas_nul1.16b, vhas_nul1.16b, vrepmask_0.16b - and vhas_nul2.16b, vhas_nul2.16b, vrepmask_0.16b - addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul2.16b - addp vhas_nul1.16b, vhas_nul1.16b, vhas_nul1.16b - mov nul_match, vhas_nul1.2d[0] - -.Ltail: - /* Work out exactly where the string ends. */ - sub tmp4, nul_match, #1 - eor tmp4, tmp4, nul_match - ands chr_match, chr_match, tmp4 - /* And pick the values corresponding to the last match. */ - csel src_match, src, src_match, ne - csel src_offset, chr_match, src_offset, ne - - /* Count down from the top of the syndrome to find the last match. */ - clz tmp3, src_offset - /* Src_match points beyond the word containing the match, so we can - simply subtract half the bit-offset into the syndrome. Because - we are counting down, we need to go back one more character. */ - add tmp3, tmp3, #2 - sub result, src_match, tmp3, lsr #1 - /* But if the syndrome shows no match was found, then return NULL. 
*/ - cmp src_offset, #0 - csel result, result, xzr, ne - + mov x3, #0 + and w1, w1, #0xff +1: ldrb w2, [x0], #1 + cbz w2, 2f + cmp w2, w1 + b.ne 1b + sub x3, x0, #1 + b 1b +2: mov x0, x3 ret - END(strrchr) diff --git a/libc/arch-arm64/kryo/bionic/memcpy_base.S b/libc/arch-arm64/kryo/bionic/memcpy_base.S index 0096bb7..ee0757d 100644 --- a/libc/arch-arm64/kryo/bionic/memcpy_base.S +++ b/libc/arch-arm64/kryo/bionic/memcpy_base.S @@ -170,8 +170,8 @@ kryo_bb_prime_pump: add x10, x1, #(PLDOFFS*PLDSIZE) bic x10, x10, #0x7F sub x12, x12, #PLDOFFS - prfm PLDL1KEEP, [x10, #(-1*PLDSIZE)] - prfm PLDL1KEEP, [x10, #(-1*PLDSIZE+64)] + prfm PLDL1KEEP, [x10, #((PLDOFFS-1)*PLDSIZE)] + prfm PLDL1KEEP, [x10, #((PLDOFFS-1)*PLDSIZE)+64] cmp x12, #(448*1024/128) bhi kryo_bb_copy_128_loop_ddr diff --git a/libc/dns/net/getaddrinfo.c b/libc/dns/net/getaddrinfo.c index cc8b8b4..269be56 100644 --- a/libc/dns/net/getaddrinfo.c +++ b/libc/dns/net/getaddrinfo.c @@ -108,6 +108,8 @@ #include <stdarg.h> #include "nsswitch.h" +#include "hosts_cache.h" + #ifdef ANDROID_CHANGES #include <sys/system_properties.h> #endif /* ANDROID_CHANGES */ @@ -2107,6 +2109,14 @@ _files_getaddrinfo(void *rv, void *cb_data, va_list ap) name = va_arg(ap, char *); pai = va_arg(ap, struct addrinfo *); + memset(&sentinel, 0, sizeof(sentinel)); + cur = &sentinel; + int gai_error = hc_getaddrinfo(name, NULL, pai, &cur); + if (gai_error != EAI_SYSTEM) { + *((struct addrinfo **)rv) = sentinel.ai_next; + return (gai_error == 0 ? NS_SUCCESS : NS_NOTFOUND); + } + // fprintf(stderr, "_files_getaddrinfo() name = '%s'\n", name); memset(&sentinel, 0, sizeof(sentinel)); cur = &sentinel; diff --git a/libc/dns/net/hosts_cache.c b/libc/dns/net/hosts_cache.c new file mode 100644 index 0000000..deafb78 --- /dev/null +++ b/libc/dns/net/hosts_cache.c @@ -0,0 +1,524 @@ +/* + * Copyright (C) 2016 The CyanogenMod Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <fcntl.h> +#include <netdb.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <ctype.h> +#include <strings.h> +#include <sys/file.h> +#include <sys/mman.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <utime.h> +#include <pthread.h> + +#include <netinet/in6.h> +#include <arpa/inet.h> + +#include "hostent.h" +#include "resolv_private.h" + +#define MAX_ADDRLEN (INET6_ADDRSTRLEN - (1 + 5)) +#define MAX_HOSTLEN MAXHOSTNAMELEN + +#define ESTIMATED_LINELEN 32 +#define HCFILE_ALLOC_SIZE 256 + +/* From sethostent.c */ +#define ALIGNBYTES (sizeof(uintptr_t) - 1) +#define ALIGN(p) (((uintptr_t)(p) + ALIGNBYTES) &~ ALIGNBYTES) + +/* + * Host cache entry for hcfile.c_data. + * Offsets are into hcfile.h_data. + * Strings are *not* terminated by NULL, but by whitespace (isspace) or '#'. + * Use hstr* functions with these. + */ +struct hcent +{ + uint32_t addr; + uint32_t name; +}; + +/* + * Overall host cache file state. 
+ */ +struct hcfile +{ + int h_fd; + struct stat h_st; + char *h_data; + + uint32_t c_alloc; + uint32_t c_len; + struct hcent *c_data; +}; +static struct hcfile hcfile; +static pthread_mutex_t hclock = PTHREAD_MUTEX_INITIALIZER; + +static size_t hstrlen(const char *s) +{ + const char *p = s; + while (*p && *p != '#' && !isspace(*p)) + ++p; + return p - s; +} + +static int hstrcmp(const char *a, const char *b) +{ + size_t alen = hstrlen(a); + size_t blen = hstrlen(b); + int res = strncmp(a, b, MIN(alen, blen)); + if (res == 0) + res = alen - blen; + return res; +} + +static char *hstrcpy(char *dest, const char *src) +{ + size_t len = hstrlen(src); + memcpy(dest, src, len); + dest[len] = '\0'; + return dest; +} + +static char *hstrdup(const char *s) +{ + size_t len = hstrlen(s); + char *dest = (char *)malloc(len + 1); + if (!dest) + return NULL; + memcpy(dest, s, len); + dest[len] = '\0'; + return dest; +} + +static int cmp_hcent_name(const void *a, const void *b) +{ + struct hcent *ea = (struct hcent *)a; + const char *na = hcfile.h_data + ea->name; + struct hcent *eb = (struct hcent *)b; + const char *nb = hcfile.h_data + eb->name; + + return hstrcmp(na, nb); +} + +static struct hcent *_hcfindname(const char *name) +{ + size_t first, last, mid; + struct hcent *cur = NULL; + int cmp; + + if (hcfile.c_len == 0) + return NULL; + + first = 0; + last = hcfile.c_len - 1; + mid = (first + last) / 2; + while (first <= last) { + cur = hcfile.c_data + mid; + cmp = hstrcmp(hcfile.h_data + cur->name, name); + if (cmp == 0) + goto found; + if (cmp < 0) + first = mid + 1; + else { + if (mid > 0) + last = mid - 1; + else + return NULL; + } + mid = (first + last) / 2; + } + return NULL; + +found: + while (cur > hcfile.c_data) { + struct hcent *prev = cur - 1; + cmp = cmp_hcent_name(cur, prev); + if (cmp) + break; + cur = prev; + } + + return cur; +} + +/* + * Find next name on line, if any. + * + * Assumes that line is terminated by LF. + */ +static const char *_hcnextname(const char *name) +{ + while (!isspace(*name)) { + if (*name == '#') + return NULL; + ++name; + } + while (isspace(*name)) { + if (*name == '\n') + return NULL; + ++name; + } + if (*name == '#') + return NULL; + return name; +} + +static int _hcfilemmap(void) +{ + struct stat st; + int h_fd; + char *h_addr; + const char *p, *pend; + uint32_t c_alloc; + + h_fd = open(_PATH_HOSTS, O_RDONLY); + if (h_fd < 0) + return -1; + if (flock(h_fd, LOCK_EX) != 0) { + close(h_fd); + return -1; + } + + if (hcfile.h_data) { + memset(&st, 0, sizeof(st)); + if (fstat(h_fd, &st) == 0) { + if (st.st_size == hcfile.h_st.st_size && + st.st_mtime == hcfile.h_st.st_mtime) { + flock(h_fd, LOCK_UN); + close(h_fd); + return 0; + } + } + free(hcfile.c_data); + munmap(hcfile.h_data, hcfile.h_st.st_size); + close(hcfile.h_fd); + memset(&hcfile, 0, sizeof(struct hcfile)); + } + + if (fstat(h_fd, &st) != 0) { + flock(h_fd, LOCK_UN); + close(h_fd); + return -1; + } + h_addr = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, h_fd, 0); + if (h_addr == MAP_FAILED) { + flock(h_fd, LOCK_UN); + close(h_fd); + return -1; + } + + hcfile.h_fd = h_fd; + hcfile.h_st = st; + hcfile.h_data = h_addr; + + c_alloc = 0; + /* + * Do an initial allocation if the file is "large". Estimate + * 32 bytes per line and define "large" as more than half of + * the alloc growth size (256 entries). 
+ */ + if (st.st_size >= ESTIMATED_LINELEN * HCFILE_ALLOC_SIZE / 2) { + c_alloc = st.st_size / ESTIMATED_LINELEN; + hcfile.c_data = malloc(c_alloc * sizeof(struct hcent)); + if (!hcfile.c_data) { + goto oom; + } + } + + p = (const char *)h_addr; + pend = p + st.st_size; + while (p < pend) { + const char *eol, *addr, *name; + size_t len; + addr = p; + eol = memchr(p, '\n', pend - p); + if (!eol) + break; + p = eol + 1; + if (*addr == '#' || *addr == '\n') + continue; + len = hstrlen(addr); + if (len > MAX_ADDRLEN) + continue; + name = addr + len; + while (name < eol && isspace(*name)) + ++name; + while (name < eol) { + len = hstrlen(name); + if (len == 0) + break; + if (len < MAX_HOSTLEN) { + struct hcent *ent; + if (c_alloc <= hcfile.c_len) { + struct hcent *c_data; + c_alloc += HCFILE_ALLOC_SIZE; + c_data = realloc(hcfile.c_data, c_alloc * sizeof(struct hcent)); + if (!c_data) { + goto oom; + } + hcfile.c_data = c_data; + } + ent = hcfile.c_data + hcfile.c_len; + ent->addr = addr - h_addr; + ent->name = name - h_addr; + ++hcfile.c_len; + } + name += len; + while (name < eol && isspace(*name)) + ++name; + } + } + + qsort(hcfile.c_data, hcfile.c_len, + sizeof(struct hcent), cmp_hcent_name); + + flock(h_fd, LOCK_UN); + + return 0; + +oom: + free(hcfile.c_data); + munmap(hcfile.h_data, hcfile.h_st.st_size); + flock(hcfile.h_fd, LOCK_UN); + close(hcfile.h_fd); + memset(&hcfile, 0, sizeof(struct hcfile)); + return -1; +} + +/* + * Caching version of getaddrinfo. + * + * If we find the requested host name in the cache, use getaddrinfo to + * populate the result for each address we find. + * + * Note glibc and bionic differ in the handling of ai_canonname. POSIX + * says that ai_canonname is only populated in the first result entry. + * glibc does this. bionic populates ai_canonname in all result entries. + * We choose the POSIX/glibc way here. 
+ */ +int hc_getaddrinfo(const char *host, const char *service, + const struct addrinfo *hints, + struct addrinfo **result) +{ + int ret = 0; + struct hcent *ent, *cur; + struct addrinfo *ai; + struct addrinfo rhints; + struct addrinfo *last; + int canonname = 0; + int cmp; + + if (getenv("ANDROID_HOSTS_CACHE_DISABLE") != NULL) + return EAI_SYSTEM; + + /* Avoid needless work and recursion */ + if (hints && (hints->ai_flags & AI_NUMERICHOST)) + return EAI_SYSTEM; + if (!host) + return EAI_SYSTEM; + + pthread_mutex_lock(&hclock); + + if (_hcfilemmap() != 0) { + ret = EAI_SYSTEM; + goto out; + } + ent = _hcfindname(host); + if (!ent) { + ret = EAI_NONAME; + goto out; + } + + if (hints) { + canonname = (hints->ai_flags & AI_CANONNAME); + memcpy(&rhints, hints, sizeof(rhints)); + rhints.ai_flags &= ~AI_CANONNAME; + } + else { + memset(&rhints, 0, sizeof(rhints)); + } + rhints.ai_flags |= AI_NUMERICHOST; + + last = NULL; + cur = ent; + do { + char addrstr[MAX_ADDRLEN]; + struct addrinfo *res; + + hstrcpy(addrstr, hcfile.h_data + cur->addr); + + if (getaddrinfo(addrstr, service, &rhints, &res) == 0) { + if (!last) + (*result)->ai_next = res; + else + last->ai_next = res; + last = res; + while (last->ai_next) + last = last->ai_next; + } + + if(cur + 1 >= hcfile.c_data + hcfile.c_len) + break; + cmp = cmp_hcent_name(cur, cur + 1); + cur = cur + 1; + } + while (!cmp); + + if (last == NULL) { + /* This check is equivalent to (*result)->ai_next == NULL */ + ret = EAI_NODATA; + goto out; + } + + if (canonname) { + ai = (*result)->ai_next; + free(ai->ai_canonname); + ai->ai_canonname = hstrdup(hcfile.h_data + ent->name); + } + +out: + pthread_mutex_unlock(&hclock); + return ret; +} + +/* + * Caching version of gethtbyname. + * + * Note glibc and bionic differ in the handling of aliases. glibc returns + * all aliases for all entries, regardless of whether they match h_addrtype. + * bionic returns only the aliases for the first hosts entry. We return all + * aliases for all IPv4 entries. + * + * Additionally, if an alias is IPv6 and the primary name for an alias also + * has an IPv4 entry, glibc will return the IPv4 address(es), but bionic + * will not. Neither do we. 
+ */ +int hc_gethtbyname(const char *host, int af, struct getnamaddr *info) +{ + int ret = NETDB_SUCCESS; + struct hcent *ent, *cur; + int cmp; + size_t addrlen; + unsigned int naliases = 0; + char *aliases[MAXALIASES]; + unsigned int naddrs = 0; + char *addr_ptrs[MAXADDRS]; + unsigned int n; + + if (getenv("ANDROID_HOSTS_CACHE_DISABLE") != NULL) + return NETDB_INTERNAL; + + switch (af) { + case AF_INET: addrlen = NS_INADDRSZ; break; + case AF_INET6: addrlen = NS_IN6ADDRSZ; break; + default: + return NETDB_INTERNAL; + } + + pthread_mutex_lock(&hclock); + + if (_hcfilemmap() != 0) { + ret = NETDB_INTERNAL; + goto out; + } + + ent = _hcfindname(host); + if (!ent) { + ret = HOST_NOT_FOUND; + goto out; + } + + cur = ent; + do { + char addr[16]; + char addrstr[MAX_ADDRLEN]; + char namestr[MAX_HOSTLEN]; + const char *name; + + hstrcpy(addrstr, hcfile.h_data + cur->addr); + if (inet_pton(af, addrstr, &addr) == 1) { + char *aligned; + /* First match is considered the official hostname */ + if (naddrs == 0) { + hstrcpy(namestr, hcfile.h_data + cur->name); + HENT_SCOPY(info->hp->h_name, namestr, info->buf, info->buflen); + } + for (name = hcfile.h_data + cur->name; name; name = _hcnextname(name)) { + if (!hstrcmp(name, host)) + continue; + hstrcpy(namestr, name); + HENT_SCOPY(aliases[naliases], namestr, info->buf, info->buflen); + ++naliases; + if (naliases >= MAXALIASES) + goto nospc; + } + aligned = (char *)ALIGN(info->buf); + if (info->buf != aligned) { + if ((ptrdiff_t)info->buflen < (aligned - info->buf)) + goto nospc; + info->buflen -= (aligned - info->buf); + info->buf = aligned; + } + HENT_COPY(addr_ptrs[naddrs], addr, addrlen, info->buf, info->buflen); + ++naddrs; + if (naddrs >= MAXADDRS) + goto nospc; + } + + if(cur + 1 >= hcfile.c_data + hcfile.c_len) + break; + cmp = cmp_hcent_name(cur, cur + 1); + cur = cur + 1; + } + while (!cmp); + + if (naddrs == 0) { + ret = HOST_NOT_FOUND; + goto out; + } + + addr_ptrs[naddrs++] = NULL; + aliases[naliases++] = NULL; + + /* hp->h_name already populated */ + HENT_ARRAY(info->hp->h_aliases, naliases, info->buf, info->buflen); + for (n = 0; n < naliases; ++n) { + info->hp->h_aliases[n] = aliases[n]; + } + info->hp->h_addrtype = af; + info->hp->h_length = addrlen; + HENT_ARRAY(info->hp->h_addr_list, naddrs, info->buf, info->buflen); + for (n = 0; n < naddrs; ++n) { + info->hp->h_addr_list[n] = addr_ptrs[n]; + } + +out: + pthread_mutex_unlock(&hclock); + *info->he = ret; + return ret; + +nospc: + ret = NETDB_INTERNAL; + goto out; +} diff --git a/libc/dns/net/hosts_cache.h b/libc/dns/net/hosts_cache.h new file mode 100644 index 0000000..fa5488f --- /dev/null +++ b/libc/dns/net/hosts_cache.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 The CyanogenMod Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +struct getnamaddr; + +int hc_getaddrinfo(const char *host, const char *service, + const struct addrinfo *hints, + struct addrinfo **result); + +int hc_gethtbyname(const char *host, int af, struct getnamaddr *info); diff --git a/libc/dns/net/sethostent.c b/libc/dns/net/sethostent.c index 916421e..be29621 100644 --- a/libc/dns/net/sethostent.c +++ b/libc/dns/net/sethostent.c @@ -55,6 +55,8 @@ __RCSID("$NetBSD: sethostent.c,v 1.20 2014/03/17 13:24:23 christos Exp $"); #include "hostent.h" #include "resolv_private.h" +#include "hosts_cache.h" + #define ALIGNBYTES (sizeof(uintptr_t) - 1) #define ALIGN(p) (((uintptr_t)(p) + ALIGNBYTES) &~ ALIGNBYTES) @@ -99,6 +101,11 @@ _hf_gethtbyname(void *rv, void *cb_data, va_list ap) /* NOSTRICT skip string len */(void)va_arg(ap, int); af = va_arg(ap, int); + int rc = hc_gethtbyname(name, af, info); + if (rc != NETDB_INTERNAL) { + return (rc == NETDB_SUCCESS ? NS_SUCCESS : NS_NOTFOUND); + } + #if 0 { res_state res = __res_get_state(); diff --git a/libc/tools/zoneinfo/ZoneCompactor.java b/libc/tools/zoneinfo/ZoneCompactor.java deleted file mode 100644 index 2d598fe..0000000 --- a/libc/tools/zoneinfo/ZoneCompactor.java +++ /dev/null @@ -1,192 +0,0 @@ - -import java.io.*; -import java.util.*; - -// usage: java ZoneCompiler <setup file> <data directory> <output directory> <tzdata version> -// -// Compile a set of tzfile-formatted files into a single file containing an index. -// -// The compilation is controlled by a setup file, which is provided as a -// command-line argument. The setup file has the form: -// -// Link <toName> <fromName> -// ... -// <zone filename> -// ... -// -// Note that the links must be declared prior to the zone names. -// A zone name is a filename relative to the source directory such as -// 'GMT', 'Africa/Dakar', or 'America/Argentina/Jujuy'. -// -// Use the 'zic' command-line tool to convert from flat files -// (such as 'africa' or 'northamerica') to a directory -// hierarchy suitable for this tool (containing files such as 'data/Africa/Abidjan'). -// - -public class ZoneCompactor { - // Maximum number of characters in a zone name, including '\0' terminator. - private static final int MAXNAME = 40; - - // Zone name synonyms. - private Map<String,String> links = new HashMap<String,String>(); - - // File offsets by zone name. - private Map<String,Integer> offsets = new HashMap<String,Integer>(); - - // File lengths by zone name. - private Map<String,Integer> lengths = new HashMap<String,Integer>(); - - // Concatenate the contents of 'inFile' onto 'out'. - private static void copyFile(File inFile, OutputStream out) throws Exception { - byte[] ret = new byte[0]; - - InputStream in = new FileInputStream(inFile); - byte[] buf = new byte[8192]; - while (true) { - int nbytes = in.read(buf); - if (nbytes == -1) { - break; - } - out.write(buf, 0, nbytes); - - byte[] nret = new byte[ret.length + nbytes]; - System.arraycopy(ret, 0, nret, 0, ret.length); - System.arraycopy(buf, 0, nret, ret.length, nbytes); - ret = nret; - } - out.flush(); - } - - public ZoneCompactor(String setupFile, String dataDirectory, String zoneTabFile, String outputDirectory, String version) throws Exception { - // Read the setup file and concatenate all the data. 
- ByteArrayOutputStream allData = new ByteArrayOutputStream(); - BufferedReader reader = new BufferedReader(new FileReader(setupFile)); - String s; - int offset = 0; - while ((s = reader.readLine()) != null) { - s = s.trim(); - if (s.startsWith("Link")) { - StringTokenizer st = new StringTokenizer(s); - st.nextToken(); - String to = st.nextToken(); - String from = st.nextToken(); - links.put(from, to); - } else { - String link = links.get(s); - if (link == null) { - File sourceFile = new File(dataDirectory, s); - long length = sourceFile.length(); - offsets.put(s, offset); - lengths.put(s, (int) length); - - offset += length; - copyFile(sourceFile, allData); - } - } - } - reader.close(); - - // Fill in fields for links. - Iterator<String> it = links.keySet().iterator(); - while (it.hasNext()) { - String from = it.next(); - String to = links.get(from); - - offsets.put(from, offsets.get(to)); - lengths.put(from, lengths.get(to)); - } - - // Create/truncate the destination file. - RandomAccessFile f = new RandomAccessFile(new File(outputDirectory, "tzdata"), "rw"); - f.setLength(0); - - // Write the header. - - // byte[12] tzdata_version -- 'tzdata2012f\0' - // int index_offset -- so we can slip in extra header fields in a backwards-compatible way - // int data_offset - // int zonetab_offset - - // tzdata_version - f.write(toAscii(new byte[12], version)); - - // Write dummy values for the three offsets, and remember where we need to seek back to later - // when we have the real values. - int index_offset_offset = (int) f.getFilePointer(); - f.writeInt(0); - int data_offset_offset = (int) f.getFilePointer(); - f.writeInt(0); - int zonetab_offset_offset = (int) f.getFilePointer(); - f.writeInt(0); - - int index_offset = (int) f.getFilePointer(); - - // Write the index. - ArrayList<String> sortedOlsonIds = new ArrayList<String>(); - sortedOlsonIds.addAll(offsets.keySet()); - Collections.sort(sortedOlsonIds); - it = sortedOlsonIds.iterator(); - while (it.hasNext()) { - String zoneName = it.next(); - if (zoneName.length() >= MAXNAME) { - throw new RuntimeException("zone filename too long: " + zoneName.length()); - } - - // Follow the chain of links to work out where the real data for this zone lives. - String actualZoneName = zoneName; - while (links.get(actualZoneName) != null) { - actualZoneName = links.get(actualZoneName); - } - - f.write(toAscii(new byte[MAXNAME], zoneName)); - f.writeInt(offsets.get(actualZoneName)); - f.writeInt(lengths.get(actualZoneName)); - f.writeInt(0); // Used to be raw GMT offset. No longer used. - } - - int data_offset = (int) f.getFilePointer(); - - // Write the data. - f.write(allData.toByteArray()); - - int zonetab_offset = (int) f.getFilePointer(); - - // Copy the zone.tab. - reader = new BufferedReader(new FileReader(zoneTabFile)); - while ((s = reader.readLine()) != null) { - if (!s.startsWith("#")) { - f.writeBytes(s); - f.write('\n'); - } - } - reader.close(); - - // Go back and fix up the offsets in the header. 
- f.seek(index_offset_offset); - f.writeInt(index_offset); - f.seek(data_offset_offset); - f.writeInt(data_offset); - f.seek(zonetab_offset_offset); - f.writeInt(zonetab_offset); - - f.close(); - } - - private static byte[] toAscii(byte[] dst, String src) { - for (int i = 0; i < src.length(); ++i) { - if (src.charAt(i) > '~') { - throw new RuntimeException("non-ASCII string: " + src); - } - dst[i] = (byte) src.charAt(i); - } - return dst; - } - - public static void main(String[] args) throws Exception { - if (args.length != 5) { - System.err.println("usage: java ZoneCompactor <setup file> <data directory> <zone.tab file> <output directory> <tzdata version>"); - System.exit(0); - } - new ZoneCompactor(args[0], args[1], args[2], args[3], args[4]); - } -} diff --git a/libc/tools/zoneinfo/update-tzdata.py b/libc/tools/zoneinfo/update-tzdata.py deleted file mode 100755 index 68a5ff5..0000000 --- a/libc/tools/zoneinfo/update-tzdata.py +++ /dev/null @@ -1,262 +0,0 @@ -#!/usr/bin/python - -"""Updates the timezone data held in bionic and ICU.""" - -import ftplib -import glob -import httplib -import os -import re -import shutil -import subprocess -import sys -import tarfile -import tempfile - -regions = ['africa', 'antarctica', 'asia', 'australasia', - 'etcetera', 'europe', 'northamerica', 'southamerica', - # These two deliberately come last so they override what came - # before (and each other). - 'backward', 'backzone' ] - -def CheckDirExists(dir, dirname): - if not os.path.isdir(dir): - print "Couldn't find %s (%s)!" % (dirname, dir) - sys.exit(1) - -bionic_libc_tools_zoneinfo_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - -# Find the bionic directory, searching upward from this script. -bionic_dir = os.path.realpath('%s/../../..' % bionic_libc_tools_zoneinfo_dir) -bionic_libc_zoneinfo_dir = '%s/libc/zoneinfo' % bionic_dir -CheckDirExists(bionic_libc_zoneinfo_dir, 'bionic/libc/zoneinfo') -CheckDirExists(bionic_libc_tools_zoneinfo_dir, 'bionic/libc/tools/zoneinfo') -print 'Found bionic in %s ...' % bionic_dir - -# Find the icu directory. -icu_dir = os.path.realpath('%s/../external/icu' % bionic_dir) -icu4c_dir = os.path.realpath('%s/icu4c/source' % icu_dir) -icu4j_dir = os.path.realpath('%s/icu4j' % icu_dir) -CheckDirExists(icu4c_dir, 'external/icu/icu4c/source') -CheckDirExists(icu4j_dir, 'external/icu/icu4j') -print 'Found icu in %s ...' % icu_dir - - -def GetCurrentTzDataVersion(): - return open('%s/tzdata' % bionic_libc_zoneinfo_dir).read().split('\x00', 1)[0] - - -def WriteSetupFile(): - """Writes the list of zones that ZoneCompactor should process.""" - links = [] - zones = [] - for region in regions: - for line in open('extracted/%s' % region): - fields = line.split() - if fields: - if fields[0] == 'Link': - links.append('%s %s %s' % (fields[0], fields[1], fields[2])) - zones.append(fields[2]) - elif fields[0] == 'Zone': - zones.append(fields[1]) - zones.sort() - - setup = open('setup', 'w') - for link in sorted(set(links)): - setup.write('%s\n' % link) - for zone in sorted(set(zones)): - setup.write('%s\n' % zone) - setup.close() - - -def SwitchToNewTemporaryDirectory(): - tmp_dir = tempfile.mkdtemp('-tzdata') - os.chdir(tmp_dir) - print 'Created temporary directory "%s"...' % tmp_dir - - -def FtpRetrieveFile(ftp, filename): - ftp.retrbinary('RETR %s' % filename, open(filename, 'wb').write) - - -def FtpRetrieveFileAndSignature(ftp, data_filename): - """Downloads and repackages the given data from the given FTP server.""" - print 'Downloading data...' 
- FtpRetrieveFile(ftp, data_filename) - - print 'Downloading signature...' - signature_filename = '%s.asc' % data_filename - FtpRetrieveFile(ftp, signature_filename) - - -def HttpRetrieveFile(http, path, output_filename): - http.request("GET", path) - f = open(output_filename, 'wb') - f.write(http.getresponse().read()) - f.close() - - -def HttpRetrieveFileAndSignature(http, data_filename): - """Downloads and repackages the given data from the given HTTP server.""" - path = "/time-zones/repository/releases/%s" % data_filename - - print 'Downloading data...' - HttpRetrieveFile(http, path, data_filename) - - print 'Downloading signature...' - signature_filename = '%s.asc' % data_filename - HttpRetrievefile(http, "%s.asc" % path, signature_filename) - - -def BuildIcuToolsAndData(data_filename): - # Keep track of the original cwd so we can go back to it at the end. - original_working_dir = os.getcwd() - - # Create a directory to run 'make' from. - icu_working_dir = '%s/icu' % original_working_dir - os.mkdir(icu_working_dir) - os.chdir(icu_working_dir) - - # Build the ICU tools. - print 'Configuring ICU tools...' - subprocess.check_call(['%s/runConfigureICU' % icu4c_dir, 'Linux']) - - # Run the ICU tools. - os.chdir('tools/tzcode') - - # The tz2icu tool only picks up icuregions and icuzones in they are in the CWD - for icu_data_file in [ 'icuregions', 'icuzones']: - icu_data_file_source = '%s/tools/tzcode/%s' % (icu4c_dir, icu_data_file) - icu_data_file_symlink = './%s' % icu_data_file - os.symlink(icu_data_file_source, icu_data_file_symlink) - - shutil.copyfile('%s/%s' % (original_working_dir, data_filename), data_filename) - print 'Making ICU data...' - # The Makefile assumes the existence of the bin directory. - os.mkdir('%s/bin' % icu_working_dir) - subprocess.check_call(['make']) - - # Copy the source file to its ultimate destination. - icu_txt_data_dir = '%s/data/misc' % icu4c_dir - print 'Copying zoneinfo64.txt to %s ...' % icu_txt_data_dir - shutil.copy('zoneinfo64.txt', icu_txt_data_dir) - - # Regenerate the .dat file. - os.chdir(icu_working_dir) - subprocess.check_call(['make', 'INCLUDE_UNI_CORE_DATA=1', '-j32']) - - # Copy the .dat file to its ultimate destination. - icu_dat_data_dir = '%s/stubdata' % icu4c_dir - datfiles = glob.glob('data/out/tmp/icudt??l.dat') - if len(datfiles) != 1: - print 'ERROR: Unexpectedly found %d .dat files (%s). Halting.' % (len(datfiles), datfiles) - sys.exit(1) - datfile = datfiles[0] - print 'Copying %s to %s ...' % (datfile, icu_dat_data_dir) - shutil.copy(datfile, icu_dat_data_dir) - - # Generate the ICU4J .jar files - os.chdir('%s/data' % icu_working_dir) - subprocess.check_call(['make', 'icu4j-data']) - - # Copy the ICU4J .jar files to their ultimate destination. - icu_jar_data_dir = '%s/main/shared/data' % icu4j_dir - jarfiles = glob.glob('out/icu4j/*.jar') - if len(jarfiles) != 2: - print 'ERROR: Unexpectedly found %d .jar files (%s). Halting.' % (len(jarfiles), jarfiles) - sys.exit(1) - for jarfile in jarfiles: - print 'Copying %s to %s ...' % (jarfile, icu_jar_data_dir) - shutil.copy(jarfile, icu_jar_data_dir) - - # Switch back to the original working cwd. - os.chdir(original_working_dir) - - -def CheckSignature(data_filename): - signature_filename = '%s.asc' % data_filename - print 'Verifying signature...' 
- # If this fails for you, you probably need to import Paul Eggert's public key: - # gpg --recv-keys ED97E90E62AA7E34 - subprocess.check_call(['gpg', '--trusted-key=ED97E90E62AA7E34', '--verify', - signature_filename, data_filename]) - - -def BuildBionicToolsAndData(data_filename): - new_version = re.search('(tzdata.+)\\.tar\\.gz', data_filename).group(1) - - print 'Extracting...' - os.mkdir('extracted') - tar = tarfile.open(data_filename, 'r') - tar.extractall('extracted') - - print 'Calling zic(1)...' - os.mkdir('data') - zic_inputs = [ 'extracted/%s' % x for x in regions ] - zic_cmd = ['zic', '-d', 'data' ] - zic_cmd.extend(zic_inputs) - subprocess.check_call(zic_cmd) - - WriteSetupFile() - - print 'Calling ZoneCompactor to update bionic to %s...' % new_version - subprocess.check_call(['javac', '-d', '.', - '%s/ZoneCompactor.java' % bionic_libc_tools_zoneinfo_dir]) - subprocess.check_call(['java', 'ZoneCompactor', - 'setup', 'data', 'extracted/zone.tab', - bionic_libc_zoneinfo_dir, new_version]) - - -# Run with no arguments from any directory, with no special setup required. -# See http://www.iana.org/time-zones/ for more about the source of this data. -def main(): - print 'Looking for new tzdata...' - - tzdata_filenames = [] - - # The FTP server lets you download intermediate releases, and also lets you - # download the signatures for verification, so it's your best choice. - use_ftp = True - - if use_ftp: - ftp = ftplib.FTP('ftp.iana.org') - ftp.login() - ftp.cwd('tz/releases') - for filename in ftp.nlst(): - if filename.startswith('tzdata20') and filename.endswith('.tar.gz'): - tzdata_filenames.append(filename) - tzdata_filenames.sort() - else: - http = httplib.HTTPConnection('www.iana.org') - http.request("GET", "/time-zones") - index_lines = http.getresponse().read().split('\n') - for line in index_lines: - m = re.compile('.*href="/time-zones/repository/releases/(tzdata20\d\d\c\.tar\.gz)".*').match(line) - if m: - tzdata_filenames.append(m.group(1)) - - # If you're several releases behind, we'll walk you through the upgrades - # one by one. - current_version = GetCurrentTzDataVersion() - current_filename = '%s.tar.gz' % current_version - for filename in tzdata_filenames: - if filename > current_filename: - print 'Found new tzdata: %s' % filename - SwitchToNewTemporaryDirectory() - if use_ftp: - FtpRetrieveFileAndSignature(ftp, filename) - else: - HttpRetrieveFileAndSignature(http, filename) - - CheckSignature(filename) - BuildIcuToolsAndData(filename) - BuildBionicToolsAndData(filename) - print 'Look in %s and %s for new data files' % (bionic_dir, icu_dir) - sys.exit(0) - - print 'You already have the latest tzdata (%s)!' 
% current_version - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/libc/upstream-openbsd/lib/libc/gen/fnmatch.c b/libc/upstream-openbsd/lib/libc/gen/fnmatch.c index 2c860f7..62c58f8 100644 --- a/libc/upstream-openbsd/lib/libc/gen/fnmatch.c +++ b/libc/upstream-openbsd/lib/libc/gen/fnmatch.c @@ -90,12 +90,72 @@ #include <ctype.h> #include <limits.h> +#include <stdint.h> + #include "charclass.h" #define RANGE_MATCH 1 #define RANGE_NOMATCH 0 #define RANGE_ERROR (-1) +static unsigned int +utf8_len(const char *s) +{ + const unsigned char *b = (const unsigned char *)s; + unsigned int len = 1; + unsigned char c; + + if ((b[0] & 0xc0) != 0xc0) { + return 1; + } + c = b[0] << 1; + while (len < 6 && (c & 0x80)) { + if ((b[len] & 0xc0) != 0x80) { + return 1; + } + c <<= 1; + ++len; + } + + return len; +} + +static void +utf8_inc(const char **s) +{ + *s += utf8_len(*s); +} + +static uint32_t +utf8_get_inc(const char **s) +{ + unsigned int len = utf8_len(*s); + const unsigned char *b = (const unsigned char *)(*s); + unsigned int n; + uint32_t ch; + + *s += len; + + if (len == 1) { + return b[0]; + } + + ch = b[0] & (0xff >> len); + for (n = 1; n < len; ++n) { + ch <<= 6; + ch |= (b[n] & 0x3f); + } + + return ch; +} + +static uint32_t +utf8_get(const char *s) +{ + const char *tmp = s; + return utf8_get_inc(&tmp); +} + static int classmatch(const char *pattern, char test, int foldcase, const char **ep) { @@ -150,7 +210,7 @@ static int fnmatch_ch(const char **pattern, const char **string, int flags) const int escape = !(flags & FNM_NOESCAPE); const int slash = !!(flags & FNM_PATHNAME); int result = FNM_NOMATCH; - const char *startch; + uint32_t startch, endch, compch; int negate; if (**pattern == '[') @@ -171,7 +231,7 @@ static int fnmatch_ch(const char **pattern, const char **string, int flags) if (**pattern == ']') { ++*pattern; /* XXX: Fix for MBCS character width */ - ++*string; + utf8_inc(string); return (result ^ negate); } @@ -199,10 +259,13 @@ leadingclosebrace: * "x-]" is not allowed unless escaped ("x-\]") * XXX: Fix for locale/MBCS character width */ - if (((*pattern)[1] == '-') && ((*pattern)[2] != ']')) + startch = utf8_get_inc(pattern); + compch = utf8_get(*string); + if (((*pattern)[0] == '-') && ((*pattern)[1] != ']')) { - startch = *pattern; - *pattern += (escape && ((*pattern)[2] == '\\')) ? 
3 : 2; + *pattern += 1; + if (escape && **pattern == '\\') + *pattern += 1; /* NOT a properly balanced [expr] pattern, EOS terminated * or ranges containing a slash in FNM_PATHNAME mode pattern @@ -211,32 +274,35 @@ leadingclosebrace: if (!**pattern || (slash && (**pattern == '/'))) break; + endch = utf8_get_inc(pattern); + + /* Refuse to attempt collation for non-ASCII chars */ + if (startch >= 0x80 || endch >= 0x80) + continue; + /* XXX: handle locale/MBCS comparison, advance by MBCS char width */ - if ((**string >= *startch) && (**string <= **pattern)) + if ((compch >= startch) && (compch <= endch)) result = 0; - else if (nocase && (isupper((unsigned char)**string) || - isupper((unsigned char)*startch) || - isupper((unsigned char)**pattern)) - && (tolower((unsigned char)**string) >= - tolower((unsigned char)*startch)) - && (tolower((unsigned char)**string) <= - tolower((unsigned char)**pattern))) + else if (nocase && (isupper(compch) || + isupper(startch) || + isupper(endch)) + && (tolower(compch) >= + tolower(startch)) + && (tolower(compch) <= + tolower(endch))) result = 0; - ++*pattern; continue; } /* XXX: handle locale/MBCS comparison, advance by MBCS char width */ - if ((**string == **pattern)) + if (compch == startch) result = 0; - else if (nocase && (isupper((unsigned char)**string) || - isupper((unsigned char)**pattern)) - && (tolower((unsigned char)**string) == - tolower((unsigned char)**pattern))) + else if (nocase && (isupper(compch) || + isupper(startch)) + && (tolower(compch) == + tolower(startch))) result = 0; - - ++*pattern; } /* NOT a properly balanced [expr] pattern; Rewind @@ -257,7 +323,7 @@ leadingclosebrace: } /* XXX: handle locale/MBCS comparison, advance by the MBCS char width */ - if (**string == **pattern) + if (utf8_get(*string) == utf8_get(*pattern)) result = 0; else if (nocase && (isupper((unsigned char)**string) || isupper((unsigned char)**pattern)) @@ -271,8 +337,8 @@ leadingclosebrace: return result; fnmatch_ch_success: - ++*pattern; - ++*string; + utf8_inc(pattern); + utf8_inc(string); return result; } diff --git a/libc/zoneinfo/tzdata b/libc/zoneinfo/tzdata Binary files differindex c464f46..d2cc535 100644 --- a/libc/zoneinfo/tzdata +++ b/libc/zoneinfo/tzdata diff --git a/linker/linker.cpp b/linker/linker.cpp index bc40cf1..99c02d8 100644 --- a/linker/linker.cpp +++ b/linker/linker.cpp @@ -2208,14 +2208,14 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r MARK(rel->r_offset); TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc), static_cast<size_t>(sym_addr), sym_name); - *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend; + *reinterpret_cast<Elf32_Addr*>(reloc) = sym_addr + addend; break; case R_X86_64_64: count_relocation(kRelocRelative); MARK(rel->r_offset); TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc), static_cast<size_t>(sym_addr), sym_name); - *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend; + *reinterpret_cast<Elf64_Addr*>(reloc) = sym_addr + addend; break; case R_X86_64_PC32: count_relocation(kRelocRelative); @@ -2223,7 +2223,7 @@ bool soinfo::relocate(const VersionTracker& version_tracker, ElfRelIteratorT&& r TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s", static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc), static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name); - *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + addend - reloc; + *reinterpret_cast<Elf32_Addr*>(reloc) = 
sym_addr + addend - reloc; break; #elif defined(__arm__) case R_ARM_ABS32: @@ -3023,7 +3023,11 @@ bool soinfo::link_image(const soinfo_list_t& global_group, const soinfo_list_t& #endif // Make segments writable to allow text relocations to work properly. We will later call // phdr_table_protect_segments() after all of them are applied and all constructors are run. +#if defined(USE_LEGACY_BLOBS) + DEBUG("%s has text relocations. This is wasting memory and prevents " +#else DL_WARN("%s has text relocations. This is wasting memory and prevents " +#endif "security hardening. Please fix.", get_realpath()); if (phdr_table_unprotect_segments(phdr, phnum, load_bias) < 0) { DL_ERR("can't unprotect loadable segments for \"%s\": %s", |
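
Note on the hosts-cache hunks above (hosts_cache.c, getaddrinfo.c, sethostent.c): name lookups through the files backend now consult a sorted, mmap()ed index of /etc/hosts first, and fall back to the old line-by-line parse only when hc_getaddrinfo()/hc_gethtbyname() report EAI_SYSTEM/NETDB_INTERNAL, e.g. when the ANDROID_HOSTS_CACHE_DISABLE environment variable is set. A minimal caller-side sketch follows; it only uses the public getaddrinfo() API, and the host name "myhost" is a hypothetical /etc/hosts entry, not something defined by this patch.

/* Hedged sketch: resolves a name expected to come from /etc/hosts. With this
 * patch the files backend answers from the mmap()ed hosts cache; exporting
 * ANDROID_HOSTS_CACHE_DISABLE=1 before running forces the old parsing path.
 * "myhost" is a placeholder host name. */
#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void) {
    struct addrinfo hints, *res, *ai;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_CANONNAME;  /* per the hunk, only the first entry gets ai_canonname */

    int rc = getaddrinfo("myhost", NULL, &hints, &res);
    if (rc != 0) {
        fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rc));
        return 1;
    }
    for (ai = res; ai != NULL; ai = ai->ai_next) {
        char buf[INET6_ADDRSTRLEN];
        void *addr = (ai->ai_family == AF_INET)
            ? (void *)&((struct sockaddr_in *)ai->ai_addr)->sin_addr
            : (void *)&((struct sockaddr_in6 *)ai->ai_addr)->sin6_addr;
        inet_ntop(ai->ai_family, addr, buf, sizeof(buf));
        printf("%s%s\n", buf, ai->ai_canonname ? ai->ai_canonname : "");
    }
    freeaddrinfo(res);
    return 0;
}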
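Note on the fnmatch.c hunk: literal characters and bracket expressions are now compared as decoded UTF-8 sequences (utf8_get/utf8_inc) instead of single bytes, and range collation is refused when either endpoint is non-ASCII. The sketch below illustrates the intended effect under the assumption that the parts of the matcher not shown in the hunk advance multi-byte characters consistently; with the old byte-wise code the same call would likely fail, since only the first byte of "é" would be consumed by the bracket expression.

/* Hedged sketch of the UTF-8-aware matching the fnmatch.c hunk aims at.
 * The behaviour of code outside the shown hunk is an assumption. */
#include <fnmatch.h>
#include <stdio.h>

int main(void) {
    /* "é" is the two-byte UTF-8 sequence 0xC3 0xA9. */
    const char *string  = "caf\xc3\xa9";     /* "café" */
    const char *pattern = "caf[\xc3\xa9e]";  /* "caf[ée]" */

    int rc = fnmatch(pattern, string, 0);
    printf("fnmatch(\"%s\", \"%s\") = %d (%s)\n",
           pattern, string, rc, rc == 0 ? "match" : "no match");
    return 0;
}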