path: root/libc/bionic
Diffstat (limited to 'libc/bionic')
-rw-r--r--  libc/bionic/bionic_clone.c                                          |   81
-rw-r--r--  libc/bionic/clearenv.c                                              |   39
-rw-r--r--  libc/bionic/cpuacct.c                                               |   60
-rw-r--r--  libc/bionic/dlmalloc.c                                              |   16
-rw-r--r--  libc/bionic/dlmalloc.h                                              |    8
-rw-r--r--  libc/bionic/err.c                                                   |  126
-rw-r--r--  libc/bionic/fdprintf.c                                              |   58
-rw-r--r--  libc/bionic/fork.c                                                  |    8
-rw-r--r--  libc/bionic/fts.c                                                   | 1041
-rw-r--r--  libc/bionic/libc_init_dynamic.c                                     |    9
-rw-r--r--  libc/bionic/libc_init_static.c                                      |    6
-rw-r--r--  libc/bionic/logd_write.c                                            |    3
-rw-r--r--  libc/bionic/malloc_debug_common.c                                   |  488
-rw-r--r--  libc/bionic/malloc_debug_common.h                                   |   99
-rw-r--r--  libc/bionic/malloc_debug_leak.c (renamed from libc/bionic/malloc_leak.c) |  365
-rw-r--r--  libc/bionic/malloc_debug_qemu.c                                     | 1014
-rw-r--r--  libc/bionic/pthread.c                                               |  668
-rw-r--r--  libc/bionic/semaphore.c                                             |    5
-rw-r--r--  libc/bionic/stubs.c                                                 |   25
19 files changed, 3512 insertions, 607 deletions
diff --git a/libc/bionic/bionic_clone.c b/libc/bionic/bionic_clone.c
new file mode 100644
index 0000000..6b2fa58
--- /dev/null
+++ b/libc/bionic/bionic_clone.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#define _GNU_SOURCE 1
+#include <sched.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* WARNING: AT THE MOMENT, THIS IS ONLY SUPPORTED ON ARM
+ */
+
+extern int __bionic_clone(unsigned long clone_flags,
+ void* newsp,
+ int *parent_tidptr,
+ void *new_tls,
+ int *child_tidptr,
+ int (*fn)(void *),
+ void *arg);
+
+extern void _exit_thread(int retCode);
+
+/* this function is called from the __bionic_clone
+ * assembly fragment to call the thread function
+ * then exit. */
+extern void
+__bionic_clone_entry( int (*fn)(void *), void *arg )
+{
+ int ret = (*fn)(arg);
+ _exit_thread(ret);
+}
+
+int
+clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
+{
+ va_list args;
+ int *parent_tidptr = NULL;
+ void *new_tls = NULL;
+ int *child_tidptr = NULL;
+ int ret;
+
+ /* extract optional parameters - they are cumulative */
+ va_start(args, arg);
+ if (flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID)) {
+ parent_tidptr = va_arg(args, int*);
+ }
+ if (flags & (CLONE_SETTLS|CLONE_CHILD_SETTID)) {
+ new_tls = va_arg(args, void*);
+ }
+ if (flags & CLONE_CHILD_SETTID) {
+ child_tidptr = va_arg(args, int*);
+ }
+ va_end(args);
+
+ ret = __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
+ return ret;
+}
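
A minimal usage sketch (child_fn and the 16 KiB stack are hypothetical, not part of the change). Because the optional arguments are cumulative, requesting CLONE_CHILD_SETTID means the parent-tid and TLS slots must be supplied as well, in that order:

    static int child_fn(void *arg) { return 0; }

    int ptid, ctid;
    char *stack = malloc(16384);
    /* the stack grows down on ARM, so pass the top of the allocation */
    clone(child_fn, stack + 16384,
          CLONE_VM | CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | SIGCHLD,
          NULL, &ptid, NULL, &ctid);
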
diff --git a/libc/bionic/clearenv.c b/libc/bionic/clearenv.c
new file mode 100644
index 0000000..ffc58d9
--- /dev/null
+++ b/libc/bionic/clearenv.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+extern char** environ;
+
+int clearenv(void)
+{
+ char **P = environ;
+
+ /* NULL out every entry; the terminating NULL slot remains */
+ for (; *P; ++P)
+ *P = 0;
+ return 0;
+}
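
A caller that wants a scrubbed environment can clear it and then repopulate selectively; a sketch:

    clearenv();
    setenv("PATH", "/system/bin", 1);
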
diff --git a/libc/bionic/cpuacct.c b/libc/bionic/cpuacct.c
new file mode 100644
index 0000000..abdbc51
--- /dev/null
+++ b/libc/bionic/cpuacct.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/stat.h>
+//#include <sys/types.h>
+
+int cpuacct_add(uid_t uid)
+{
+ int count;
+ FILE *fp;
+ char buf[80];
+
+ count = snprintf(buf, sizeof(buf), "/acct/uid/%d/tasks", uid);
+ fp = fopen(buf, "w+");
+ if (!fp) {
+ /* Note: sizeof("tasks") returns 6, which includes the NULL char */
+ buf[count - sizeof("tasks")] = 0;
+ if (mkdir(buf, 0775) < 0)
+ return -errno;
+
+ /* Note: sizeof("tasks") returns 6, which includes the NULL char */
+ buf[count - sizeof("tasks")] = '/';
+ fp = fopen(buf, "w+");
+ }
+ if (!fp)
+ return -errno;
+
+ fprintf(fp, "0");
+ if (fclose(fp))
+ return -errno;
+
+ return 0;
+}
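
A worked example of the path arithmetic, assuming uid 1000 (count == 20, sizeof("tasks") == 6):

    buf                  = "/acct/uid/1000/tasks"
    buf[20 - 6] = 0   -> "/acct/uid/1000"        /* mkdir target */
    buf[20 - 6] = '/' -> "/acct/uid/1000/tasks"  /* reopened for the write */
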
diff --git a/libc/bionic/dlmalloc.c b/libc/bionic/dlmalloc.c
index f6f878e..19fbb75 100644
--- a/libc/bionic/dlmalloc.c
+++ b/libc/bionic/dlmalloc.c
@@ -390,9 +390,9 @@ MALLINFO_FIELD_TYPE default: size_t
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set
REALLOC_ZERO_BYTES_FREES default: not defined
- This should be set if a call to realloc with zero bytes should
- be the same as a call to free. Some people think it should. Otherwise,
- since this malloc returns a unique pointer for malloc(0), so does
+ This should be set if a call to realloc with zero bytes should
+ be the same as a call to free. Some people think it should. Otherwise,
+ since this malloc returns a unique pointer for malloc(0), so does
realloc(p, 0).
LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
@@ -671,7 +671,7 @@ extern "C" {
/* ------------------- Declarations of public routines ------------------- */
/* Check an additional macro for the five primary functions */
-#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK)
+#ifndef USE_DL_PREFIX
#define dlcalloc calloc
#define dlfree free
#define dlmalloc malloc
@@ -3627,7 +3627,7 @@ static void* sys_alloc(mstate m, size_t nb) {
m->seg.sflags = mmap_flag;
m->magic = mparams.magic;
init_bins(m);
- if (is_global(m))
+ if (is_global(m))
init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
else {
/* Offset top by embedded malloc_state */
@@ -3778,7 +3778,7 @@ static int sys_trim(mstate m, size_t pad) {
}
/* Unmap any unused mmapped segments */
- if (HAVE_MMAP)
+ if (HAVE_MMAP)
released += release_unused_segments(m);
/* On failure, disable autotrim to avoid repeated failed future calls */
@@ -3986,7 +3986,7 @@ static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
while (a < alignment) a <<= 1;
alignment = a;
}
-
+
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
MALLOC_FAILURE_ACTION;
@@ -5446,5 +5446,5 @@ History:
Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
* Based loosely on libg++-1.2X malloc. (It retains some of the overall
structure of old version, but most details differ.)
-
+
*/
diff --git a/libc/bionic/dlmalloc.h b/libc/bionic/dlmalloc.h
index e5f7d4a..1b642d2 100644
--- a/libc/bionic/dlmalloc.h
+++ b/libc/bionic/dlmalloc.h
@@ -1,14 +1,14 @@
/*
Default header file for malloc-2.8.x, written by Doug Lea
and released to the public domain, as explained at
- http://creativecommons.org/licenses/publicdomain.
-
+ http://creativecommons.org/licenses/publicdomain.
+
last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
This header is for ANSI C/C++ only. You can set any of
the following #defines before including:
- * If USE_DL_PREFIX is defined, it is assumed that malloc.c
+ * If USE_DL_PREFIX is defined, it is assumed that malloc.c
was also compiled with this option, so all routines
have names starting with "dl".
@@ -34,7 +34,7 @@ extern "C" {
#if !ONLY_MSPACES
/* Check an additional macro for the five primary functions */
-#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK)
+#if !defined(USE_DL_PREFIX)
#define dlcalloc calloc
#define dlfree free
#define dlmalloc malloc
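
With the simplified condition, any build that leaves USE_DL_PREFIX undefined aliases the dl-prefixed entry points directly onto the standard names; a sketch of the preprocessor effect:

    void *p = dlmalloc(32);   /* expands to malloc(32) */
    dlfree(p);                /* expands to free(p)    */
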
diff --git a/libc/bionic/err.c b/libc/bionic/err.c
new file mode 100644
index 0000000..535b7e1
--- /dev/null
+++ b/libc/bionic/err.c
@@ -0,0 +1,126 @@
+/*-
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <err.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <string.h>
+
+extern char *__progname;
+
+__noreturn void
+err(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ verr(eval, fmt, ap);
+ va_end(ap);
+}
+
+__noreturn void
+errx(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ verrx(eval, fmt, ap);
+ va_end(ap);
+}
+
+__noreturn void
+verr(int eval, const char *fmt, va_list ap)
+{
+ int sverrno;
+
+ sverrno = errno;
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL) {
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": ");
+ }
+ (void)fprintf(stderr, "%s\n", strerror(sverrno));
+ exit(eval);
+}
+
+
+__noreturn void
+verrx(int eval, const char *fmt, va_list ap)
+{
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL)
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, "\n");
+ exit(eval);
+}
+
+void
+warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vwarn(fmt, ap);
+ va_end(ap);
+}
+
+void
+warnx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vwarnx(fmt, ap);
+ va_end(ap);
+}
+
+void
+vwarn(const char *fmt, va_list ap)
+{
+ int sverrno;
+
+ sverrno = errno;
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL) {
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": ");
+ }
+ (void)fprintf(stderr, "%s\n", strerror(sverrno));
+}
+
+void
+vwarnx(const char *fmt, va_list ap)
+{
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL)
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, "\n");
+}
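
Typical use of this family (a sketch; err() and warn() append strerror(errno), the x variants do not):

    int fd = open(path, O_RDONLY);
    if (fd < 0)
        err(1, "%s", path);          /* "prog: <path>: <errno text>", then exit(1) */
    if (skipped)
        warnx("skipping %s", path);  /* "prog: skipping <path>", keeps running */
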
diff --git a/libc/bionic/fdprintf.c b/libc/bionic/fdprintf.c
new file mode 100644
index 0000000..c1d05ad
--- /dev/null
+++ b/libc/bionic/fdprintf.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int vfdprintf(int fd, const char * __restrict format, __va_list ap)
+{
+ char *buf = NULL;
+ int ret;
+ ret = vasprintf(&buf, format, ap);
+ if (ret < 0)
+ goto end;
+
+ ret = write(fd, buf, ret);
+ free(buf);
+end:
+ return ret;
+}
+
+int fdprintf(int fd, const char * __restrict format, ...)
+{
+ __va_list ap;
+ int ret;
+
+ va_start(ap, format);
+ ret = vfdprintf(fd, format, ap);
+ va_end(ap);
+
+ return ret;
+}
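
fdprintf() formats into a heap buffer via vasprintf() and writes the result straight to the descriptor, bypassing stdio buffering; a sketch:

    fdprintf(STDERR_FILENO, "pid %d: %s\n", getpid(), "starting");
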
diff --git a/libc/bionic/fork.c b/libc/bionic/fork.c
index 1c6a4ba..e20f548 100644
--- a/libc/bionic/fork.c
+++ b/libc/bionic/fork.c
@@ -43,6 +43,14 @@ int fork(void)
ret = __fork();
if (ret != 0) { /* not a child process */
__timer_table_start_stop(0);
+ } else {
+ /*
+ * The newly created process must update its cpu accounting:
+ * calling cpuacct_add with our uid adds the current task id
+ * to that uid's accounting group.
+ */
+ cpuacct_add(getuid());
}
return ret;
}
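
From the caller's perspective nothing changes; only the child branch performs the extra accounting. A sketch:

    pid_t pid = fork();
    if (pid == 0) {
        /* child: fork() has already added this task to /acct/uid/<uid>/tasks */
        _exit(0);
    }
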
diff --git a/libc/bionic/fts.c b/libc/bionic/fts.c
new file mode 100644
index 0000000..3dcfb28
--- /dev/null
+++ b/libc/bionic/fts.c
@@ -0,0 +1,1041 @@
+/* $OpenBSD: fts.c,v 1.43 2009/08/27 16:19:27 millert Exp $ */
+
+/*-
+ * Copyright (c) 1990, 1993, 1994
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fts.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define MAX(a,b) ((a)>(b)?(a):(b))
+
+static FTSENT *fts_alloc(FTS *, char *, size_t);
+static FTSENT *fts_build(FTS *, int);
+static void fts_lfree(FTSENT *);
+static void fts_load(FTS *, FTSENT *);
+static size_t fts_maxarglen(char * const *);
+static void fts_padjust(FTS *, FTSENT *);
+static int fts_palloc(FTS *, size_t);
+static FTSENT *fts_sort(FTS *, FTSENT *, int);
+static u_short fts_stat(FTS *, FTSENT *, int);
+static int fts_safe_changedir(FTS *, FTSENT *, int, char *);
+
+#define ISDOT(a) (a[0] == '.' && (!a[1] || (a[1] == '.' && !a[2])))
+
+#define CLR(opt) (sp->fts_options &= ~(opt))
+#define ISSET(opt) (sp->fts_options & (opt))
+#define SET(opt) (sp->fts_options |= (opt))
+
+#define FCHDIR(sp, fd) (!ISSET(FTS_NOCHDIR) && fchdir(fd))
+
+/* fts_build flags */
+#define BCHILD 1 /* fts_children */
+#define BNAMES 2 /* fts_children, names only */
+#define BREAD 3 /* fts_read */
+
+FTS *
+fts_open(char * const *argv, int options,
+ int (*compar)(const FTSENT **, const FTSENT **))
+{
+ FTS *sp;
+ FTSENT *p, *root;
+ int nitems;
+ FTSENT *parent, *tmp;
+ size_t len;
+
+ /* Options check. */
+ if (options & ~FTS_OPTIONMASK) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Allocate/initialize the stream */
+ if ((sp = calloc(1, sizeof(FTS))) == NULL)
+ return (NULL);
+ sp->fts_compar = compar;
+ sp->fts_options = options;
+
+ /* Logical walks turn on NOCHDIR; symbolic links are too hard. */
+ if (ISSET(FTS_LOGICAL))
+ SET(FTS_NOCHDIR);
+
+ /*
+ * Start out with 1K of path space, and enough, in any case,
+ * to hold the user's paths.
+ */
+ if (fts_palloc(sp, MAX(fts_maxarglen(argv), MAXPATHLEN)))
+ goto mem1;
+
+ /* Allocate/initialize root's parent. */
+ if ((parent = fts_alloc(sp, "", 0)) == NULL)
+ goto mem2;
+ parent->fts_level = FTS_ROOTPARENTLEVEL;
+
+ /* Allocate/initialize root(s). */
+ for (root = NULL, nitems = 0; *argv; ++argv, ++nitems) {
+ /* Don't allow zero-length paths. */
+ if ((len = strlen(*argv)) == 0) {
+ errno = ENOENT;
+ goto mem3;
+ }
+
+ if ((p = fts_alloc(sp, *argv, len)) == NULL)
+ goto mem3;
+ p->fts_level = FTS_ROOTLEVEL;
+ p->fts_parent = parent;
+ p->fts_accpath = p->fts_name;
+ p->fts_info = fts_stat(sp, p, ISSET(FTS_COMFOLLOW));
+
+ /* Command-line "." and ".." are real directories. */
+ if (p->fts_info == FTS_DOT)
+ p->fts_info = FTS_D;
+
+ /*
+ * If comparison routine supplied, traverse in sorted
+ * order; otherwise traverse in the order specified.
+ */
+ if (compar) {
+ p->fts_link = root;
+ root = p;
+ } else {
+ p->fts_link = NULL;
+ if (root == NULL)
+ tmp = root = p;
+ else {
+ tmp->fts_link = p;
+ tmp = p;
+ }
+ }
+ }
+ if (compar && nitems > 1)
+ root = fts_sort(sp, root, nitems);
+
+ /*
+ * Allocate a dummy pointer and make fts_read think that we've just
+ * finished the node before the root(s); set p->fts_info to FTS_INIT
+ * so that everything about the "current" node is ignored.
+ */
+ if ((sp->fts_cur = fts_alloc(sp, "", 0)) == NULL)
+ goto mem3;
+ sp->fts_cur->fts_link = root;
+ sp->fts_cur->fts_info = FTS_INIT;
+
+ /*
+ * If using chdir(2), grab a file descriptor pointing to dot to ensure
+ * that we can get back here; this could be avoided for some paths,
+ * but almost certainly not worth the effort. Slashes, symbolic links,
+ * and ".." are all fairly nasty problems. Note, if we can't get the
+ * descriptor we run anyway, just more slowly.
+ */
+ if (!ISSET(FTS_NOCHDIR) && (sp->fts_rfd = open(".", O_RDONLY, 0)) < 0)
+ SET(FTS_NOCHDIR);
+
+ if (nitems == 0)
+ free(parent);
+
+ return (sp);
+
+mem3: fts_lfree(root);
+ free(parent);
+mem2: free(sp->fts_path);
+mem1: free(sp);
+ return (NULL);
+}
+
+static void
+fts_load(FTS *sp, FTSENT *p)
+{
+ size_t len;
+ char *cp;
+
+ /*
+ * Load the stream structure for the next traversal. Since we don't
+ * actually enter the directory until after the preorder visit, set
+ * the fts_accpath field specially so the chdir gets done to the right
+ * place and the user can access the first node. From fts_open it's
+ * known that the path will fit.
+ */
+ len = p->fts_pathlen = p->fts_namelen;
+ memmove(sp->fts_path, p->fts_name, len + 1);
+ if ((cp = strrchr(p->fts_name, '/')) && (cp != p->fts_name || cp[1])) {
+ len = strlen(++cp);
+ memmove(p->fts_name, cp, len + 1);
+ p->fts_namelen = len;
+ }
+ p->fts_accpath = p->fts_path = sp->fts_path;
+ sp->fts_dev = p->fts_dev;
+}
+
+int
+fts_close(FTS *sp)
+{
+ FTSENT *freep, *p;
+ int rfd, error = 0;
+
+ /*
+ * This still works if we haven't read anything -- the dummy structure
+ * points to the root list, so we step through to the end of the root
+ * list which has a valid parent pointer.
+ */
+ if (sp->fts_cur) {
+ for (p = sp->fts_cur; p->fts_level >= FTS_ROOTLEVEL;) {
+ freep = p;
+ p = p->fts_link ? p->fts_link : p->fts_parent;
+ free(freep);
+ }
+ free(p);
+ }
+
+ /* Stash the original directory fd if needed. */
+ rfd = ISSET(FTS_NOCHDIR) ? -1 : sp->fts_rfd;
+
+ /* Free up child linked list, sort array, path buffer, stream ptr.*/
+ if (sp->fts_child)
+ fts_lfree(sp->fts_child);
+ if (sp->fts_array)
+ free(sp->fts_array);
+ free(sp->fts_path);
+ free(sp);
+
+ /* Return to original directory, checking for error. */
+ if (rfd != -1) {
+ int saved_errno;
+ error = fchdir(rfd);
+ saved_errno = errno;
+ (void)close(rfd);
+ errno = saved_errno;
+ }
+
+ return (error);
+}
+
+/*
+ * Special case of "/" at the end of the path so that slashes aren't
+ * appended which would cause paths to be written as "....//foo".
+ */
+#define NAPPEND(p) \
+ (p->fts_path[p->fts_pathlen - 1] == '/' \
+ ? p->fts_pathlen - 1 : p->fts_pathlen)
+
+FTSENT *
+fts_read(FTS *sp)
+{
+ FTSENT *p, *tmp;
+ int instr;
+ char *t;
+ int saved_errno;
+
+ /* If finished or unrecoverable error, return NULL. */
+ if (sp->fts_cur == NULL || ISSET(FTS_STOP))
+ return (NULL);
+
+ /* Set current node pointer. */
+ p = sp->fts_cur;
+
+ /* Save and zero out user instructions. */
+ instr = p->fts_instr;
+ p->fts_instr = FTS_NOINSTR;
+
+ /* Any type of file may be re-visited; re-stat and re-turn. */
+ if (instr == FTS_AGAIN) {
+ p->fts_info = fts_stat(sp, p, 0);
+ return (p);
+ }
+
+ /*
+ * Following a symlink -- SLNONE test allows application to see
+ * SLNONE and recover. If indirecting through a symlink, keep a
+ * pointer to the current location; if that pointer cannot be
+ * obtained, the follow fails.
+ */
+ if (instr == FTS_FOLLOW &&
+ (p->fts_info == FTS_SL || p->fts_info == FTS_SLNONE)) {
+ p->fts_info = fts_stat(sp, p, 1);
+ if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
+ if ((p->fts_symfd = open(".", O_RDONLY, 0)) < 0) {
+ p->fts_errno = errno;
+ p->fts_info = FTS_ERR;
+ } else
+ p->fts_flags |= FTS_SYMFOLLOW;
+ }
+ return (p);
+ }
+
+ /* Directory in pre-order. */
+ if (p->fts_info == FTS_D) {
+ /* If skipped or crossed mount point, do post-order visit. */
+ if (instr == FTS_SKIP ||
+ (ISSET(FTS_XDEV) && p->fts_dev != sp->fts_dev)) {
+ if (p->fts_flags & FTS_SYMFOLLOW)
+ (void)close(p->fts_symfd);
+ if (sp->fts_child) {
+ fts_lfree(sp->fts_child);
+ sp->fts_child = NULL;
+ }
+ p->fts_info = FTS_DP;
+ return (p);
+ }
+
+ /* Rebuild if only read the names and now traversing. */
+ if (sp->fts_child && ISSET(FTS_NAMEONLY)) {
+ CLR(FTS_NAMEONLY);
+ fts_lfree(sp->fts_child);
+ sp->fts_child = NULL;
+ }
+
+ /*
+ * Cd to the subdirectory.
+ *
+ * If have already read and now fail to chdir, whack the list
+ * to make the names come out right, and set the parent errno
+ * so the application will eventually get an error condition.
+ * Set the FTS_DONTCHDIR flag so that when we logically change
+ * directories back to the parent we don't do a chdir.
+ *
+ * If we haven't read the directory yet, do so now. If the read
+ * fails, fts_build sets FTS_STOP or the fts_info field of the node.
+ */
+ if (sp->fts_child) {
+ if (fts_safe_changedir(sp, p, -1, p->fts_accpath)) {
+ p->fts_errno = errno;
+ p->fts_flags |= FTS_DONTCHDIR;
+ for (p = sp->fts_child; p; p = p->fts_link)
+ p->fts_accpath =
+ p->fts_parent->fts_accpath;
+ }
+ } else if ((sp->fts_child = fts_build(sp, BREAD)) == NULL) {
+ if (ISSET(FTS_STOP))
+ return (NULL);
+ return (p);
+ }
+ p = sp->fts_child;
+ sp->fts_child = NULL;
+ goto name;
+ }
+
+ /* Move to the next node on this level. */
+next: tmp = p;
+ if ((p = p->fts_link)) {
+ free(tmp);
+
+ /*
+ * If reached the top, return to the original directory (or
+ * the root of the tree), and load the paths for the next root.
+ */
+ if (p->fts_level == FTS_ROOTLEVEL) {
+ if (FCHDIR(sp, sp->fts_rfd)) {
+ SET(FTS_STOP);
+ return (NULL);
+ }
+ fts_load(sp, p);
+ return (sp->fts_cur = p);
+ }
+
+ /*
+ * User may have called fts_set on the node. If skipped,
+ * ignore. If followed, get a file descriptor so we can
+ * get back if necessary.
+ */
+ if (p->fts_instr == FTS_SKIP)
+ goto next;
+ if (p->fts_instr == FTS_FOLLOW) {
+ p->fts_info = fts_stat(sp, p, 1);
+ if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
+ if ((p->fts_symfd =
+ open(".", O_RDONLY, 0)) < 0) {
+ p->fts_errno = errno;
+ p->fts_info = FTS_ERR;
+ } else
+ p->fts_flags |= FTS_SYMFOLLOW;
+ }
+ p->fts_instr = FTS_NOINSTR;
+ }
+
+name: t = sp->fts_path + NAPPEND(p->fts_parent);
+ *t++ = '/';
+ memmove(t, p->fts_name, p->fts_namelen + 1);
+ return (sp->fts_cur = p);
+ }
+
+ /* Move up to the parent node. */
+ p = tmp->fts_parent;
+ free(tmp);
+
+ if (p->fts_level == FTS_ROOTPARENTLEVEL) {
+ /*
+ * Done; free everything up and set errno to 0 so the user
+ * can distinguish between error and EOF.
+ */
+ free(p);
+ errno = 0;
+ return (sp->fts_cur = NULL);
+ }
+
+ /* NUL terminate the pathname. */
+ sp->fts_path[p->fts_pathlen] = '\0';
+
+ /*
+ * Return to the parent directory. If at a root node or came through
+ * a symlink, go back through the file descriptor. Otherwise, cd up
+ * one directory.
+ */
+ if (p->fts_level == FTS_ROOTLEVEL) {
+ if (FCHDIR(sp, sp->fts_rfd)) {
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ } else if (p->fts_flags & FTS_SYMFOLLOW) {
+ if (FCHDIR(sp, p->fts_symfd)) {
+ saved_errno = errno;
+ (void)close(p->fts_symfd);
+ errno = saved_errno;
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ (void)close(p->fts_symfd);
+ } else if (!(p->fts_flags & FTS_DONTCHDIR) &&
+ fts_safe_changedir(sp, p->fts_parent, -1, "..")) {
+ SET(FTS_STOP);
+ sp->fts_cur = p;
+ return (NULL);
+ }
+ p->fts_info = p->fts_errno ? FTS_ERR : FTS_DP;
+ return (sp->fts_cur = p);
+}
+
+/*
+ * Fts_set takes the stream as an argument although it's not used in this
+ * implementation; it would be necessary if anyone wanted to add global
+ * semantics to fts using fts_set. An error return is allowed for similar
+ * reasons.
+ */
+/* ARGSUSED */
+int
+fts_set(FTS *sp, FTSENT *p, int instr)
+{
+ if (instr && instr != FTS_AGAIN && instr != FTS_FOLLOW &&
+ instr != FTS_NOINSTR && instr != FTS_SKIP) {
+ errno = EINVAL;
+ return (1);
+ }
+ p->fts_instr = instr;
+ return (0);
+}
+
+FTSENT *
+fts_children(FTS *sp, int instr)
+{
+ FTSENT *p;
+ int fd;
+
+ if (instr && instr != FTS_NAMEONLY) {
+ errno = EINVAL;
+ return (NULL);
+ }
+
+ /* Set current node pointer. */
+ p = sp->fts_cur;
+
+ /*
+ * Errno set to 0 so user can distinguish empty directory from
+ * an error.
+ */
+ errno = 0;
+
+ /* Fatal errors stop here. */
+ if (ISSET(FTS_STOP))
+ return (NULL);
+
+ /* Return logical hierarchy of user's arguments. */
+ if (p->fts_info == FTS_INIT)
+ return (p->fts_link);
+
+ /*
+ * If not a directory being visited in pre-order, stop here. Could
+ * allow FTS_DNR, assuming the user has fixed the problem, but the
+ * same effect is available with FTS_AGAIN.
+ */
+ if (p->fts_info != FTS_D /* && p->fts_info != FTS_DNR */)
+ return (NULL);
+
+ /* Free up any previous child list. */
+ if (sp->fts_child)
+ fts_lfree(sp->fts_child);
+
+ if (instr == FTS_NAMEONLY) {
+ SET(FTS_NAMEONLY);
+ instr = BNAMES;
+ } else
+ instr = BCHILD;
+
+ /*
+ * If using chdir on a relative path and called BEFORE fts_read does
+ * its chdir to the root of a traversal, we can lose -- we need to
+ * chdir into the subdirectory, and we don't know where the current
+ * directory is, so we can't get back so that the upcoming chdir by
+ * fts_read will work.
+ */
+ if (p->fts_level != FTS_ROOTLEVEL || p->fts_accpath[0] == '/' ||
+ ISSET(FTS_NOCHDIR))
+ return (sp->fts_child = fts_build(sp, instr));
+
+ if ((fd = open(".", O_RDONLY, 0)) < 0)
+ return (NULL);
+ sp->fts_child = fts_build(sp, instr);
+ if (fchdir(fd)) {
+ (void)close(fd);
+ return (NULL);
+ }
+ (void)close(fd);
+ return (sp->fts_child);
+}
+
+/*
+ * This is the tricky part -- do not casually change *anything* in here. The
+ * idea is to build the linked list of entries that are used by fts_children
+ * and fts_read. There are lots of special cases.
+ *
+ * The real slowdown in walking the tree is the stat calls. If FTS_NOSTAT is
+ * set and it's a physical walk (so that symbolic links can't be directories),
+ * we can do things quickly. First, if it's a 4.4BSD file system, the type
+ * of the file is in the directory entry. Otherwise, we assume that the number
+ * of subdirectories in a node is equal to the number of links to the parent.
+ * The former skips all stat calls. The latter skips stat calls in any leaf
+ * directories and for any files after the subdirectories in the directory have
+ * been found, cutting the stat calls by about 2/3.
+ */
+static FTSENT *
+fts_build(FTS *sp, int type)
+{
+ struct dirent *dp;
+ FTSENT *p, *head;
+ FTSENT *cur, *tail;
+ DIR *dirp;
+ void *oldaddr;
+ size_t len, maxlen;
+ int nitems, cderrno, descend, level, nlinks, nostat, doadjust;
+ int saved_errno;
+ char *cp;
+
+ /* Set current node pointer. */
+ cur = sp->fts_cur;
+
+ /*
+ * Open the directory for reading. If this fails, we're done.
+ * If being called from fts_read, set the fts_info field.
+ */
+ if ((dirp = opendir(cur->fts_accpath)) == NULL) {
+ if (type == BREAD) {
+ cur->fts_info = FTS_DNR;
+ cur->fts_errno = errno;
+ }
+ return (NULL);
+ }
+
+ /*
+ * Nlinks is the number of possible entries of type directory in the
+ * directory if we're cheating on stat calls, 0 if we're not doing
+ * any stat calls at all, -1 if we're doing stats on everything.
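+ * For example, a directory whose st_nlink is 5 has at most 3
+ * subdirectories: "." and the entry in the parent account for two of
+ * the links, and each subdirectory's ".." adds one more. Once that
+ * many directories have been seen, the rest need no stat call.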
+ */
+ if (type == BNAMES)
+ nlinks = 0;
+ else if (ISSET(FTS_NOSTAT) && ISSET(FTS_PHYSICAL)) {
+ nlinks = cur->fts_nlink - (ISSET(FTS_SEEDOT) ? 0 : 2);
+ nostat = 1;
+ } else {
+ nlinks = -1;
+ nostat = 0;
+ }
+
+#ifdef notdef
+ (void)printf("nlinks == %d (cur: %u)\n", nlinks, cur->fts_nlink);
+ (void)printf("NOSTAT %d PHYSICAL %d SEEDOT %d\n",
+ ISSET(FTS_NOSTAT), ISSET(FTS_PHYSICAL), ISSET(FTS_SEEDOT));
+#endif
+ /*
+ * If we're going to need to stat anything or we want to descend
+ * and stay in the directory, chdir. If this fails we keep going,
+ * but set a flag so we don't chdir after the post-order visit.
+ * We won't be able to stat anything, but we can still return the
+ * names themselves. Note, that since fts_read won't be able to
+ * chdir into the directory, it will have to return different path
+ * names than before, i.e. "a/b" instead of "b". Since the node
+ * has already been visited in pre-order, have to wait until the
+ * post-order visit to return the error. There is a special case
+ * here, if there was nothing to stat then it's not an error to
+ * not be able to stat. This is all fairly nasty. If a program
+ * needed sorted entries or stat information, they had better be
+ * checking FTS_NS on the returned nodes.
+ */
+ cderrno = 0;
+ if (nlinks || type == BREAD) {
+ if (fts_safe_changedir(sp, cur, dirfd(dirp), NULL)) {
+ if (nlinks && type == BREAD)
+ cur->fts_errno = errno;
+ cur->fts_flags |= FTS_DONTCHDIR;
+ descend = 0;
+ cderrno = errno;
+ (void)closedir(dirp);
+ dirp = NULL;
+ } else
+ descend = 1;
+ } else
+ descend = 0;
+
+ /*
+ * Figure out the max file name length that can be stored in the
+ * current path -- the inner loop allocates more path as necessary.
+ * We really wouldn't have to do the maxlen calculations here, we
+ * could do them in fts_read before returning the path, but it's a
+ * lot easier here since the length is part of the dirent structure.
+ *
+ * If not changing directories set a pointer so that can just append
+ * each new name into the path.
+ */
+ len = NAPPEND(cur);
+ if (ISSET(FTS_NOCHDIR)) {
+ cp = sp->fts_path + len;
+ *cp++ = '/';
+ }
+ len++;
+ maxlen = sp->fts_pathlen - len;
+
+ /*
+ * fts_level is a short so we must prevent it from wrapping
+ * around to FTS_ROOTLEVEL and FTS_ROOTPARENTLEVEL.
+ */
+ level = cur->fts_level;
+ if (level < FTS_MAXLEVEL)
+ level++;
+
+ /* Read the directory, attaching each entry to the `link' pointer. */
+ doadjust = 0;
+ for (head = tail = NULL, nitems = 0; dirp && (dp = readdir(dirp));) {
+ if (!ISSET(FTS_SEEDOT) && ISDOT(dp->d_name))
+ continue;
+
+ if (!(p = fts_alloc(sp, dp->d_name, strlen(dp->d_name))))
+ goto mem1;
+ if (strlen(dp->d_name) >= maxlen) { /* include space for NUL */
+ oldaddr = sp->fts_path;
+ if (fts_palloc(sp, strlen(dp->d_name) + len + 1)) {
+ /*
+ * No more memory for path or structures. Save
+ * errno, free up the current structure and the
+ * structures already allocated.
+ */
+mem1: saved_errno = errno;
+ if (p)
+ free(p);
+ fts_lfree(head);
+ (void)closedir(dirp);
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ errno = saved_errno;
+ return (NULL);
+ }
+ /* Did realloc() change the pointer? */
+ if (oldaddr != sp->fts_path) {
+ doadjust = 1;
+ if (ISSET(FTS_NOCHDIR))
+ cp = sp->fts_path + len;
+ }
+ maxlen = sp->fts_pathlen - len;
+ }
+
+ p->fts_level = level;
+ p->fts_parent = sp->fts_cur;
+ p->fts_pathlen = len + strlen(dp->d_name);
+ if (p->fts_pathlen < len) {
+ /*
+ * If we wrap, free up the current structure and
+ * the structures already allocated, then error
+ * out with ENAMETOOLONG.
+ */
+ free(p);
+ fts_lfree(head);
+ (void)closedir(dirp);
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ errno = ENAMETOOLONG;
+ return (NULL);
+ }
+
+ if (cderrno) {
+ if (nlinks) {
+ p->fts_info = FTS_NS;
+ p->fts_errno = cderrno;
+ } else
+ p->fts_info = FTS_NSOK;
+ p->fts_accpath = cur->fts_accpath;
+ } else if (nlinks == 0
+#ifdef DT_DIR
+ || (nostat &&
+ dp->d_type != DT_DIR && dp->d_type != DT_UNKNOWN)
+#endif
+ ) {
+ p->fts_accpath =
+ ISSET(FTS_NOCHDIR) ? p->fts_path : p->fts_name;
+ p->fts_info = FTS_NSOK;
+ } else {
+ /* Build a file name for fts_stat to stat. */
+ if (ISSET(FTS_NOCHDIR)) {
+ p->fts_accpath = p->fts_path;
+ memmove(cp, p->fts_name, p->fts_namelen + 1);
+ } else
+ p->fts_accpath = p->fts_name;
+ /* Stat it. */
+ p->fts_info = fts_stat(sp, p, 0);
+
+ /* Decrement link count if applicable. */
+ if (nlinks > 0 && (p->fts_info == FTS_D ||
+ p->fts_info == FTS_DC || p->fts_info == FTS_DOT))
+ --nlinks;
+ }
+
+ /* We walk in directory order so "ls -f" doesn't get upset. */
+ p->fts_link = NULL;
+ if (head == NULL)
+ head = tail = p;
+ else {
+ tail->fts_link = p;
+ tail = p;
+ }
+ ++nitems;
+ }
+ if (dirp)
+ (void)closedir(dirp);
+
+ /*
+ * If realloc() changed the address of the path, adjust the
+ * addresses for the rest of the tree and the dir list.
+ */
+ if (doadjust)
+ fts_padjust(sp, head);
+
+ /*
+ * If not changing directories, reset the path back to original
+ * state.
+ */
+ if (ISSET(FTS_NOCHDIR)) {
+ if (len == sp->fts_pathlen || nitems == 0)
+ --cp;
+ *cp = '\0';
+ }
+
+ /*
+ * If descended after called from fts_children or after called from
+ * fts_read and nothing found, get back. At the root level we use
+ * the saved fd; if one of fts_open()'s arguments is a relative path
+ * to an empty directory, we wind up here with no other way back. If
+ * can't get back, we're done.
+ */
+ if (descend && (type == BCHILD || !nitems) &&
+ (cur->fts_level == FTS_ROOTLEVEL ? FCHDIR(sp, sp->fts_rfd) :
+ fts_safe_changedir(sp, cur->fts_parent, -1, ".."))) {
+ cur->fts_info = FTS_ERR;
+ SET(FTS_STOP);
+ return (NULL);
+ }
+
+ /* If didn't find anything, return NULL. */
+ if (!nitems) {
+ if (type == BREAD)
+ cur->fts_info = FTS_DP;
+ return (NULL);
+ }
+
+ /* Sort the entries. */
+ if (sp->fts_compar && nitems > 1)
+ head = fts_sort(sp, head, nitems);
+ return (head);
+}
+
+static u_short
+fts_stat(FTS *sp, FTSENT *p, int follow)
+{
+ FTSENT *t;
+ dev_t dev;
+ ino_t ino;
+ struct stat *sbp, sb;
+ int saved_errno;
+
+ /* If user needs stat info, stat buffer already allocated. */
+ sbp = ISSET(FTS_NOSTAT) ? &sb : p->fts_statp;
+
+ /*
+ * If doing a logical walk, or application requested FTS_FOLLOW, do
+ * a stat(2). If that fails, check for a non-existent symlink. If
+ * fail, set the errno from the stat call.
+ */
+ if (ISSET(FTS_LOGICAL) || follow) {
+ if (stat(p->fts_accpath, sbp)) {
+ saved_errno = errno;
+ if (!lstat(p->fts_accpath, sbp)) {
+ errno = 0;
+ return (FTS_SLNONE);
+ }
+ p->fts_errno = saved_errno;
+ goto err;
+ }
+ } else if (lstat(p->fts_accpath, sbp)) {
+ p->fts_errno = errno;
+err: memset(sbp, 0, sizeof(struct stat));
+ return (FTS_NS);
+ }
+
+ if (S_ISDIR(sbp->st_mode)) {
+ /*
+ * Set the device/inode. Used to find cycles and check for
+ * crossing mount points. Also remember the link count, used
+ * in fts_build to limit the number of stat calls. It is
+ * understood that these fields are only referenced if fts_info
+ * is set to FTS_D.
+ */
+ dev = p->fts_dev = sbp->st_dev;
+ ino = p->fts_ino = sbp->st_ino;
+ p->fts_nlink = sbp->st_nlink;
+
+ if (ISDOT(p->fts_name))
+ return (FTS_DOT);
+
+ /*
+ * Cycle detection is done by brute force when the directory
+ * is first encountered. If the tree gets deep enough or the
+ * number of symbolic links to directories is high enough,
+ * something faster might be worthwhile.
+ */
+ for (t = p->fts_parent;
+ t->fts_level >= FTS_ROOTLEVEL; t = t->fts_parent)
+ if (ino == t->fts_ino && dev == t->fts_dev) {
+ p->fts_cycle = t;
+ return (FTS_DC);
+ }
+ return (FTS_D);
+ }
+ if (S_ISLNK(sbp->st_mode))
+ return (FTS_SL);
+ if (S_ISREG(sbp->st_mode))
+ return (FTS_F);
+ return (FTS_DEFAULT);
+}
+
+static FTSENT *
+fts_sort(FTS *sp, FTSENT *head, int nitems)
+{
+ FTSENT **ap, *p;
+
+ /*
+ * Construct an array of pointers to the structures and call qsort(3).
+ * Reassemble the array in the order returned by qsort. If unable to
+ * sort for memory reasons, return the directory entries in their
+ * current order. Allocate enough space for the current needs plus
+ * 40 so don't realloc one entry at a time.
+ */
+ if (nitems > sp->fts_nitems) {
+ struct _ftsent **a;
+
+ sp->fts_nitems = nitems + 40;
+ if ((a = realloc(sp->fts_array,
+ sp->fts_nitems * sizeof(FTSENT *))) == NULL) {
+ if (sp->fts_array)
+ free(sp->fts_array);
+ sp->fts_array = NULL;
+ sp->fts_nitems = 0;
+ return (head);
+ }
+ sp->fts_array = a;
+ }
+ for (ap = sp->fts_array, p = head; p; p = p->fts_link)
+ *ap++ = p;
+ qsort((void *)sp->fts_array, nitems, sizeof(FTSENT *), sp->fts_compar);
+ for (head = *(ap = sp->fts_array); --nitems; ++ap)
+ ap[0]->fts_link = ap[1];
+ ap[0]->fts_link = NULL;
+ return (head);
+}
+
+static FTSENT *
+fts_alloc(FTS *sp, char *name, size_t namelen)
+{
+ FTSENT *p;
+ size_t len;
+
+ /*
+ * The file name is a variable length array and no stat structure is
+ * necessary if the user has set the nostat bit. Allocate the FTSENT
+ * structure, the file name and the stat structure in one chunk, but
+ * be careful that the stat structure is reasonably aligned. Since the
+ * fts_name field is declared to be of size 1, the fts_name pointer is
+ * namelen + 2 before the first possible address of the stat structure.
+ */
+ len = sizeof(FTSENT) + namelen;
+ if (!ISSET(FTS_NOSTAT))
+ len += sizeof(struct stat) + ALIGNBYTES;
+ if ((p = malloc(len)) == NULL)
+ return (NULL);
+
+ memset(p, 0, len);
+ p->fts_path = sp->fts_path;
+ p->fts_namelen = namelen;
+ p->fts_instr = FTS_NOINSTR;
+ if (!ISSET(FTS_NOSTAT))
+ p->fts_statp = (struct stat *)ALIGN(p->fts_name + namelen + 2);
+ memcpy(p->fts_name, name, namelen);
+
+ return (p);
+}
+
+static void
+fts_lfree(FTSENT *head)
+{
+ FTSENT *p;
+
+ /* Free a linked list of structures. */
+ while ((p = head)) {
+ head = head->fts_link;
+ free(p);
+ }
+}
+
+/*
+ * Allow essentially unlimited paths; find, rm, ls should all work on any tree.
+ * Most systems will allow creation of paths much longer than MAXPATHLEN, even
+ * though the kernel won't resolve them. Add the size (not just what's needed)
+ * plus 256 bytes so don't realloc the path 2 bytes at a time.
+ */
+static int
+fts_palloc(FTS *sp, size_t more)
+{
+ char *p;
+
+ /*
+ * Check for possible wraparound.
+ */
+ more += 256;
+ if (sp->fts_pathlen + more < sp->fts_pathlen) {
+ if (sp->fts_path)
+ free(sp->fts_path);
+ sp->fts_path = NULL;
+ errno = ENAMETOOLONG;
+ return (1);
+ }
+ sp->fts_pathlen += more;
+ p = realloc(sp->fts_path, sp->fts_pathlen);
+ if (p == NULL) {
+ if (sp->fts_path)
+ free(sp->fts_path);
+ sp->fts_path = NULL;
+ return (1);
+ }
+ sp->fts_path = p;
+ return (0);
+}
+
+/*
+ * When the path is realloc'd, have to fix all of the pointers in structures
+ * already returned.
+ */
+static void
+fts_padjust(FTS *sp, FTSENT *head)
+{
+ FTSENT *p;
+ char *addr = sp->fts_path;
+
+#define ADJUST(p) { \
+ if ((p)->fts_accpath != (p)->fts_name) { \
+ (p)->fts_accpath = \
+ (char *)addr + ((p)->fts_accpath - (p)->fts_path); \
+ } \
+ (p)->fts_path = addr; \
+}
+ /* Adjust the current set of children. */
+ for (p = sp->fts_child; p; p = p->fts_link)
+ ADJUST(p);
+
+ /* Adjust the rest of the tree, including the current level. */
+ for (p = head; p->fts_level >= FTS_ROOTLEVEL;) {
+ ADJUST(p);
+ p = p->fts_link ? p->fts_link : p->fts_parent;
+ }
+}
+
+static size_t
+fts_maxarglen(char * const *argv)
+{
+ size_t len, max;
+
+ for (max = 0; *argv; ++argv)
+ if ((len = strlen(*argv)) > max)
+ max = len;
+ return (max + 1);
+}
+
+/*
+ * Change to dir specified by fd or p->fts_accpath without getting
+ * tricked by someone changing the world out from underneath us.
+ * Assumes p->fts_dev and p->fts_ino are filled in.
+ */
+static int
+fts_safe_changedir(FTS *sp, FTSENT *p, int fd, char *path)
+{
+ int ret, oerrno, newfd;
+ struct stat sb;
+
+ newfd = fd;
+ if (ISSET(FTS_NOCHDIR))
+ return (0);
+ if (fd < 0 && (newfd = open(path, O_RDONLY, 0)) < 0)
+ return (-1);
+ if (fstat(newfd, &sb)) {
+ ret = -1;
+ goto bail;
+ }
+ if (p->fts_dev != sb.st_dev || p->fts_ino != sb.st_ino) {
+ errno = ENOENT; /* disinformation */
+ ret = -1;
+ goto bail;
+ }
+ ret = fchdir(newfd);
+bail:
+ oerrno = errno;
+ if (fd < 0)
+ (void)close(newfd);
+ errno = oerrno;
+ return (ret);
+}
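
A minimal walk over this API (a sketch; error handling elided):

    char *roots[] = { "/data", NULL };
    FTS *ftsp = fts_open(roots, FTS_PHYSICAL, NULL);
    FTSENT *ent;
    while ((ent = fts_read(ftsp)) != NULL)
        if (ent->fts_info == FTS_F)
            printf("%s\n", ent->fts_path);
    fts_close(ftsp);
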
diff --git a/libc/bionic/libc_init_dynamic.c b/libc/bionic/libc_init_dynamic.c
index b479b27..682ebcf 100644
--- a/libc/bionic/libc_init_dynamic.c
+++ b/libc/bionic/libc_init_dynamic.c
@@ -52,8 +52,6 @@
#include "libc_init_common.h"
#include <bionic_tls.h>
-extern void malloc_debug_init();
-
/* We flag the __libc_preinit function as a constructor to ensure
* that its address is listed in libc.so's .init_array section.
* This ensures that the function is called by the dynamic linker
@@ -78,12 +76,11 @@ void __libc_prenit(void)
__libc_init_common(elfdata);
-#ifdef MALLOC_LEAK_CHECK
- /* setup malloc leak checker, requires system properties */
+ /* Set up malloc routines according to the environment.
+ * Requires system properties.
+ */
extern void malloc_debug_init(void);
malloc_debug_init();
-#endif
-
}
__noreturn void __libc_init(uintptr_t *elfdata,
diff --git a/libc/bionic/libc_init_static.c b/libc/bionic/libc_init_static.c
index e6264bb..d097b6b 100644
--- a/libc/bionic/libc_init_static.c
+++ b/libc/bionic/libc_init_static.c
@@ -68,12 +68,6 @@ __noreturn void __libc_init(uintptr_t *elfdata,
/* Initialize the C runtime environment */
__libc_init_common(elfdata);
-#ifdef MALLOC_LEAK_CHECK
- /* setup malloc leak checker, requires system properties */
- extern void malloc_debug_init(void);
- malloc_debug_init();
-#endif
-
/* Several Linux ABIs don't pass the onexit pointer, and the ones that
* do never use it. Therefore, we ignore it.
*/
diff --git a/libc/bionic/logd_write.c b/libc/bionic/logd_write.c
index 39f0258..618160f 100644
--- a/libc/bionic/logd_write.c
+++ b/libc/bionic/logd_write.c
@@ -66,7 +66,7 @@ static int __write_to_log_null(log_id_t log_id, struct iovec *vec);
static pthread_mutex_t log_init_lock = PTHREAD_MUTEX_INITIALIZER;
-log_channel_t log_channels[LOG_ID_MAX] = {
+static log_channel_t log_channels[LOG_ID_MAX] = {
{ __write_to_log_null, -1, NULL },
{ __write_to_log_init, -1, "/dev/"LOGGER_LOG_MAIN },
{ __write_to_log_init, -1, "/dev/"LOGGER_LOG_RADIO }
@@ -112,6 +112,7 @@ static int __write_to_log_init(log_id_t log_id, struct iovec *vec)
log_channels[log_id].logger =
(fd < 0) ? __write_to_log_null : __write_to_log_kernel;
+ log_channels[log_id].fd = fd;
diff --git a/libc/bionic/malloc_debug_common.c b/libc/bionic/malloc_debug_common.c
new file mode 100644
index 0000000..ec56826
--- /dev/null
+++ b/libc/bionic/malloc_debug_common.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains the structures, global variables, and routines that are used by
+ * the malloc leak detection code and other components in the system. The
+ * trick is that some components expect these data and routines to be defined
+ * and implemented in libc.so, regardless of whether the MALLOC_LEAK_CHECK
+ * macro is defined. To make things trickier still, the malloc leak detection
+ * code implemented in libc_malloc_debug.so also requires access to these
+ * variables and routines (to fill the allocation entry hash table, for
+ * example). So, everything relevant is defined and implemented here and
+ * exported to the leak detection code and other components via dynamic
+ * (libc.so) or static (libc.a) linking.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <string.h>
+#include <unistd.h>
+#include "dlmalloc.h"
+#include "malloc_debug_common.h"
+
+/*
+ * In a VM process, this is set to 1 after fork()ing out of zygote.
+ */
+int gMallocLeakZygoteChild = 0;
+
+pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
+HashTable gHashTable;
+
+// =============================================================================
+// output functions
+// =============================================================================
+
+static int hash_entry_compare(const void* arg1, const void* arg2)
+{
+ HashEntry* e1 = *(HashEntry**)arg1;
+ HashEntry* e2 = *(HashEntry**)arg2;
+
+ size_t nbAlloc1 = e1->allocations;
+ size_t nbAlloc2 = e2->allocations;
+ size_t size1 = e1->size & ~SIZE_FLAG_MASK;
+ size_t size2 = e2->size & ~SIZE_FLAG_MASK;
+ size_t alloc1 = nbAlloc1 * size1;
+ size_t alloc2 = nbAlloc2 * size2;
+
+ // sort in descending order by:
+ // 1) total size
+ // 2) number of allocations
+ //
+ // This is used for sorting, not determination of equality, so we don't
+ // need to compare the bit flags.
+ int result;
+ if (alloc1 > alloc2) {
+ result = -1;
+ } else if (alloc1 < alloc2) {
+ result = 1;
+ } else {
+ if (nbAlloc1 > nbAlloc2) {
+ result = -1;
+ } else if (nbAlloc1 < nbAlloc2) {
+ result = 1;
+ } else {
+ result = 0;
+ }
+ }
+ return result;
+}
+
+/*
+ * Retrieve native heap information.
+ *
+ * "*info" is set to a buffer we allocate
+ * "*overallSize" is set to the size of the "info" buffer
+ * "*infoSize" is set to the size of a single entry
+ * "*totalMemory" is set to the sum of all allocations we're tracking; does
+ * not include heap overhead
+ * "*backtraceSize" is set to the maximum number of entries in the back trace
+ */
+void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
+ size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
+{
+ // don't do anything if we have invalid arguments
+ if (info == NULL || overallSize == NULL || infoSize == NULL ||
+ totalMemory == NULL || backtraceSize == NULL) {
+ return;
+ }
+
+ pthread_mutex_lock(&gAllocationsMutex);
+
+ if (gHashTable.count == 0) {
+ *info = NULL;
+ *overallSize = 0;
+ *infoSize = 0;
+ *totalMemory = 0;
+ *backtraceSize = 0;
+ goto done;
+ }
+
+ void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
+
+ // get the entries into an array to be sorted
+ int index = 0;
+ int i;
+ *totalMemory = 0;
+ for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
+ HashEntry* entry = gHashTable.slots[i];
+ while (entry != NULL) {
+ list[index] = entry;
+ *totalMemory = *totalMemory +
+ ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
+ index++;
+ entry = entry->next;
+ }
+ }
+
+ // XXX: the protocol doesn't allow variable size for the stack trace (yet)
+ *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
+ *overallSize = *infoSize * gHashTable.count;
+ *backtraceSize = BACKTRACE_SIZE;
+
+ // now get a byte array big enough for this
+ *info = (uint8_t*)dlmalloc(*overallSize);
+
+ if (*info == NULL) {
+ *overallSize = 0;
+ goto out_nomem_info;
+ }
+
+ qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
+
+ uint8_t* head = *info;
+ const int count = gHashTable.count;
+ for (i = 0 ; i < count ; i++) {
+ HashEntry* entry = list[i];
+ size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
+ if (entrySize < *infoSize) {
+ /* we're writing less than a full entry, clear out the rest */
+ memset(head + entrySize, 0, *infoSize - entrySize);
+ } else {
+ /* make sure the amount we're copying doesn't exceed the limit */
+ entrySize = *infoSize;
+ }
+ memcpy(head, &(entry->size), entrySize);
+ head += *infoSize;
+ }
+
+out_nomem_info:
+ dlfree(list);
+
+done:
+ pthread_mutex_unlock(&gAllocationsMutex);
+}
+
+void free_malloc_leak_info(uint8_t* info)
+{
+ dlfree(info);
+}
+
+struct mallinfo mallinfo()
+{
+ return dlmallinfo();
+}
+
+void* valloc(size_t bytes) {
+ /* align to the actual page size, as reported by getpagesize() */
+ return memalign( getpagesize(), bytes );
+}
+
+/* Support for malloc debugging.
+ * Note that if USE_DL_PREFIX is not defined, it's assumed that memory
+ * allocation routines are implemented somewhere else, so all our custom
+ * malloc routines should not be compiled at all.
+ */
+#ifdef USE_DL_PREFIX
+
+/* Table for dispatching malloc calls, initialized with default dispatchers. */
+const MallocDebug __libc_malloc_default_dispatch __attribute__((aligned(32))) =
+{
+ dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
+};
+
+/* Selector of dispatch table to use for dispatching malloc calls. */
+const MallocDebug* __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
+
+void* malloc(size_t bytes) {
+ return __libc_malloc_dispatch->malloc(bytes);
+}
+void free(void* mem) {
+ __libc_malloc_dispatch->free(mem);
+}
+void* calloc(size_t n_elements, size_t elem_size) {
+ return __libc_malloc_dispatch->calloc(n_elements, elem_size);
+}
+void* realloc(void* oldMem, size_t bytes) {
+ return __libc_malloc_dispatch->realloc(oldMem, bytes);
+}
+void* memalign(size_t alignment, size_t bytes) {
+ return __libc_malloc_dispatch->memalign(alignment, bytes);
+}
+
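+/* A debug module can reroute every allocation by publishing its own table
+ * and repointing the selector, e.g. (hypothetical hook names):
+ *
+ *   static const MallocDebug leak_dispatch = {
+ *       leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign
+ *   };
+ *   __libc_malloc_dispatch = &leak_dispatch;
+ */
+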
+/* We implement malloc debugging only in libc.so, so the code below
+ * must be excluded if we compile this file for the static libc.a.
+ */
+#ifndef LIBC_STATIC
+#include <sys/system_properties.h>
+#include <dlfcn.h>
+#include "logd.h"
+
+// =============================================================================
+// log functions
+// =============================================================================
+
+#define debug_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "libc", (format), ##__VA_ARGS__ )
+#define error_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "libc", (format), ##__VA_ARGS__ )
+#define info_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc", (format), ##__VA_ARGS__ )
+
+/* Table for dispatching malloc calls, depending on environment. */
+static MallocDebug gMallocUse __attribute__((aligned(32))) = {
+ dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign
+};
+
+extern char* __progname;
+
+/* Handle to the shared library where the actual memory allocation is
+ * implemented. This library is loaded, and memory allocation calls are
+ * redirected there, when the libc.debug.malloc system property contains a
+ * value other than zero:
+ * 1  - For memory leak detection.
+ * 5  - For filling allocated / freed memory with patterns defined by the
+ *      CHK_SENTINEL_VALUE and CHK_FILL_FREE macros.
+ * 10 - For adding pre- and post-allocation stubs in order to detect
+ *      buffer overruns.
+ * Note that the emulator's memory allocation instrumentation is not
+ * controlled by the libc.debug.malloc value; it is enabled by starting the
+ * emulator with the -memcheck option, in which case the emulator's
+ * instrumentation takes over and the libc.debug.malloc value is ignored.
+ * The actual functionality for debug levels 1-10 is implemented in
+ * libc_malloc_debug_leak.so, while the functionality for the emulator's
+ * instrumented allocations is implemented in libc_malloc_debug_qemu.so and
+ * can run inside the emulator only.
+ */
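+/* For example, the leak checker (level 1) is typically enabled on a device
+ * by setting the property and then restarting the target process:
+ *   adb shell setprop libc.debug.malloc 1
+ * (illustrative workflow; the property is read below via
+ * __system_property_get). */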
+static void* libc_malloc_impl_handle = NULL;
+
+/* Make sure our MALLOC_ALIGNMENT matches the one used in dlmalloc.
+ * The emulator's memchecker needs this value to properly align its
+ * guarding zones.
+ */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif /* MALLOC_ALIGNMENT */
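+/* As an illustration of how this value is used, a minimal sketch of the
+ * rounding dlmalloc performs (the helper name is hypothetical):
+ *
+ *   static size_t round_up_to_malloc_alignment(size_t n) {
+ *       return (n + MALLOC_ALIGNMENT - 1) & ~(MALLOC_ALIGNMENT - 1);
+ *   }
+ *
+ * With MALLOC_ALIGNMENT == 8, round_up_to_malloc_alignment(13) == 16; the
+ * emulator's memchecker assumes this granularity for its guard zones. */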
+
+/* Initializes memory allocation framework once per process. */
+static void malloc_init_impl(void)
+{
+ const char* so_name = NULL;
+ MallocDebugInit malloc_debug_initialize = NULL;
+ unsigned int qemu_running = 0;
+ unsigned int debug_level = 0;
+ unsigned int memcheck_enabled = 0;
+ char env[PROP_VALUE_MAX];
+ char memcheck_tracing[PROP_VALUE_MAX];
+
+    /* Get the custom malloc debug level. Note that an emulator started with
+     * the memory checking option takes priority over the debug level set in
+     * the libc.debug.malloc system property. */
+ if (__system_property_get("ro.kernel.qemu", env) && atoi(env)) {
+ qemu_running = 1;
+ if (__system_property_get("ro.kernel.memcheck", memcheck_tracing)) {
+ if (memcheck_tracing[0] != '0') {
+ // Emulator has started with memory tracing enabled. Enforce it.
+ debug_level = 20;
+ memcheck_enabled = 1;
+ }
+ }
+ }
+
+    /* If the debug level has not been set by the emulator's memcheck option,
+     * grab it from the libc.debug.malloc system property. */
+ if (!debug_level && __system_property_get("libc.debug.malloc", env)) {
+ debug_level = atoi(env);
+ }
+
+    /* Debug level 0 means that we should use the default dlxxx allocation
+     * routines. */
+ if (!debug_level) {
+ return;
+ }
+
+    // Let's see which .so must be loaded for the requested debug level
+ switch (debug_level) {
+ case 1:
+ case 5:
+ case 10:
+ so_name = "/system/lib/libc_malloc_debug_leak.so";
+ break;
+ case 20:
+ // Quick check: debug level 20 can only be handled in emulator.
+ if (!qemu_running) {
+ error_log("%s: Debug level %d can only be set in emulator\n",
+ __progname, debug_level);
+ return;
+ }
+ // Make sure that memory checking has been enabled in emulator.
+ if (!memcheck_enabled) {
+ error_log("%s: Memory checking is not enabled in the emulator\n",
+ __progname);
+ return;
+ }
+ so_name = "/system/lib/libc_malloc_debug_qemu.so";
+ break;
+ default:
+ error_log("%s: Debug level %d is unknown\n",
+ __progname, debug_level);
+ return;
+ }
+
+ // Load .so that implements the required malloc debugging functionality.
+ libc_malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
+ if (libc_malloc_impl_handle == NULL) {
+ error_log("%s: Missing module %s required for malloc debug level %d\n",
+ __progname, so_name, debug_level);
+ return;
+ }
+
+ // Initialize malloc debugging in the loaded module.
+ malloc_debug_initialize =
+ dlsym(libc_malloc_impl_handle, "malloc_debug_initialize");
+ if (malloc_debug_initialize == NULL) {
+ error_log("%s: Initialization routine is not found in %s\n",
+ __progname, so_name);
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ if (malloc_debug_initialize()) {
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+
+ if (debug_level == 20) {
+ // For memory checker we need to do extra initialization.
+ int (*memcheck_initialize)(int, const char*) =
+ dlsym(libc_malloc_impl_handle, "memcheck_initialize");
+ if (memcheck_initialize == NULL) {
+ error_log("%s: memcheck_initialize routine is not found in %s\n",
+ __progname, so_name);
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ if (memcheck_initialize(MALLOC_ALIGNMENT, memcheck_tracing)) {
+ dlclose(libc_malloc_impl_handle);
+ return;
+ }
+ }
+
+ // Initialize malloc dispatch table with appropriate routines.
+ switch (debug_level) {
+ case 1:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (leak checker)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "leak_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "leak_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "leak_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "leak_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "leak_memalign");
+ break;
+ case 5:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (fill)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "fill_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "fill_free");
+ gMallocUse.calloc = dlcalloc;
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "fill_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "fill_memalign");
+ break;
+ case 10:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
+ __progname, debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "chk_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "chk_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "chk_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "chk_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "chk_memalign");
+ break;
+ case 20:
+ __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+ "%s[%u] using MALLOC_DEBUG = %d (instrumented for emulator)\n",
+ __progname, getpid(), debug_level);
+ gMallocUse.malloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_malloc");
+ gMallocUse.free =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_free");
+ gMallocUse.calloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_calloc");
+ gMallocUse.realloc =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_realloc");
+ gMallocUse.memalign =
+ dlsym(libc_malloc_impl_handle, "qemu_instrumented_memalign");
+ break;
+ default:
+ break;
+ }
+
+ // Make sure dispatch table is initialized
+ if ((gMallocUse.malloc == NULL) ||
+ (gMallocUse.free == NULL) ||
+ (gMallocUse.calloc == NULL) ||
+ (gMallocUse.realloc == NULL) ||
+ (gMallocUse.memalign == NULL)) {
+ error_log("%s: Cannot initialize malloc dispatch table for debug level"
+ " %d: %p, %p, %p, %p, %p\n",
+ __progname, debug_level,
+ gMallocUse.malloc, gMallocUse.free,
+ gMallocUse.calloc, gMallocUse.realloc,
+ gMallocUse.memalign);
+ dlclose(libc_malloc_impl_handle);
+ libc_malloc_impl_handle = NULL;
+ } else {
+ __libc_malloc_dispatch = &gMallocUse;
+ }
+}
+
+static pthread_once_t malloc_init_once_ctl = PTHREAD_ONCE_INIT;
+
+#endif // !LIBC_STATIC
+#endif // USE_DL_PREFIX
+
+/* Initializes memory allocation framework.
+ * This routine is called from __libc_init routines implemented
+ * in libc_init_static.c and libc_init_dynamic.c files.
+ */
+void malloc_debug_init(void)
+{
+    /* We need to initialize malloc only when we implement our own custom
+     * malloc routines (i.e. when USE_DL_PREFIX is defined) for libc.so */
+#if defined(USE_DL_PREFIX) && !defined(LIBC_STATIC)
+ if (pthread_once(&malloc_init_once_ctl, malloc_init_impl)) {
+ error_log("Unable to initialize malloc_debug component.");
+ }
+#endif // USE_DL_PREFIX && !LIBC_STATIC
+}
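+/* A minimal sketch of the pthread_once guarantee relied upon above
+ * (hypothetical callback name): even if several threads race into
+ * malloc_debug_init, the initialization runs at most once per process.
+ *
+ *   static pthread_once_t once = PTHREAD_ONCE_INIT;
+ *   static void init_once(void) { ... }   // executed exactly once
+ *   pthread_once(&once, init_once);       // safe to call from any thread
+ */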
diff --git a/libc/bionic/malloc_debug_common.h b/libc/bionic/malloc_debug_common.h
new file mode 100644
index 0000000..87600d6
--- /dev/null
+++ b/libc/bionic/malloc_debug_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains declarations of types and constants used by the malloc leak
+ * detection code in both the libc and libc_malloc_debug libraries.
+ */
+#ifndef MALLOC_DEBUG_COMMON_H
+#define MALLOC_DEBUG_COMMON_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HASHTABLE_SIZE 1543
+#define BACKTRACE_SIZE 32
+/* flag definitions, currently sharing storage with "size" */
+#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
+#define SIZE_FLAG_MASK (SIZE_FLAG_ZYGOTE_CHILD)
+
+#define MAX_SIZE_T (~(size_t)0)
+
+// =============================================================================
+// Structures
+// =============================================================================
+
+typedef struct HashEntry HashEntry;
+struct HashEntry {
+ size_t slot;
+ HashEntry* prev;
+ HashEntry* next;
+ size_t numEntries;
+ // fields above "size" are NOT sent to the host
+ size_t size;
+ size_t allocations;
+ intptr_t backtrace[0];
+};
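+/* The zero-length backtrace[0] member makes HashEntry a variable-size
+ * record; the flexible array must be accounted for when allocating. A
+ * minimal sketch (hypothetical helper, assuming a dlmalloc-backed heap):
+ *
+ *   static HashEntry* hash_entry_alloc(size_t numEntries) {
+ *       HashEntry* e = dlmalloc(sizeof(HashEntry) +
+ *                               numEntries * sizeof(intptr_t));
+ *       if (e != NULL) e->numEntries = numEntries;
+ *       return e;
+ *   }
+ */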
+
+typedef struct HashTable HashTable;
+struct HashTable {
+ size_t count;
+ HashEntry* slots[HASHTABLE_SIZE];
+};
+
+/* Entry in malloc dispatch table. */
+typedef struct MallocDebug MallocDebug;
+struct MallocDebug {
+ /* Address of the actual malloc routine. */
+ void* (*malloc)(size_t bytes);
+ /* Address of the actual free routine. */
+ void (*free)(void* mem);
+ /* Address of the actual calloc routine. */
+ void* (*calloc)(size_t n_elements, size_t elem_size);
+ /* Address of the actual realloc routine. */
+ void* (*realloc)(void* oldMem, size_t bytes);
+ /* Address of the actual memalign routine. */
+ void* (*memalign)(size_t alignment, size_t bytes);
+};
+
+/* Malloc debugging initialization routine.
+ * This routine must be implemented in .so modules that implement malloc
+ * debugging. This routine is called once per process from malloc_init_impl
+ * routine implemented in bionic/libc/bionic/malloc_debug_common.c when malloc
+ * debugging gets initialized for the process.
+ * Return:
+ * 0 on success, -1 on failure.
+ */
+typedef int (*MallocDebugInit)(void);
+
+#ifdef __cplusplus
+}; /* end of extern "C" */
+#endif
+
+#endif // MALLOC_DEBUG_COMMON_H
diff --git a/libc/bionic/malloc_leak.c b/libc/bionic/malloc_debug_leak.c
index b21bc6a..0a3a68d 100644
--- a/libc/bionic/malloc_leak.c
+++ b/libc/bionic/malloc_debug_leak.c
@@ -38,6 +38,7 @@
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>
+#include <dlfcn.h>
#include <sys/socket.h>
#include <sys/un.h>
@@ -47,212 +48,37 @@
#include "dlmalloc.h"
#include "logd.h"
+#include "malloc_debug_common.h"
-// =============================================================================
-// Utilities directly used by Dalvik
-// =============================================================================
-
-#define HASHTABLE_SIZE 1543
-#define BACKTRACE_SIZE 32
-/* flag definitions, currently sharing storage with "size" */
-#define SIZE_FLAG_ZYGOTE_CHILD (1<<31)
-#define SIZE_FLAG_MASK (SIZE_FLAG_ZYGOTE_CHILD)
-
-#define MAX_SIZE_T (~(size_t)0)
-
-/*
- * In a VM process, this is set to 1 after fork()ing out of zygote.
- */
-int gMallocLeakZygoteChild = 0;
-
-// =============================================================================
-// Structures
-// =============================================================================
-
-typedef struct HashEntry HashEntry;
-struct HashEntry {
- size_t slot;
- HashEntry* prev;
- HashEntry* next;
- size_t numEntries;
- // fields above "size" are NOT sent to the host
- size_t size;
- size_t allocations;
- intptr_t backtrace[0];
-};
-
-typedef struct HashTable HashTable;
-struct HashTable {
- size_t count;
- HashEntry* slots[HASHTABLE_SIZE];
-};
+// This file should be included into the build only when the
+// MALLOC_LEAK_CHECK or MALLOC_QEMU_INSTRUMENT macro (or both)
+// is defined.
+#ifndef MALLOC_LEAK_CHECK
+#error MALLOC_LEAK_CHECK is not defined.
+#endif // !MALLOC_LEAK_CHECK
-static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
-static HashTable gHashTable;
+// Global variables defined in malloc_debug_common.c
+extern int gMallocLeakZygoteChild;
+extern pthread_mutex_t gAllocationsMutex;
+extern HashTable gHashTable;
+extern const MallocDebug __libc_malloc_default_dispatch;
+extern const MallocDebug* __libc_malloc_dispatch;
// =============================================================================
// log functions
// =============================================================================
#define debug_log(format, ...) \
- __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )
-
-// =============================================================================
-// output functions
-// =============================================================================
-
-static int hash_entry_compare(const void* arg1, const void* arg2)
-{
- HashEntry* e1 = *(HashEntry**)arg1;
- HashEntry* e2 = *(HashEntry**)arg2;
-
- size_t nbAlloc1 = e1->allocations;
- size_t nbAlloc2 = e2->allocations;
- size_t size1 = e1->size & ~SIZE_FLAG_MASK;
- size_t size2 = e2->size & ~SIZE_FLAG_MASK;
- size_t alloc1 = nbAlloc1 * size1;
- size_t alloc2 = nbAlloc2 * size2;
-
- // sort in descending order by:
- // 1) total size
- // 2) number of allocations
- //
- // This is used for sorting, not determination of equality, so we don't
- // need to compare the bit flags.
- int result;
- if (alloc1 > alloc2) {
- result = -1;
- } else if (alloc1 < alloc2) {
- result = 1;
- } else {
- if (nbAlloc1 > nbAlloc2) {
- result = -1;
- } else if (nbAlloc1 < nbAlloc2) {
- result = 1;
- } else {
- result = 0;
- }
- }
- return result;
-}
-
-/*
- * Retrieve native heap information.
- *
- * "*info" is set to a buffer we allocate
- * "*overallSize" is set to the size of the "info" buffer
- * "*infoSize" is set to the size of a single entry
- * "*totalMemory" is set to the sum of all allocations we're tracking; does
- * not include heap overhead
- * "*backtraceSize" is set to the maximum number of entries in the back trace
- */
-void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
- size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
-{
- // don't do anything if we have invalid arguments
- if (info == NULL || overallSize == NULL || infoSize == NULL ||
- totalMemory == NULL || backtraceSize == NULL) {
- return;
- }
-
- pthread_mutex_lock(&gAllocationsMutex);
-
- if (gHashTable.count == 0) {
- *info = NULL;
- *overallSize = 0;
- *infoSize = 0;
- *totalMemory = 0;
- *backtraceSize = 0;
- goto done;
- }
-
- void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
-
- // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
- // debug_log("list = %p\n", list);
-
- // get the entries into an array to be sorted
- int index = 0;
- int i;
- for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
- HashEntry* entry = gHashTable.slots[i];
- while (entry != NULL) {
- list[index] = entry;
- *totalMemory = *totalMemory +
- ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
- index++;
- entry = entry->next;
- }
- }
-
- // debug_log("sorted list!\n");
- // XXX: the protocol doesn't allow variable size for the stack trace (yet)
- *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
- *overallSize = *infoSize * gHashTable.count;
- *backtraceSize = BACKTRACE_SIZE;
-
- // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
- // now get A byte array big enough for this
- *info = (uint8_t*)dlmalloc(*overallSize);
-
- // debug_log("info = %p\n", info);
- if (*info == NULL) {
- *overallSize = 0;
- goto out_nomem_info;
- }
-
- // debug_log("sorting list...\n");
- qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
-
- uint8_t* head = *info;
- const int count = gHashTable.count;
- for (i = 0 ; i < count ; i++) {
- HashEntry* entry = list[i];
- size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
- if (entrySize < *infoSize) {
- /* we're writing less than a full entry, clear out the rest */
- memset(head + entrySize, 0, *infoSize - entrySize);
- } else {
- /* make sure the amount we're copying doesn't exceed the limit */
- entrySize = *infoSize;
- }
- memcpy(head, &(entry->size), entrySize);
- head += *infoSize;
- }
-
-out_nomem_info:
- dlfree(list);
-
-done:
- // debug_log("+++++ done!\n");
- pthread_mutex_unlock(&gAllocationsMutex);
-}
-
-void free_malloc_leak_info(uint8_t* info)
-{
- dlfree(info);
-}
-
-struct mallinfo mallinfo()
-{
- return dlmallinfo();
-}
-
-void* valloc(size_t bytes) {
- /* assume page size of 4096 bytes */
- return memalign( getpagesize(), bytes );
-}
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak_check", (format), ##__VA_ARGS__ )
+#define error_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "malloc_leak_check", (format), ##__VA_ARGS__ )
+#define info_log(format, ...) \
+ __libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ )
+static int gTrapOnError = 1;
-/*
- * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc check is
- * enabled. Currently we exclude them in libc.so, and only include them in
- * libc_debug.so.
- */
-#ifdef MALLOC_LEAK_CHECK
#define MALLOC_ALIGNMENT 8
#define GUARD 0x48151642
-
#define DEBUG 0
// =============================================================================
@@ -409,13 +235,13 @@ static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
if (state->count) {
intptr_t ip = (intptr_t)_Unwind_GetIP(context);
if (ip) {
- state->addrs[0] = ip;
+ state->addrs[0] = ip;
state->addrs++;
state->count--;
return _URC_NO_REASON;
}
}
- /*
+ /*
* If we run out of space to record the address or 0 has been seen, stop
* unwinding the stack.
*/
@@ -433,70 +259,6 @@ int get_backtrace(intptr_t* addrs, size_t max_entries)
}
// =============================================================================
-// malloc leak function dispatcher
-// =============================================================================
-
-static void* leak_malloc(size_t bytes);
-static void leak_free(void* mem);
-static void* leak_calloc(size_t n_elements, size_t elem_size);
-static void* leak_realloc(void* oldMem, size_t bytes);
-static void* leak_memalign(size_t alignment, size_t bytes);
-
-static void* fill_malloc(size_t bytes);
-static void fill_free(void* mem);
-static void* fill_realloc(void* oldMem, size_t bytes);
-static void* fill_memalign(size_t alignment, size_t bytes);
-
-static void* chk_malloc(size_t bytes);
-static void chk_free(void* mem);
-static void* chk_calloc(size_t n_elements, size_t elem_size);
-static void* chk_realloc(void* oldMem, size_t bytes);
-static void* chk_memalign(size_t alignment, size_t bytes);
-
-typedef struct {
- void* (*malloc)(size_t bytes);
- void (*free)(void* mem);
- void* (*calloc)(size_t n_elements, size_t elem_size);
- void* (*realloc)(void* oldMem, size_t bytes);
- void* (*memalign)(size_t alignment, size_t bytes);
-} MallocDebug;
-
-static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
-{
- { dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign },
- { leak_malloc, leak_free, leak_calloc, leak_realloc, leak_memalign },
- { fill_malloc, fill_free, dlcalloc, fill_realloc, fill_memalign },
- { chk_malloc, chk_free, chk_calloc, chk_realloc, chk_memalign }
-};
-
-enum {
- INDEX_NORMAL = 0,
- INDEX_LEAK_CHECK,
- INDEX_MALLOC_FILL,
- INDEX_MALLOC_CHECK,
-};
-
-static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
-static int gMallocDebugLevel;
-static int gTrapOnError = 1;
-
-void* malloc(size_t bytes) {
- return gMallocDispatch->malloc(bytes);
-}
-void free(void* mem) {
- gMallocDispatch->free(mem);
-}
-void* calloc(size_t n_elements, size_t elem_size) {
- return gMallocDispatch->calloc(n_elements, elem_size);
-}
-void* realloc(void* oldMem, size_t bytes) {
- return gMallocDispatch->realloc(oldMem, bytes);
-}
-void* memalign(size_t alignment, size_t bytes) {
- return gMallocDispatch->memalign(alignment, bytes);
-}
-
-// =============================================================================
// malloc check functions
// =============================================================================
@@ -534,7 +296,9 @@ static void assert_log_message(const char* format, ...)
va_list args;
pthread_mutex_lock(&gAllocationsMutex);
- gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+ {
+ const MallocDebug* current_dispatch = __libc_malloc_dispatch;
+ __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
va_start(args, format);
__libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
format, args);
@@ -543,7 +307,8 @@ static void assert_log_message(const char* format, ...)
if (gTrapOnError) {
__builtin_trap();
}
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+ __libc_malloc_dispatch = current_dispatch;
+ }
pthread_mutex_unlock(&gAllocationsMutex);
}
@@ -576,7 +341,7 @@ static int chk_mem_check(void* mem,
buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
if (buf[i] != CHK_SENTINEL_VALUE) {
- assert_log_message(
+ assert_log_message(
"*** %s CHECK: buffer %p "
"corrupted %d bytes before allocation",
func, mem, CHK_SENTINEL_HEAD_SIZE-i);
@@ -592,7 +357,7 @@ static int chk_mem_check(void* mem,
buf = (char*)mem + bytes;
for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
if (buf[i] != CHK_SENTINEL_VALUE) {
- assert_log_message(
+ assert_log_message(
"*** %s CHECK: buffer %p, size=%lu, "
"corrupted %d bytes after allocation",
func, buffer, bytes, i+1);
@@ -746,11 +511,11 @@ void* leak_malloc(size_t bytes)
intptr_t backtrace[BACKTRACE_SIZE];
size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
-
+
AllocationEntry* header = (AllocationEntry*)base;
header->entry = record_backtrace(backtrace, numEntries, bytes);
header->guard = GUARD;
-
+
// now increment base to point to after our header.
// this should just work since our header is 8 bytes.
base = (AllocationEntry*)base + 1;
@@ -768,7 +533,7 @@ void leak_free(void* mem)
// check the guard to make sure it is valid
AllocationEntry* header = (AllocationEntry*)mem - 1;
-
+
if (header->guard != GUARD) {
// could be a memaligned block
if (((void**)mem)[-1] == MEMALIGN_GUARD) {
@@ -776,7 +541,7 @@ void leak_free(void* mem)
header = (AllocationEntry*)mem - 1;
}
}
-
+
if (header->guard == GUARD || is_valid_entry(header->entry)) {
// decrement the allocations
HashEntry* entry = header->entry;
@@ -845,7 +610,7 @@ void* leak_memalign(size_t alignment, size_t bytes)
// need to make sure it's a power of two
if (alignment & (alignment-1))
alignment = 1L << (31 - __builtin_clz(alignment));
-
+
// here, aligment is at least MALLOC_ALIGNMENT<<1 bytes
// we will align by at least MALLOC_ALIGNMENT bytes
// and at most alignment-MALLOC_ALIGNMENT bytes
@@ -858,7 +623,7 @@ void* leak_memalign(size_t alignment, size_t bytes)
// align the pointer
ptr += ((-ptr) % alignment);
-
+
// there is always enough space for the base pointer and the guard
((void**)ptr)[-1] = MEMALIGN_GUARD;
((void**)ptr)[-2] = base;
@@ -867,60 +632,12 @@ void* leak_memalign(size_t alignment, size_t bytes)
}
return base;
}
-#endif /* MALLOC_LEAK_CHECK */
-
-// called from libc_init()
-extern char* __progname;
-void malloc_debug_init()
+/* Initializes malloc debugging framework.
+ * See comments on MallocDebugInit in malloc_debug_common.h
+ */
+int malloc_debug_initialize(void)
{
- unsigned int level = 0;
-#ifdef MALLOC_LEAK_CHECK
- // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
- level = 1;
-#endif
- char env[PROP_VALUE_MAX];
- int len = __system_property_get("libc.debug.malloc", env);
-
- if (len) {
- level = atoi(env);
-#ifndef MALLOC_LEAK_CHECK
- /* Alert the user that libc_debug.so needs to be installed as libc.so
- * when performing malloc checks.
- */
- if (level != 0) {
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "Malloc checks need libc_debug.so pushed to the device!\n");
-
- }
-#endif
- }
-
-#ifdef MALLOC_LEAK_CHECK
- gMallocDebugLevel = level;
- switch (level) {
- default:
- case 0:
- gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
- break;
- case 1:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (leak checker)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
- break;
- case 5:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (fill)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
- break;
- case 10:
- __libc_android_log_print(ANDROID_LOG_INFO, "libc",
- "%s using MALLOC_DEBUG = %d (sentinels, fill)\n",
- __progname, level);
- gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
- break;
- }
-#endif
+ // We don't really have anything that requires initialization here.
+ return 0;
}
diff --git a/libc/bionic/malloc_debug_qemu.c b/libc/bionic/malloc_debug_qemu.c
new file mode 100644
index 0000000..4b694e9
--- /dev/null
+++ b/libc/bionic/malloc_debug_qemu.c
@@ -0,0 +1,1014 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Contains an implementation of memory allocation routines instrumented to
+ * detect memory allocation violations, such as memory leaks and buffer
+ * overruns, when running in the emulator.
+ * The code implemented here is intended to run in the emulated environment
+ * only, and serves simply as hooks into the memory allocation routines. Its
+ * main job is to notify the emulator about memory being allocated and
+ * deallocated, providing information about each allocation. The idea is that
+ * the emulator keeps a list of currently allocated blocks and, knowing the
+ * boundaries of each block, can verify that ld/st accesses to these blocks
+ * don't step over the boundaries set for the user. To enforce that, each
+ * memory block allocated by this code is guarded with "prefix" and "suffix"
+ * areas, so any access the emulator detects inside these guarding areas can
+ * be treated as an access violation.
+ */
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <errno.h>
+#include "dlmalloc.h"
+#include "logd.h"
+#include "malloc_debug_common.h"
+
+/* This file should be included into the build only when
+ * MALLOC_QEMU_INSTRUMENT macro is defined. */
+#ifndef MALLOC_QEMU_INSTRUMENT
+#error MALLOC_QEMU_INSTRUMENT is not defined.
+#endif // !MALLOC_QEMU_INSTRUMENT
+
+/* Controls the access violation test performed to make sure that we catch
+ * AVs every time they occur. See test_access_violation for more info. This
+ * macro is used for internal testing purposes and should always be set to
+ * zero for production builds. */
+#define TEST_ACCESS_VIOLATIONS 0
+
+// =============================================================================
+// Communication structures
+// =============================================================================
+
+/* Describes a memory block allocated from the heap. This structure is passed
+ * along with the TRACE_DEV_REG_MALLOC event to inform the emulator about a
+ * new memory block being allocated from the heap. The entire structure is
+ * initialized by the guest system before the event is fired up. It is
+ * important to remember that the same structure (an exact copy, except for
+ * replacing pointers with target_ulong) is also declared in the emulator's
+ * sources (file memcheck/memcheck_common.h). So, every time a change is made
+ * to either of these two declarations, the other one must be updated
+ * accordingly.
+ */
+typedef struct MallocDesc {
+ /* Pointer to the memory block actually allocated from the heap. Note that
+ * this is not the pointer that is returned to the malloc's caller. Pointer
+ * returned to the caller is calculated by adding value stored in this field
+ * to the value stored in prefix_size field of this structure.
+ */
+ void* ptr;
+
+ /* Number of bytes requested by the malloc's caller. */
+ uint32_t requested_bytes;
+
+    /* Byte size of the prefix data. The actual pointer returned to the
+     * malloc's caller is calculated by adding the value stored in this field
+     * to the value stored in the ptr field of this structure.
+     */
+ uint32_t prefix_size;
+
+ /* Byte size of the suffix data. */
+ uint32_t suffix_size;
+
+ /* Id of the process that initialized libc instance, in which allocation
+ * has occurred. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_MALLOC event handling. In case of an error,
+ * emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Id of the process in context of which allocation has occurred.
+ * Value in this field may differ from libc_pid value, if process that
+ * is doing allocation has been forked from the process that initialized
+ * libc instance.
+ */
+ uint32_t allocator_pid;
+
+ /* Number of access violations detected on this allocation. */
+ uint32_t av_count;
+} MallocDesc;
+
+/* Describes memory block info queried from the emulator. This structure is
+ * passed along with the TRACE_DEV_REG_QUERY_MALLOC event. When handling free
+ * and realloc calls, we need information about memory blocks that were
+ * actually allocated in previous calls to malloc, calloc, memalign, or
+ * realloc. Since we don't keep this information directly in the allocated
+ * block, but rather in the emulator, we need to query the emulator for it
+ * with the TRACE_DEV_REG_QUERY_MALLOC query. The entire structure is
+ * initialized by the guest system before the event is fired up. It is
+ * important to remember that the same structure (an exact copy, except for
+ * replacing pointers with target_ulong) is also declared in the emulator's
+ * sources (file memcheck/memcheck_common.h). So, every time a change is made
+ * to either of these two declarations, the other one must be updated
+ * accordingly.
+ */
+typedef struct MallocDescQuery {
+    /* Pointer for which information is queried. Note that this doesn't have
+     * to be the exact pointer returned to the malloc's caller; it can point
+     * anywhere inside an allocated block, including the guarding areas. The
+     * emulator will respond with information about the allocated block that
+     * contains this pointer.
+     */
+ void* ptr;
+
+ /* Id of the process that initialized libc instance, in which this query
+ * is called. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_QUERY_MALLOC event handling. In case of an
+ * error, emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Process ID in context of which query is made. */
+ uint32_t query_pid;
+
+ /* Code of the allocation routine, in context of which query has been made:
+ * 1 - free
+ * 2 - realloc
+ */
+ uint32_t routine;
+
+ /* Address of memory allocation descriptor for the queried pointer.
+ * Descriptor, addressed by this field is initialized by the emulator in
+ * response to the query.
+ */
+ MallocDesc* desc;
+} MallocDescQuery;
+
+/* Describes a memory block that is being freed back to the heap. This
+ * structure is passed along with the TRACE_DEV_REG_FREE_PTR event. The
+ * entire structure is initialized by the guest system before the event is
+ * fired up. It is important to remember that the same structure (an exact
+ * copy, except for replacing pointers with target_ulong) is also declared in
+ * the emulator's sources (file memcheck/memcheck_common.h). So, every time a
+ * change is made to either of these two declarations, the other one must be
+ * updated accordingly.
+ */
+typedef struct MallocFree {
+ /* Pointer to be freed. */
+ void* ptr;
+
+ /* Id of the process that initialized libc instance, in which this free
+ * is called. This field is used by the emulator to report errors in
+ * the course of TRACE_DEV_REG_FREE_PTR event handling. In case of an
+ * error, emulator sets this field to zero (invalid value for a process ID).
+ */
+ uint32_t libc_pid;
+
+ /* Process ID in context of which memory is being freed. */
+ uint32_t free_pid;
+} MallocFree;
+
+// =============================================================================
+// Communication events
+// =============================================================================
+
+/* Notifies the emulator that libc has been initialized for a process.
+ * Event's value parameter is PID for the process in context of which libc has
+ * been initialized.
+ */
+#define TRACE_DEV_REG_LIBC_INIT 1536
+
+/* Notifies the emulator about a new memory block being allocated.
+ * Event's value parameter points to MallocDesc instance that contains
+ * allocated block information. Note that 'libc_pid' field of the descriptor
+ * is used by emulator to report failure in handling this event. In case
+ * of a failure emulator will zero that field before completing this event.
+ */
+#define TRACE_DEV_REG_MALLOC 1537
+
+/* Notifies the emulator about memory block being freed.
+ * Event's value parameter points to MallocFree descriptor that contains
+ * information about block that's being freed. Note that 'libc_pid' field
+ * of the descriptor is used by emulator to report failure in handling this
+ * event. In case of a failure emulator will zero that field before completing
+ * this event.
+ */
+#define TRACE_DEV_REG_FREE_PTR 1538
+
+/* Queries the emulator about allocated memory block information.
+ * Event's value parameter points to MallocDescQuery descriptor that contains
+ * query parameters. Note that 'libc_pid' field of the descriptor is used by
+ * emulator to report failure in handling this event. In case of a failure
+ * emulator will zero that field before completing this event.
+ */
+#define TRACE_DEV_REG_QUERY_MALLOC 1539
+
+/* Queries the emulator to print a string to its stdout.
+ * Event's value parameter points to a zero-terminated string to be printed.
+ */
+#define TRACE_DEV_REG_PRINT_USER_STR 1540
+
+static void notify_qemu_string(const char* str);
+static void qemu_log(int prio, const char* fmt, ...);
+static void dump_malloc_descriptor(char* str,
+ size_t str_buf_size,
+ const MallocDesc* desc);
+
+// =============================================================================
+// Macros
+// =============================================================================
+
+/* Defines the default size of the allocation prefix.
+ * Note that we make the prefix area quite large in order to increase the
+ * chances of catching buffer underruns. */
+#define DEFAULT_PREFIX_SIZE (malloc_alignment * 4)
+
+/* Defines the default size of the allocation suffix.
+ * Note that we make the suffix area quite large in order to increase the
+ * chances of catching buffer overruns. */
+#define DEFAULT_SUFFIX_SIZE (malloc_alignment * 4)
+
+/* Debug tracing has been enabled by the emulator. */
+#define DEBUG_TRACING_ENABLED 0x00000001
+/* Error tracing has been enabled by the emulator. */
+#define ERROR_TRACING_ENABLED 0x00000002
+/* Info tracing has been enabled by the emulator. */
+#define INFO_TRACING_ENABLED 0x00000004
+/* All tracing flags combined. */
+#define ALL_TRACING_ENABLED (DEBUG_TRACING_ENABLED | \
+ ERROR_TRACING_ENABLED | \
+ INFO_TRACING_ENABLED)
+
+/* Prints a string to the emulator's stdout.
+ * In the early stages of system loading, logging messages via the
+ * __libc_android_log_print API is not available, because the ADB API has not
+ * been hooked up yet. So, in order to see such messages we need to print
+ * them to the emulator's stdout.
+ * Parameters passed to this macro are the same as the parameters for the
+ * printf routine.
+ */
+#define TR(...) \
+ do { \
+ char tr_str[4096]; \
+ snprintf(tr_str, sizeof(tr_str), __VA_ARGS__ ); \
+ tr_str[sizeof(tr_str) - 1] = '\0'; \
+ notify_qemu_string(&tr_str[0]); \
+ } while (0)
+
+// =============================================================================
+// Logging macros. Note that we simultaneously log messages to ADB and emulator.
+// =============================================================================
+
+/*
+ * Helper macros for checking if particular trace level is enabled.
+ */
+#define debug_LOG_ENABLED ((tracing_flags & DEBUG_TRACING_ENABLED) != 0)
+#define error_LOG_ENABLED ((tracing_flags & ERROR_TRACING_ENABLED) != 0)
+#define info_LOG_ENABLED ((tracing_flags & INFO_TRACING_ENABLED) != 0)
+#define tracing_enabled(type) (type##_LOG_ENABLED)
+
+/*
+ * Logging helper macros.
+ */
+#define debug_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_DEBUG, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & DEBUG_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_DEBUG, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+#define error_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_ERROR, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & ERROR_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_ERROR, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+#define info_log(format, ...) \
+ do { \
+ __libc_android_log_print(ANDROID_LOG_INFO, "memcheck", \
+ (format), ##__VA_ARGS__ ); \
+ if (tracing_flags & INFO_TRACING_ENABLED) { \
+ qemu_log(ANDROID_LOG_INFO, (format), ##__VA_ARGS__ ); \
+ } \
+ } while (0)
+
+/* Logs message dumping MallocDesc instance at the end of the message.
+ * Param:
+ * type - Message type: debug, error, or info
+ * desc - MallocDesc instance to dump.
+ *    frmt + rest - Format for the message that precedes the dumped descriptor.
+*/
+#define log_mdesc(type, desc, frmt, ...) \
+ do { \
+ if (tracing_enabled(type)) { \
+ char log_str[4096]; \
+ size_t str_len; \
+ snprintf(log_str, sizeof(log_str), frmt, ##__VA_ARGS__); \
+ log_str[sizeof(log_str) - 1] = '\0'; \
+ str_len = strlen(log_str); \
+ dump_malloc_descriptor(log_str + str_len, \
+ sizeof(log_str) - str_len, \
+ (desc)); \
+ type##_log(log_str); \
+ } \
+ } while (0)
+
+// =============================================================================
+// Static data
+// =============================================================================
+
+/* Emulator's magic page address.
+ * This page (mapped from the /dev/qemu_trace device) is used to fire up
+ * events in the emulator. */
+static volatile void* qtrace = NULL;
+
+/* Cached PID of the process in context of which this libc instance
+ * has been initialized. */
+static uint32_t malloc_pid = 0;
+
+/* Memory allocation alignment that is used in dlmalloc.
+ * This variable is updated by memcheck_initialize routine. */
+static uint32_t malloc_alignment = 8;
+
+/* Tracing flags. These flags control which types of logging messages are
+ * enabled by the emulator. See XXX_TRACING_ENABLED for the values of flags
+ * stored in this variable. This variable is updated by memcheck_initialize
+ * routine. */
+static uint32_t tracing_flags = 0;
+
+// =============================================================================
+// Static routines
+// =============================================================================
+
+/* Gets the pointer returned to the malloc caller for the given allocation
+ * descriptor.
+ * Param:
+ * desc - Allocation descriptor.
+ * Return:
+ * Pointer to the allocated memory returned to the malloc caller.
+ */
+static inline void*
+mallocdesc_user_ptr(const MallocDesc* desc)
+{
+ return (char*)desc->ptr + desc->prefix_size;
+}
+
+/* Gets the size of the memory block actually allocated from the heap for the
+ * given allocation descriptor.
+ * Param:
+ * desc - Allocation descriptor.
+ * Return:
+ * Size of memory block actually allocated from the heap.
+ */
+static inline uint32_t
+mallocdesc_alloc_size(const MallocDesc* desc)
+{
+ return desc->prefix_size + desc->requested_bytes + desc->suffix_size;
+}
+
+/* Gets pointer to the end of the allocated block for the given descriptor.
+ * Param:
+ * desc - Descriptor for the memory block, allocated in malloc handler.
+ * Return:
+ * Pointer to the end of (one byte past) the allocated block.
+ */
+static inline void*
+mallocdesc_alloc_end(const MallocDesc* desc)
+{
+ return (char*)desc->ptr + mallocdesc_alloc_size(desc);
+}
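+/* Layout illustration (hypothetical numbers): with ptr == 0x1000,
+ * prefix_size == 32, requested_bytes == 100 and suffix_size == 32, the block
+ * spans [0x1000, 0x10a4); mallocdesc_user_ptr() returns 0x1020, and the
+ * guard areas are [0x1000, 0x1020) and [0x1084, 0x10a4). */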
+
+/* Fires up an event in the emulator.
+ * Param:
+ * code - Event code (one of the TRACE_DEV_XXX).
+ * val - Event's value parameter.
+ */
+static inline void
+notify_qemu(uint32_t code, uint32_t val)
+{
+ if (NULL != qtrace) {
+ *(volatile uint32_t*)((uint32_t)qtrace + ((code - 1024) << 2)) = val;
+ }
+}
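+/* For example, firing TRACE_DEV_REG_MALLOC (code 1537) stores the event's
+ * value at byte offset (1537 - 1024) << 2 == 2052 within the magic page. */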
+
+/* Prints a zero-terminated string to the emulator's stdout (fires up
+ * TRACE_DEV_REG_PRINT_USER_STR event in the emulator).
+ * Param:
+ * str - Zero-terminated string to print.
+ */
+static void
+notify_qemu_string(const char* str)
+{
+ if (str != NULL) {
+ notify_qemu(TRACE_DEV_REG_PRINT_USER_STR, (uint32_t)str);
+ }
+}
+
+/* Fires up TRACE_DEV_REG_LIBC_INIT event in the emulator.
+ * Param:
+ * pid - ID of the process that initialized libc.
+ */
+static void
+notify_qemu_libc_initialized(uint32_t pid)
+{
+ notify_qemu(TRACE_DEV_REG_LIBC_INIT, pid);
+}
+
+/* Fires up TRACE_DEV_REG_MALLOC event in the emulator.
+ * Param:
+ * desc - Pointer to MallocDesc instance containing allocated block
+ * information.
+ * Return:
+ * Zero on success, or -1 on failure. Note that on failure libc_pid field of
+ * the desc parameter passed to this routine has been zeroed out by the
+ * emulator.
+ */
+static inline int
+notify_qemu_malloc(volatile MallocDesc* desc)
+{
+ desc->libc_pid = malloc_pid;
+ desc->allocator_pid = getpid();
+ desc->av_count = 0;
+ notify_qemu(TRACE_DEV_REG_MALLOC, (uint32_t)desc);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return desc->libc_pid != 0 ? 0 : -1;
+}
+
+/* Fires up TRACE_DEV_REG_FREE_PTR event in the emulator.
+ * Param:
+ *  ptr_to_free - Pointer to the memory block that's being freed.
+ * Return:
+ * Zero on success, or -1 on failure.
+ */
+static inline int
+notify_qemu_free(void* ptr_to_free)
+{
+ volatile MallocFree free_desc;
+
+ free_desc.ptr = ptr_to_free;
+ free_desc.libc_pid = malloc_pid;
+ free_desc.free_pid = getpid();
+ notify_qemu(TRACE_DEV_REG_FREE_PTR, (uint32_t)&free_desc);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return free_desc.libc_pid != 0 ? 0 : -1;
+}
+
+/* Fires up TRACE_DEV_REG_QUERY_MALLOC event in the emulator.
+ * Param:
+ * ptr - Pointer to request allocation information for.
+ * desc - Pointer to MallocDesc instance that will receive allocation
+ * information.
+ * routine - Code of the allocation routine, in context of which query is made:
+ * 1 - free
+ * 2 - realloc
+ * Return:
+ * Zero on success, or -1 on failure.
+ */
+static inline int
+query_qemu_malloc_info(void* ptr, MallocDesc* desc, uint32_t routine)
+{
+ volatile MallocDescQuery query;
+
+ query.ptr = ptr;
+ query.libc_pid = malloc_pid;
+ query.query_pid = getpid();
+ query.routine = routine;
+ query.desc = desc;
+ notify_qemu(TRACE_DEV_REG_QUERY_MALLOC, (uint32_t)&query);
+
+ /* Emulator reports failure by zeroing libc_pid field of the
+ * descriptor. */
+ return query.libc_pid != 0 ? 0 : -1;
+}
+
+/* Logs a message to emulator's stdout.
+ * Param:
+ * prio - Message priority (debug, info, or error)
+ * fmt + rest - Message format and parameters.
+ */
+static void
+qemu_log(int prio, const char* fmt, ...)
+{
+ va_list ap;
+ char buf[4096];
+ const char* prefix;
+
+ /* Choose message prefix depending on the priority value. */
+ switch (prio) {
+ case ANDROID_LOG_ERROR:
+ if (!tracing_enabled(error)) {
+ return;
+ }
+ prefix = "E";
+ break;
+ case ANDROID_LOG_INFO:
+ if (!tracing_enabled(info)) {
+ return;
+ }
+ prefix = "I";
+ break;
+ case ANDROID_LOG_DEBUG:
+ default:
+ if (!tracing_enabled(debug)) {
+ return;
+ }
+ prefix = "D";
+ break;
+ }
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ buf[sizeof(buf) - 1] = '\0';
+
+ TR("%s/memcheck: %s\n", prefix, buf);
+}
+
+/* Dumps content of memory allocation descriptor to a string.
+ * Param:
+ * str - String to dump descriptor to.
+ * str_buf_size - Size of string's buffer.
+ * desc - Descriptor to dump.
+ */
+static void
+dump_malloc_descriptor(char* str, size_t str_buf_size, const MallocDesc* desc)
+{
+ if (str_buf_size) {
+ snprintf(str, str_buf_size,
+ "MDesc: %p: %X <-> %X [%u + %u + %u] by pid=%03u in libc_pid=%03u",
+ mallocdesc_user_ptr(desc), (uint32_t)desc->ptr,
+ (uint32_t)mallocdesc_alloc_end(desc), desc->prefix_size,
+ desc->requested_bytes, desc->suffix_size, desc->allocator_pid,
+ desc->libc_pid);
+ str[str_buf_size - 1] = '\0';
+ }
+}
+
+#if TEST_ACCESS_VIOLATIONS
+/* Causes an access violation on an allocation descriptor, and verifies that
+ * the violation has been detected by the memory checker in the emulator.
+ */
+static void
+test_access_violation(const MallocDesc* desc)
+{
+ MallocDesc desc_chk;
+ char ch;
+ volatile char* prefix = (volatile char*)desc->ptr;
+ volatile char* suffix = (volatile char*)mallocdesc_user_ptr(desc) +
+ desc->requested_bytes;
+    /* We're causing an AV by reading from the prefix and suffix areas of the
+     * allocated block. This should produce two access violations, so when we
+     * get the allocation descriptor from QEMU, its av_count should be bigger
+     * than the av_count of the original descriptor by 2. */
+ ch = *prefix;
+ ch = *suffix;
+ if (!query_qemu_malloc_info(mallocdesc_user_ptr(desc), &desc_chk, 2) &&
+ desc_chk.av_count != (desc->av_count + 2)) {
+ log_mdesc(error, &desc_chk,
+ "<libc_pid=%03u, pid=%03u>: malloc: Access violation test failed:\n"
+ "Expected violations count %u is not equal to the actually reported %u",
+ malloc_pid, getpid(), desc->av_count + 2,
+ desc_chk.av_count);
+ }
+}
+#endif // TEST_ACCESS_VIOLATIONS
+
+// =============================================================================
+// API routines
+// =============================================================================
+
+void* qemu_instrumented_malloc(size_t bytes);
+void qemu_instrumented_free(void* mem);
+void* qemu_instrumented_calloc(size_t n_elements, size_t elem_size);
+void* qemu_instrumented_realloc(void* mem, size_t bytes);
+void* qemu_instrumented_memalign(size_t alignment, size_t bytes);
+
+/* Initializes malloc debugging instrumentation for the emulator.
+ * This routine is called from the malloc_init_impl routine implemented in
+ * bionic/libc/bionic/malloc_debug_common.c when malloc debugging gets
+ * initialized for a process. The way the malloc debugging implementation is
+ * done guarantees that this routine is called just once per process.
+ * Return:
+ *  0 on success, or -1 on failure.
+ */
+int
+malloc_debug_initialize(void)
+{
+    /* We will be using the emulator's magic page to report memory allocation
+     * activities. In essence, what the magic page does is translate writes
+     * to the memory mapped space into writes to an I/O port that the
+     * emulator "listens to" on the other end. Note that until we open and
+     * map that device, logging to the emulator's stdout is not available. */
+ int fd = open("/dev/qemu_trace", O_RDWR);
+ if (fd < 0) {
+ error_log("Unable to open /dev/qemu_trace");
+ return -1;
+ } else {
+ qtrace = mmap(0, PAGESIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+
+ if (qtrace == MAP_FAILED) {
+ qtrace = NULL;
+ error_log("Unable to mmap /dev/qemu_trace");
+ return -1;
+ }
+ }
+
+ /* Cache pid of the process this library has been initialized for. */
+ malloc_pid = getpid();
+
+ return 0;
+}
+
+/* Completes malloc debugging instrumentation for the emulator.
+ * Note that this routine is called after a successful return from
+ * malloc_debug_initialize, which means that the connection to the emulator
+ * via the "magic page" has been established.
+ * Param:
+ *  alignment - Alignment requirement set for memory allocations.
+ *  memcheck_param - The emulator's -memcheck option parameters. This string
+ *      contains abbreviations for the guest events that are enabled for
+ *      tracing.
+ * Return:
+ *  0 on success, or -1 on failure.
+ */
+int
+memcheck_initialize(int alignment, const char* memcheck_param)
+{
+ malloc_alignment = alignment;
+
+ /* Parse -memcheck parameter for the guest tracing flags. */
+ while (*memcheck_param != '\0') {
+ switch (*memcheck_param) {
+ case 'a':
+ // Enable all messages from the guest.
+ tracing_flags |= ALL_TRACING_ENABLED;
+ break;
+ case 'd':
+ // Enable debug messages from the guest.
+ tracing_flags |= DEBUG_TRACING_ENABLED;
+ break;
+ case 'e':
+ // Enable error messages from the guest.
+ tracing_flags |= ERROR_TRACING_ENABLED;
+ break;
+ case 'i':
+ // Enable info messages from the guest.
+ tracing_flags |= INFO_TRACING_ENABLED;
+ break;
+ default:
+ break;
+ }
+ if (tracing_flags == ALL_TRACING_ENABLED) {
+ break;
+ }
+ memcheck_param++;
+ }
+
+ notify_qemu_libc_initialized(malloc_pid);
+
+ debug_log("Instrumented for pid=%03u: malloc=%p, free=%p, calloc=%p, realloc=%p, memalign=%p",
+ malloc_pid, qemu_instrumented_malloc, qemu_instrumented_free,
+ qemu_instrumented_calloc, qemu_instrumented_realloc,
+ qemu_instrumented_memalign);
+
+ return 0;
+}
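+/* For example, a memcheck_param of "de" enables DEBUG_TRACING_ENABLED and
+ * ERROR_TRACING_ENABLED, while "a" enables all three tracing flags at
+ * once. */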
+
+/* This routine serves as the entry point for 'malloc'.
+ * Its primary responsibility is to allocate the requested number of bytes
+ * (plus the prefix and suffix guards), and to report the allocation to the
+ * emulator.
+ */
+void*
+qemu_instrumented_malloc(size_t bytes)
+{
+ MallocDesc desc;
+
+    /* Initialize the block descriptor and allocate memory. Note that dlmalloc
+     * returns a valid pointer on a zero allocation. Let's mimic this behavior. */
+ desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ desc.requested_bytes = bytes;
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ desc.ptr = dlmalloc(mallocdesc_alloc_size(&desc));
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> malloc(%u): dlmalloc(%u) failed.",
+ malloc_pid, getpid(), bytes, mallocdesc_alloc_size(&desc));
+ return NULL;
+ }
+
+ // Fire up event in the emulator.
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: malloc: notify_malloc failed for ",
+ malloc_pid, getpid());
+ dlfree(desc.ptr);
+ return NULL;
+ } else {
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+ log_mdesc(info, &desc, "+++ <libc_pid=%03u, pid=%03u> malloc(%u) -> ",
+ malloc_pid, getpid(), bytes);
+ return mallocdesc_user_ptr(&desc);
+ }
+}
+
+/* This routine serves as the entry point for 'free'.
+ * Its primary responsibility is to free the requested memory, and to report
+ * the freed block to the emulator.
+ */
+void
+qemu_instrumented_free(void* mem)
+{
+ MallocDesc desc;
+
+ if (mem == NULL) {
+        // Just let a NULL free go through.
+ dlfree(mem);
+ return;
+ }
+
+    // Query the emulator for information about the block being freed.
+ if (query_qemu_malloc_info(mem, &desc, 1)) {
+ error_log("<libc_pid=%03u, pid=%03u>: free(%p) query_info failed.",
+ malloc_pid, getpid(), mem);
+ return;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+    /* Make sure that the pointer being freed matches what we expect for
+     * this memory block. Note that this violation should already be
+     * caught in the emulator. */
+ if (mem != mallocdesc_user_ptr(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) is invalid for ",
+ malloc_pid, getpid(), mem);
+ return;
+ }
+
+ // Fire up event in the emulator and free block that was actually allocated.
+ if (notify_qemu_free(mem)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) notify_free failed for ",
+ malloc_pid, getpid(), mem);
+ } else {
+ log_mdesc(info, &desc, "--- <libc_pid=%03u, pid=%03u> free(%p) -> ",
+ malloc_pid, getpid(), mem);
+ dlfree(desc.ptr);
+ }
+}
+
+/* This routine serves as the entry point for 'calloc'.
+ * This routine behaves similarly to qemu_instrumented_malloc.
+ */
+void*
+qemu_instrumented_calloc(size_t n_elements, size_t elem_size)
+{
+ MallocDesc desc;
+ void* ret;
+ size_t total_size;
+ size_t total_elements;
+
+ if (n_elements == 0 || elem_size == 0) {
+        // Just let a zero-byte allocation go through.
+ info_log("::: <libc_pid=%03u, pid=%03u>: Zero calloc redir to malloc",
+ malloc_pid, getpid());
+ return qemu_instrumented_malloc(0);
+ }
+
+ /* Fail on overflow - just to be safe even though this code runs only
+ * within the debugging C library, not the production one */
+ if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+ return NULL;
+ }
+
+    /* Calculate the prefix size. The trick here is to make sure that the
+     * first element (returned to the caller) is properly aligned. */
+ if (DEFAULT_PREFIX_SIZE >= elem_size) {
+        /* If the default prefix size is at least the element size, we will
+         * use the default prefix size as-is. */
+ desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ /* For the suffix we will use whatever bytes remain from the prefix
+ * allocation size, aligned to the size of an element, plus the usual
+ * default suffix size. */
+ desc.suffix_size = (DEFAULT_PREFIX_SIZE % elem_size) +
+ DEFAULT_SUFFIX_SIZE;
+ } else {
+        /* Make sure that the prefix is large enough to hold at least one
+         * element, and that the first element returned to the caller is
+         * properly aligned. */
+ desc.prefix_size = elem_size + DEFAULT_PREFIX_SIZE - 1;
+ desc.prefix_size &= ~(malloc_alignment - 1);
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ }
+ desc.requested_bytes = n_elements * elem_size;
+ total_size = desc.requested_bytes + desc.prefix_size + desc.suffix_size;
+ total_elements = total_size / elem_size;
+ total_size %= elem_size;
+ if (total_size != 0) {
+ // Add extra to the suffix area.
+ total_elements++;
+ desc.suffix_size += (elem_size - total_size);
+ }
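+    /* Worked example (hypothetical numbers; with malloc_alignment == 8,
+     * DEFAULT_PREFIX_SIZE == DEFAULT_SUFFIX_SIZE == 32): for calloc(10, 12),
+     * prefix_size == 32, suffix_size == (32 % 12) + 32 == 40, and
+     * requested_bytes == 120, so total_size == 192 == 16 * 12 and we call
+     * dlcalloc(16, 12) with no extra suffix padding needed. */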
+ desc.ptr = dlcalloc(total_elements, elem_size);
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> calloc: dlcalloc(%u(%u), %u) (prx=%u, sfx=%u) failed.",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size,
+ desc.prefix_size, desc.suffix_size);
+ return NULL;
+ }
+
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: calloc(%u(%u), %u): notify_malloc failed for ",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size);
+ dlfree(desc.ptr);
+ return NULL;
+ } else {
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+ log_mdesc(info, &desc, "### <libc_pid=%03u, pid=%03u> calloc(%u(%u), %u) -> ",
+ malloc_pid, getpid(), n_elements, total_elements, elem_size);
+ return mallocdesc_user_ptr(&desc);
+ }
+}
+
+/* This routine serves as the entry point for 'realloc'.
+ * This routine behaves similarly to qemu_instrumented_free +
+ * qemu_instrumented_malloc. Note that this changes the behavior of
+ * "shrinking" an allocation, but overall it doesn't seem to matter, as the
+ * caller of realloc should not expect that the pointer returned after
+ * shrinking will remain the same.
+ */
+void*
+qemu_instrumented_realloc(void* mem, size_t bytes)
+{
+ MallocDesc new_desc;
+ MallocDesc cur_desc;
+ size_t to_copy;
+ void* ret;
+
+ if (mem == NULL) {
+ // Nothing to realloc; just do a regular malloc.
+ info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to malloc",
+ malloc_pid, getpid(), mem, bytes);
+ return qemu_instrumented_malloc(bytes);
+ }
+
+ if (bytes == 0) {
+ // This is a "free" condition.
+ info_log("::: <libc_pid=%03u, pid=%03u>: realloc(%p, %u) redir to free and malloc",
+ malloc_pid, getpid(), mem, bytes);
+ qemu_instrumented_free(mem);
+
+ // This is what dlrealloc does for a "free" realloc.
+ return NULL;
+ }
+
+ // Query the emulator for information about the block being reallocated.
+ if (query_qemu_malloc_info(mem, &cur_desc, 2)) {
+ // Note that this violation should have already been caught in the emulator.
+ error_log("<libc_pid=%03u, pid=%03u>: realloc(%p, %u) query_info failed.",
+ malloc_pid, getpid(), mem, bytes);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&cur_desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ /* Make sure that the pointer being reallocated is what we would expect
+ * for this memory block. Note that this violation should have already
+ * been caught in the emulator. */
+ if (mem != mallocdesc_user_ptr(&cur_desc)) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) is invalid for ",
+ malloc_pid, getpid(), mem, bytes);
+ return NULL;
+ }
+
+ /* TODO: We're a bit inefficient here, always allocating a new block
+ * from the heap. If this realloc shrinks the current buffer, we could
+ * instead shrink it "in place", adjusting suffix_size in the allocation
+ * descriptor stored for this block in the emulator. */
+
+ // Initialize descriptor for the new block.
+ new_desc.prefix_size = DEFAULT_PREFIX_SIZE;
+ new_desc.requested_bytes = bytes;
+ new_desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ new_desc.ptr = dlmalloc(mallocdesc_alloc_size(&new_desc));
+ if (new_desc.ptr == NULL) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): dlmalloc(%u) failed on ",
+ malloc_pid, getpid(), mem, bytes,
+ mallocdesc_alloc_size(&new_desc));
+ return NULL;
+ }
+ ret = mallocdesc_user_ptr(&new_desc);
+
+ // Copy the user data from the old block to the new one.
+ to_copy = bytes < cur_desc.requested_bytes ? bytes :
+ cur_desc.requested_bytes;
+ if (to_copy != 0) {
+ memcpy(ret, mallocdesc_user_ptr(&cur_desc), to_copy);
+ }
+
+ // Register the new block with the emulator.
+ if (notify_qemu_malloc(&new_desc)) {
+ log_mdesc(error, &new_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u) notify_malloc failed -> ",
+ malloc_pid, getpid(), mem, bytes);
+ log_mdesc(error, &cur_desc, " <- ");
+ dlfree(new_desc.ptr);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&new_desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ // Free the old block.
+ if (notify_qemu_free(mem)) {
+ log_mdesc(error, &cur_desc, "<libc_pid=%03u, pid=%03u>: realloc(%p, %u): notify_free failed for ",
+ malloc_pid, getpid(), mem, bytes);
+ /* Since we registered the new descriptor with the emulator, we need
+ * to unregister it before freeing the newly allocated block. */
+ notify_qemu_free(mallocdesc_user_ptr(&new_desc));
+ dlfree(new_desc.ptr);
+ return NULL;
+ }
+ dlfree(cur_desc.ptr);
+
+ log_mdesc(info, &new_desc, "=== <libc_pid=%03u, pid=%03u>: realloc(%p, %u) -> ",
+ malloc_pid, getpid(), mem, bytes);
+ log_mdesc(info, &cur_desc, " <- ");
+
+ return ret;
+}
+
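Stripped of the emulator bookkeeping, the control flow above is the classic allocate/copy/free realloc shape. A hedged sketch, with plain malloc/free standing in for dlmalloc/dlfree and old_size standing in for cur_desc.requested_bytes:

    /* Sketch: the allocate/copy/free skeleton of the instrumented
     * realloc, without guard areas or emulator notifications. */
    #include <stdlib.h>
    #include <string.h>

    static void *realloc_by_copy(void *old, size_t old_size, size_t new_size)
    {
        void *fresh;

        if (old == NULL)
            return malloc(new_size);       /* redirect to plain malloc */
        if (new_size == 0) {
            free(old);                     /* "free" path, as dlrealloc does */
            return NULL;
        }
        fresh = malloc(new_size);
        if (fresh == NULL)
            return NULL;                   /* old block stays valid */
        memcpy(fresh, old, old_size < new_size ? old_size : new_size);
        free(old);
        return fresh;
    }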
+/* This routine serves as the entry point for 'memalign'.
+ * It behaves similarly to qemu_instrumented_malloc.
+ */
+void*
+qemu_instrumented_memalign(size_t alignment, size_t bytes)
+{
+ MallocDesc desc;
+
+ if (bytes == 0) {
+ // Just let a zero-byte allocation go through as malloc(0).
+ info_log("::: <libc_pid=%03u, pid=%03u>: memalign(%X, %u) redir to malloc",
+ malloc_pid, getpid(), alignment, bytes);
+ return qemu_instrumented_malloc(0);
+ }
+
+ /* The prefix size for an aligned allocation must be equal to the
+ * requested alignment, so that the returned user pointer remains
+ * properly aligned even when the alignment requirement is greater
+ * than the default prefix size. */
+ desc.prefix_size = alignment > DEFAULT_PREFIX_SIZE ? alignment :
+ DEFAULT_PREFIX_SIZE;
+ desc.requested_bytes = bytes;
+ desc.suffix_size = DEFAULT_SUFFIX_SIZE;
+ desc.ptr = dlmemalign(desc.prefix_size, mallocdesc_alloc_size(&desc));
+ if (desc.ptr == NULL) {
+ error_log("<libc_pid=%03u, pid=%03u> memalign(%X, %u): dlmalloc(%u) failed.",
+ malloc_pid, getpid(), alignment, bytes,
+ mallocdesc_alloc_size(&desc));
+ return NULL;
+ }
+ if (notify_qemu_malloc(&desc)) {
+ log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: memalign(%X, %u): notify_malloc failed for ",
+ malloc_pid, getpid(), alignment, bytes);
+ dlfree(desc.ptr);
+ return NULL;
+ }
+
+#if TEST_ACCESS_VIOLATIONS
+ test_access_violation(&desc);
+#endif // TEST_ACCESS_VIOLATIONS
+
+ log_mdesc(info, &desc, "@@@ <libc_pid=%03u, pid=%03u> memalign(%X, %u) -> ",
+ malloc_pid, getpid(), alignment, bytes);
+ return mallocdesc_user_ptr(&desc);
+}
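Why the prefix must grow to the full alignment can be checked directly: the user pointer is the raw block plus the prefix, so the prefix itself must be a multiple of the alignment. A small sketch using standard posix_memalign in place of dlmemalign (the sizes are assumptions for the example):

    /* Sketch: with a 64-byte alignment request and a prefix equal to
     * that alignment, block + prefix stays 64-byte aligned. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t alignment = 64;
        size_t prefix = alignment;   /* max(alignment, default prefix) */
        void *block;

        if (posix_memalign(&block, alignment, prefix + 100 + 32) != 0)
            return 1;
        assert((((uintptr_t)block + prefix) % alignment) == 0);
        free(block);
        return 0;
    }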
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread.c
index 7d4056d..ae44b06 100644
--- a/libc/bionic/pthread.c
+++ b/libc/bionic/pthread.c
@@ -43,12 +43,16 @@
#include <memory.h>
#include <assert.h>
#include <malloc.h>
+#include <linux/futex.h>
extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int __set_errno(int);
+#define __likely(cond) __builtin_expect(!!(cond), 1)
+#define __unlikely(cond) __builtin_expect(!!(cond), 0)
+
void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
@@ -711,6 +715,21 @@ int pthread_setschedparam(pthread_t thid, int policy,
int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);
+int __futex_syscall3(volatile void *ftx, int op, int val);
+int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout);
+
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+
+#ifndef FUTEX_WAIT_PRIVATE
+#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
+#endif
+
+#ifndef FUTEX_WAKE_PRIVATE
+#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
+#endif
+
// mutex lock states
//
// 0: unlocked
@@ -722,7 +741,8 @@ int __futex_wake(volatile void *ftx, int count);
* bits: name description
* 31-16 tid owner thread's kernel id (recursive and errorcheck only)
* 15-14 type mutex type
- * 13-2 counter counter of recursive mutexes
+ * 13 shared process-shared flag
+ * 12-2 counter counter of recursive mutexes
* 1-0 state lock state (0, 1 or 2)
*/
@@ -736,9 +756,17 @@ int __futex_wake(volatile void *ftx, int count);
#define MUTEX_TYPE_ERRORCHECK 0x8000
#define MUTEX_COUNTER_SHIFT 2
-#define MUTEX_COUNTER_MASK 0x3ffc
-
+#define MUTEX_COUNTER_MASK 0x1ffc
+#define MUTEX_SHARED_MASK 0x2000
+/* a mutex attribute holds the following fields
+ *
+ * bits: name description
+ * 0-3 type type of mutex
+ * 4 shared process-shared flag
+ */
+#define MUTEXATTR_TYPE_MASK 0x000f
+#define MUTEXATTR_SHARED_MASK 0x0010
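To make the bit layout above concrete, here is one way to decode a mutex value. The mask constants are taken from this patch; the helper macros themselves are illustrative, not part of bionic:

    /* Illustrative decoding of the mutex word documented above. */
    #define MUTEX_STATE(v)     ((v) & 3)                /* bits 1-0   */
    #define MUTEX_COUNTER(v)   (((v) & 0x1ffc) >> 2)    /* bits 12-2  */
    #define MUTEX_IS_SHARED(v) (((v) & 0x2000) != 0)    /* bit 13     */
    #define MUTEX_TYPE(v)      ((v) & 0xc000)           /* bits 15-14 */
    #define MUTEX_TID(v)       (((unsigned)(v)) >> 16)  /* bits 31-16 */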
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
@@ -763,10 +791,14 @@ int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
- if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
- *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
- *type = *attr;
- return 0;
+ if (attr) {
+ int atype = (*attr & MUTEXATTR_TYPE_MASK);
+
+ if (atype >= PTHREAD_MUTEX_NORMAL &&
+ atype <= PTHREAD_MUTEX_ERRORCHECK) {
+ *type = atype;
+ return 0;
+ }
}
return EINVAL;
}
@@ -775,7 +807,7 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
if (attr && type >= PTHREAD_MUTEX_NORMAL &&
type <= PTHREAD_MUTEX_ERRORCHECK ) {
- *attr = type;
+ *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
return 0;
}
return EINVAL;
@@ -790,54 +822,70 @@ int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
switch (pshared) {
case PTHREAD_PROCESS_PRIVATE:
+ *attr &= ~MUTEXATTR_SHARED_MASK;
+ return 0;
+
case PTHREAD_PROCESS_SHARED:
/* our current implementation of pthread actually supports shared
* mutexes but won't clean up if a process dies with the mutex held.
* Nevertheless, it's better than nothing. Shared mutexes are used
* by surfaceflinger and audioflinger.
*/
+ *attr |= MUTEXATTR_SHARED_MASK;
return 0;
}
-
- return ENOTSUP;
+ return EINVAL;
}
int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{
- if (!attr)
+ if (!attr || !pshared)
return EINVAL;
- *pshared = PTHREAD_PROCESS_PRIVATE;
+ *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED
+ : PTHREAD_PROCESS_PRIVATE;
return 0;
}
int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr)
{
- if ( mutex ) {
- if (attr == NULL) {
- mutex->value = MUTEX_TYPE_NORMAL;
- return 0;
- }
- switch ( *attr ) {
- case PTHREAD_MUTEX_NORMAL:
- mutex->value = MUTEX_TYPE_NORMAL;
- return 0;
+ int value = 0;
- case PTHREAD_MUTEX_RECURSIVE:
- mutex->value = MUTEX_TYPE_RECURSIVE;
- return 0;
+ if (mutex == NULL)
+ return EINVAL;
- case PTHREAD_MUTEX_ERRORCHECK:
- mutex->value = MUTEX_TYPE_ERRORCHECK;
- return 0;
- }
+ if (__likely(attr == NULL)) {
+ mutex->value = MUTEX_TYPE_NORMAL;
+ return 0;
}
- return EINVAL;
+
+ if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
+ value |= MUTEX_SHARED_MASK;
+
+ switch (*attr & MUTEXATTR_TYPE_MASK) {
+ case PTHREAD_MUTEX_NORMAL:
+ value |= MUTEX_TYPE_NORMAL;
+ break;
+ case PTHREAD_MUTEX_RECURSIVE:
+ value |= MUTEX_TYPE_RECURSIVE;
+ break;
+ case PTHREAD_MUTEX_ERRORCHECK:
+ value |= MUTEX_TYPE_ERRORCHECK;
+ break;
+ default:
+ return EINVAL;
+ }
+
+ mutex->value = value;
+ return 0;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
+
mutex->value = 0xdead10cc;
return 0;
}
@@ -858,13 +906,15 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
static __inline__ void
_normal_lock(pthread_mutex_t* mutex)
{
+ /* We need to preserve the shared flag during operations */
+ int shared = mutex->value & MUTEX_SHARED_MASK;
/*
* The common case is an unlocked mutex, so we begin by trying to
* change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
* if it made the swap successfully. If the result is nonzero, this
* lock is already held by another thread.
*/
- if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
/*
* We want to go to sleep until the mutex is available, which
* requires promoting it to state 2. We need to swap in the new
@@ -881,8 +931,10 @@ _normal_lock(pthread_mutex_t* mutex)
* that the mutex is in state 2 when we go to sleep on it, which
* guarantees a wake-up call.
*/
- while (__atomic_swap(2, &mutex->value ) != 0)
- __futex_wait(&mutex->value, 2, 0);
+ int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+
+ while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
+ __futex_syscall4(&mutex->value, wait_op, shared|2, 0);
}
}
@@ -893,12 +945,16 @@ _normal_lock(pthread_mutex_t* mutex)
static __inline__ void
_normal_unlock(pthread_mutex_t* mutex)
{
+ /* We need to preserve the shared flag during operations */
+ int shared = mutex->value & MUTEX_SHARED_MASK;
+
/*
- * The mutex value will be 1 or (rarely) 2. We use an atomic decrement
+ * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
* to release the lock. __atomic_dec() returns the previous value;
* if it wasn't 1 we have to do some additional work.
*/
- if (__atomic_dec(&mutex->value) != 1) {
+ if (__atomic_dec(&mutex->value) != (shared|1)) {
+ int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
/*
* Start by releasing the lock. The decrement changed it from
* "contended lock" to "uncontended lock", which means we still
@@ -913,7 +969,7 @@ _normal_unlock(pthread_mutex_t* mutex)
* _normal_lock(), because the __futex_wait() call there will
* return immediately if the mutex value isn't 2.
*/
- mutex->value = 0;
+ mutex->value = shared;
/*
* Wake up one waiting thread. We don't know which thread will be
@@ -936,7 +992,7 @@ _normal_unlock(pthread_mutex_t* mutex)
* Either way we have correct behavior and nobody is orphaned on
* the wait queue.
*/
- __futex_wake(&mutex->value, 1);
+ __futex_syscall3(&mutex->value, wake_op, 1);
}
}
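For readers unfamiliar with the 0/1/2 futex protocol that _normal_lock and _normal_unlock implement, here is the same algorithm restated with GCC __sync builtins and a raw futex(2) syscall. The helper names are invented and this is a sketch under those assumptions, not the bionic code (which must also carry the shared bit through every state value):

    /* Sketch: the 0 (unlocked) / 1 (locked) / 2 (contended) protocol. */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void futex_mutex_lock(int *ftx)
    {
        /* Fast path: 0 -> 1. On contention, force state 2 and sleep;
         * the kernel rechecks that *ftx == 2 before blocking. */
        if (!__sync_bool_compare_and_swap(ftx, 0, 1)) {
            while (__sync_lock_test_and_set(ftx, 2) != 0)
                syscall(SYS_futex, ftx, FUTEX_WAIT_PRIVATE, 2, NULL, NULL, 0);
        }
    }

    static void futex_mutex_unlock(int *ftx)
    {
        /* A previous value of 2 means someone may be asleep: release
         * the lock fully, then wake exactly one waiter. */
        if (__sync_fetch_and_sub(ftx, 1) != 1) {
            *ftx = 0;
            syscall(SYS_futex, ftx, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
        }
    }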
@@ -945,182 +1001,188 @@ static pthread_mutex_t __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
static void
_recursive_lock(void)
{
- _normal_lock( &__recursive_lock);
+ _normal_lock(&__recursive_lock);
}
static void
_recursive_unlock(void)
{
- _normal_unlock( &__recursive_lock );
+ _normal_unlock(&__recursive_lock);
}
-#define __likely(cond) __builtin_expect(!!(cond), 1)
-#define __unlikely(cond) __builtin_expect(!!(cond), 0)
-
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ int mtype, tid, new_lock_type, shared, wait_op;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
- _normal_lock(mutex);
- }
- else
- {
- int tid = __get_thread()->kernel_id;
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* trying to re-lock a mutex we already acquired */
- return EDEADLK;
- }
- /*
- * We own the mutex, but other threads are able to change
- * the contents (e.g. promoting it to "contended"), so we
- * need to hold the global lock.
- */
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- }
- else
- {
- /*
- * If the new lock is available immediately, we grab it in
- * the "uncontended" state.
- */
- int new_lock_type = 1;
-
- for (;;) {
- int oldv;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
- mutex->value = ((tid << 16) | mtype | new_lock_type);
- } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
- oldv ^= 3;
- mutex->value = oldv;
- }
- _recursive_unlock();
-
- if (oldv == mtype)
- break;
-
- /*
- * The lock was held, possibly contended by others. From
- * now on, if we manage to acquire the lock, we have to
- * assume that others are still contending for it so that
- * we'll wake them when we unlock it.
- */
- new_lock_type = 2;
-
- __futex_wait( &mutex->value, oldv, 0 );
- }
- }
+ /* Handle normal case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
+ _normal_lock(mutex);
+ return 0;
+ }
+
+ /* Do we already own this recursive or error-check mutex? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int oldv, counter;
+
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* trying to re-lock a mutex we already acquired */
+ return EDEADLK;
}
+ /*
+ * We own the mutex, but other threads are able to change
+ * the contents (e.g. promoting it to "contended"), so we
+ * need to hold the global lock.
+ */
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
return 0;
}
- return EINVAL;
+
+ /* We don't own the mutex, so try to get it.
+ *
+ * First we try to change its state from 0 to 1; if this
+ * doesn't work, we try to change it to state 2.
+ */
+ new_lock_type = 1;
+
+ /* compute futex wait opcode and restore shared flag in mtype */
+ wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+ mtype |= shared;
+
+ for (;;) {
+ int oldv;
+
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
+ mutex->value = ((tid << 16) | mtype | new_lock_type);
+ } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
+ oldv ^= 3;
+ mutex->value = oldv;
+ }
+ _recursive_unlock();
+
+ if (oldv == mtype)
+ break;
+
+ /*
+ * The lock was held, possibly contended by others. From
+ * now on, if we manage to acquire the lock, we have to
+ * assume that others are still contending for it so that
+ * we'll wake them when we unlock it.
+ */
+ new_lock_type = 2;
+
+ __futex_syscall4(&mutex->value, wait_op, oldv, NULL);
+ }
+ return 0;
}
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ int mtype, tid, oldv, shared;
- if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
- _normal_unlock(mutex);
- }
- else
- {
- int tid = __get_thread()->kernel_id;
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv & MUTEX_COUNTER_MASK) {
- mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
- oldv = 0;
- } else {
- mutex->value = mtype;
- }
- _recursive_unlock();
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- if ((oldv & 3) == 2)
- __futex_wake( &mutex->value, 1 );
- }
- else {
- /* trying to unlock a lock we do not own */
- return EPERM;
- }
- }
+ /* Handle common case first */
+ if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
+ _normal_unlock(mutex);
return 0;
}
- return EINVAL;
+
+ /* Do we already own this recursive or error-check mutex? */
+ tid = __get_thread()->kernel_id;
+ if ( tid != MUTEX_OWNER(mutex) )
+ return EPERM;
+
+ /* We do; decrement the counter, or release the mutex if the counter is 0 */
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv & MUTEX_COUNTER_MASK) {
+ mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
+ oldv = 0;
+ } else {
+ mutex->value = shared | mtype;
+ }
+ _recursive_unlock();
+
+ /* Wake one waiting thread, if any */
+ if ((oldv & 3) == 2) {
+ int wake_op = shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
+ __futex_syscall3(&mutex->value, wake_op, 1);
+ }
+ return 0;
}
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
- if (__likely(mutex != NULL))
+ int mtype, tid, oldv, shared;
+
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
+
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
+
+ /* Handle common case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
{
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
+ return 0;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
- {
- if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
- return 0;
+ return EBUSY;
+ }
- return EBUSY;
- }
- else
- {
- int tid = __get_thread()->kernel_id;
- int oldv;
+ /* Do we already own this recursive or error-check mutex? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int counter;
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* already locked by ourselves */
+ return EDEADLK;
+ }
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* already locked by ourselves */
- return EDEADLK;
- }
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
+ return 0;
+ }
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- return 0;
- }
+ /* Restore sharing bit in mtype */
+ mtype |= shared;
- /* try to lock it */
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) /* uncontended released lock => state 1 */
- mutex->value = ((tid << 16) | mtype | 1);
- _recursive_unlock();
+ /* Try to lock it, just once. */
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) /* uncontended released lock => state 1 */
+ mutex->value = ((tid << 16) | mtype | 1);
+ _recursive_unlock();
- if (oldv != mtype)
- return EBUSY;
+ if (oldv != mtype)
+ return EBUSY;
- return 0;
- }
- }
- return EINVAL;
+ return 0;
}
@@ -1163,100 +1225,152 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
clockid_t clock = CLOCK_MONOTONIC;
struct timespec abstime;
struct timespec ts;
+ int mtype, tid, oldv, new_lock_type, shared, wait_op;
/* compute absolute expiration time */
__timespec_to_relative_msec(&abstime, msecs, clock);
- if (__likely(mutex != NULL))
- {
- int mtype = (mutex->value & MUTEX_TYPE_MASK);
+ if (__unlikely(mutex == NULL))
+ return EINVAL;
- if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
- {
- /* fast path for unconteded lock */
- if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
- return 0;
+ mtype = (mutex->value & MUTEX_TYPE_MASK);
+ shared = (mutex->value & MUTEX_SHARED_MASK);
- /* loop while needed */
- while (__atomic_swap(2, &mutex->value) != 0) {
- if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
- return EBUSY;
+ /* Handle common case first */
+ if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
+ {
+ int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
- __futex_wait(&mutex->value, 2, &ts);
- }
+ /* fast path for uncontended lock */
+ if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
return 0;
+
+ /* loop while needed */
+ while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
+ if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
+ return EBUSY;
+
+ __futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
}
- else
- {
- int tid = __get_thread()->kernel_id;
- int oldv;
+ return 0;
+ }
- if ( tid == MUTEX_OWNER(mutex) )
- {
- int oldv, counter;
+ /* Do we already own this recursive or error-check mutex? */
+ tid = __get_thread()->kernel_id;
+ if ( tid == MUTEX_OWNER(mutex) )
+ {
+ int oldv, counter;
- if (mtype == MUTEX_TYPE_ERRORCHECK) {
- /* already locked by ourselves */
- return EDEADLK;
- }
+ if (mtype == MUTEX_TYPE_ERRORCHECK) {
+ /* already locked by ourselves */
+ return EDEADLK;
+ }
- _recursive_lock();
- oldv = mutex->value;
- counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
- mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
- _recursive_unlock();
- return 0;
- }
- else
- {
- /*
- * If the new lock is available immediately, we grab it in
- * the "uncontended" state.
- */
- int new_lock_type = 1;
-
- for (;;) {
- int oldv;
- struct timespec ts;
-
- _recursive_lock();
- oldv = mutex->value;
- if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
- mutex->value = ((tid << 16) | mtype | new_lock_type);
- } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
- oldv ^= 3;
- mutex->value = oldv;
- }
- _recursive_unlock();
-
- if (oldv == mtype)
- break;
-
- /*
- * The lock was held, possibly contended by others. From
- * now on, if we manage to acquire the lock, we have to
- * assume that others are still contending for it so that
- * we'll wake them when we unlock it.
- */
- new_lock_type = 2;
-
- if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
- return EBUSY;
-
- __futex_wait( &mutex->value, oldv, &ts );
- }
- return 0;
- }
+ _recursive_lock();
+ oldv = mutex->value;
+ counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+ mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+ _recursive_unlock();
+ return 0;
+ }
+
+ /* We don't own the mutex, so try to get it.
+ *
+ * First we try to change its state from 0 to 1; if this
+ * doesn't work, we try to change it to state 2.
+ */
+ new_lock_type = 1;
+
+ /* Compute wait op and restore sharing bit in mtype */
+ wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
+ mtype |= shared;
+
+ for (;;) {
+ int oldv;
+ struct timespec ts;
+
+ _recursive_lock();
+ oldv = mutex->value;
+ if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
+ mutex->value = ((tid << 16) | mtype | new_lock_type);
+ } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
+ oldv ^= 3;
+ mutex->value = oldv;
}
+ _recursive_unlock();
+
+ if (oldv == mtype)
+ break;
+
+ /*
+ * The lock was held, possibly contended by others. From
+ * now on, if we manage to acquire the lock, we have to
+ * assume that others are still contending for it so that
+ * we'll wake them when we unlock it.
+ */
+ new_lock_type = 2;
+
+ if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
+ return EBUSY;
+
+ __futex_syscall4(&mutex->value, wait_op, oldv, &ts);
}
- return EINVAL;
+ return 0;
}
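A short usage sketch of the non-portable timed lock above; the prototype is the one declared in this file, and the EBUSY-on-expiry behavior follows from the code:

    /* Sketch: polling a contended mutex with a 500ms deadline. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    extern int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex,
                                             unsigned msecs);

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void try_for_half_a_second(void)
    {
        int err = pthread_mutex_lock_timeout_np(&m, 500);
        if (err == EBUSY) {          /* deadline expired, lock not taken */
            fprintf(stderr, "still contended after 500ms\n");
            return;
        }
        /* ... critical section ... */
        pthread_mutex_unlock(&m);
    }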
+int pthread_condattr_init(pthread_condattr_t *attr)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ *attr = PTHREAD_PROCESS_PRIVATE;
+ return 0;
+}
+
+int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
+{
+ if (attr == NULL || pshared == NULL)
+ return EINVAL;
+
+ *pshared = *attr;
+ return 0;
+}
+
+int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ if (pshared != PTHREAD_PROCESS_SHARED &&
+ pshared != PTHREAD_PROCESS_PRIVATE)
+ return EINVAL;
+
+ *attr = pshared;
+ return 0;
+}
+
+int pthread_condattr_destroy(pthread_condattr_t *attr)
+{
+ if (attr == NULL)
+ return EINVAL;
+
+ *attr = 0xdeada11d;
+ return 0;
+}
+
+/* We use one bit in condition variable values as the 'shared' flag;
+ * the rest is a counter.
+ */
+#define COND_SHARED_MASK 0x0001
+#define COND_COUNTER_INCREMENT 0x0002
+#define COND_COUNTER_MASK (~COND_SHARED_MASK)
+
+#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0)
/* XXX *technically* there is a race condition that could allow
* XXX a signal to be missed. If thread A is preempted in _wait()
* XXX after unlocking the mutex and before waiting, and if other
- * XXX threads call signal or broadcast UINT_MAX times (exactly),
+ * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
* XXX before thread A is scheduled again and calls futex_wait(),
* XXX then the signal will be lost.
*/
@@ -1264,28 +1378,61 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr)
{
+ if (cond == NULL)
+ return EINVAL;
+
cond->value = 0;
+
+ if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
+ cond->value |= COND_SHARED_MASK;
+
return 0;
}
int pthread_cond_destroy(pthread_cond_t *cond)
{
+ if (cond == NULL)
+ return EINVAL;
+
cond->value = 0xdeadc04d;
return 0;
}
-int pthread_cond_broadcast(pthread_cond_t *cond)
+/* This function is used by pthread_cond_broadcast and
+ * pthread_cond_signal to atomically decrement the counter,
+ * then wake up 'counter' threads.
+ */
+static int
+__pthread_cond_pulse(pthread_cond_t *cond, int counter)
{
- __atomic_dec(&cond->value);
- __futex_wake(&cond->value, INT_MAX);
+ long flags;
+ int wake_op;
+
+ if (__unlikely(cond == NULL))
+ return EINVAL;
+
+ flags = (cond->value & ~COND_COUNTER_MASK);
+ for (;;) {
+ long oldval = cond->value;
+ long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
+ | flags;
+ if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
+ break;
+ }
+
+ wake_op = COND_IS_SHARED(cond) ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE;
+ __futex_syscall3(&cond->value, wake_op, counter);
return 0;
}
+int pthread_cond_broadcast(pthread_cond_t *cond)
+{
+ return __pthread_cond_pulse(cond, INT_MAX);
+}
+
int pthread_cond_signal(pthread_cond_t *cond)
{
- __atomic_dec(&cond->value);
- __futex_wake(&cond->value, 1);
- return 0;
+ return __pthread_cond_pulse(cond, 1);
}
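The counter manipulation in __pthread_cond_pulse preserves bit 0 (the shared flag) while moving the rest of the word. A sketch of that one step in isolation, reusing the mask values defined above (the helper function itself is illustrative):

    /* Sketch: one pulse step on the condvar word defined above. */
    #define COND_SHARED_MASK        0x0001
    #define COND_COUNTER_INCREMENT  0x0002
    #define COND_COUNTER_MASK       (~COND_SHARED_MASK)

    static int cond_pulse_value(int oldval)
    {
        int shared = oldval & COND_SHARED_MASK;
        /* Bump the (wrapping) counter, keeping the shared bit intact. */
        return ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
               | shared;
    }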
int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
@@ -1299,9 +1446,10 @@ int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
{
int status;
int oldvalue = cond->value;
+ int wait_op = COND_IS_SHARED(cond) ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
pthread_mutex_unlock(mutex);
- status = __futex_wait(&cond->value, oldvalue, reltime);
+ status = __futex_syscall4(&cond->value, wait_op, oldvalue, reltime);
pthread_mutex_lock(mutex);
if (status == (-ETIMEDOUT)) return ETIMEDOUT;
@@ -1687,7 +1835,17 @@ extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);
int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
- return __rt_sigprocmask(how, set, oset, _NSIG / 8);
+ /* pthread_sigmask must return the error code, but the syscall
+ * returns 0 or -1 and sets errno instead.
+ */
+ int ret, old_errno = errno;
+
+ ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
+ if (ret < 0)
+ ret = errno;
+
+ errno = old_errno;
+ return ret;
}
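The errno-preserving conversion above is a general pattern for wrapping an errno-style syscall in a pthreads-style API that returns the error code directly. A sketch under that framing, where do_syscall() is a hypothetical placeholder:

    /* Sketch: return the error code, leave the caller's errno alone.
     * do_syscall() stands in for any -1/errno style call. */
    #include <errno.h>

    extern int do_syscall(void);   /* hypothetical */

    static int wrap_as_error_code(void)
    {
        int ret = 0, old_errno = errno;

        if (do_syscall() < 0)
            ret = errno;           /* hand the code back directly */
        errno = old_errno;         /* restore whatever the caller had */
        return ret;
    }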
diff --git a/libc/bionic/semaphore.c b/libc/bionic/semaphore.c
index 0c94600..84b9314 100644
--- a/libc/bionic/semaphore.c
+++ b/libc/bionic/semaphore.c
@@ -180,7 +180,7 @@ int sem_post(sem_t *sem)
if (sem == NULL)
return EINVAL;
- if (__atomic_inc((volatile int*)&sem->count) == 0)
+ if (__atomic_inc((volatile int*)&sem->count) >= 0)
__futex_wake(&sem->count, 1);
return 0;
@@ -196,7 +196,8 @@ int sem_trywait(sem_t *sem)
if (__atomic_dec_if_positive(&sem->count) > 0) {
return 0;
} else {
- return EAGAIN;
+ errno = EAGAIN;
+ return -1;
}
}
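With this change sem_trywait follows the POSIX convention (return -1 and set errno to EAGAIN) instead of returning the error code. Callers should therefore test errno, as in this usage sketch:

    /* Sketch: POSIX-style use of sem_trywait after this fix. */
    #include <errno.h>
    #include <semaphore.h>
    #include <stdio.h>

    static void poll_once(sem_t *sem)
    {
        if (sem_trywait(sem) == 0) {
            /* acquired; do the work, then release */
            sem_post(sem);
        } else if (errno == EAGAIN) {
            fprintf(stderr, "semaphore busy, skipping\n");
        }
    }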
diff --git a/libc/bionic/stubs.c b/libc/bionic/stubs.c
index 365f21a..d495674 100644
--- a/libc/bionic/stubs.c
+++ b/libc/bionic/stubs.c
@@ -185,7 +185,7 @@ app_id_from_name( const char* name )
goto FAIL;
id = strtoul(name+4, &end, 10);
- if (id == 0 || *end != '\0')
+ if (*end != '\0')
goto FAIL;
id += AID_APP;
@@ -361,6 +361,12 @@ char* ttyname(int fd)
return NULL;
}
+int ttyname_r(int fd, char *buf, size_t buflen)
+{
+ fprintf(stderr, "FIX ME! implement ttyname_r() %s:%d\n", __FILE__, __LINE__);
+ return -ERANGE;
+}
+
struct netent *getnetbyaddr(uint32_t net, int type)
{
fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
@@ -378,3 +384,20 @@ struct protoent *getprotobynumber(int proto)
fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
return NULL;
}
+
+char* getusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+ return NULL;
+}
+
+void setusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+}
+
+void endusershell(void)
+{
+ fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+}
+