path: root/fs/aio.c
author     Roland Dreier <roland@purestorage.com>  2011-03-22 16:35:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 17:44:17 -0700
commit     e91f90bb0bb10be9cc8efd09a3cf4ecffcad0db1 (patch)
tree       bd9c134e875a3a96a5f96bbde9a112c180ba3d4f /fs/aio.c
parent     77d1c8eb8a8e0989f4c46e9a40bbd4185d34974e (diff)
aio: wake all waiters when destroying ctx
The test program below will hang because io_getevents() uses
add_wait_queue_exclusive(), which means the wake_up() in io_destroy()
only wakes up one of the threads.  Fix this by using wake_up_all() in
the aio code paths where we want to make sure no one gets stuck.

// t.c -- compile with gcc -lpthread -laio t.c

#include <libaio.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static const int nthr = 2;

void *getev(void *ctx)
{
	struct io_event ev;
	io_getevents(ctx, 1, 1, &ev, NULL);
	printf("io_getevents returned\n");
	return NULL;
}

int main(int argc, char *argv[])
{
	io_context_t ctx = 0;
	pthread_t thread[nthr];
	int i;

	io_setup(1024, &ctx);
	for (i = 0; i < nthr; ++i)
		pthread_create(&thread[i], NULL, getev, ctx);
	sleep(1);
	io_destroy(ctx);
	for (i = 0; i < nthr; ++i)
		pthread_join(thread[i], NULL);
	return 0;
}

Signed-off-by: Roland Dreier <roland@purestorage.com>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
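The hang pattern described above has a close userspace parallel in POSIX condition variables: pthread_cond_signal() wakes at most one waiter, much as wake_up() wakes only a single exclusive waiter on a kernel wait queue, while pthread_cond_broadcast() wakes them all, like wake_up_all(). The sketch below (a hypothetical analogy.c, not part of this patch) exhibits the same "one waiter wakes, the rest hang" behaviour as t.c if the broadcast is swapped for a signal.

// analogy.c -- compile with gcc -pthread analogy.c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld woke up\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t thr[2];
	long i;

	for (i = 0; i < 2; ++i)
		pthread_create(&thr[i], NULL, waiter, (void *)i);
	sleep(1);

	pthread_mutex_lock(&lock);
	done = 1;
	/*
	 * pthread_cond_signal() here would wake only one of the two
	 * waiters and the second pthread_join() would block forever;
	 * broadcast wakes both, mirroring the wake_up_all() fix.
	 */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 2; ++i)
		pthread_join(thr[i], NULL);
	return 0;
}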
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 7f54f43..ebb6a22 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
 	ctx->reqs_active--;
 
 	if (unlikely(!ctx->reqs_active && ctx->dead))
-		wake_up(&ctx->wait);
+		wake_up_all(&ctx->wait);
 }
 
 static void aio_fput_routine(struct work_struct *data)
@@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx)
 	 * by other CPUs at this point.  Right now, we rely on the
 	 * locking done by the above calls to ensure this consistency.
 	 */
-	wake_up(&ioctx->wait);
+	wake_up_all(&ioctx->wait);
 	put_ioctx(ioctx);	/* once for the lookup */
 }
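For context on why the one-word change suffices: wake_up() stops after waking a single task that was queued with add_wait_queue_exclusive() (which is how io_getevents() waits), whereas wake_up_all() wakes every waiter on the queue. In kernels of this vintage the two macros differ only in the nr_exclusive argument they pass to __wake_up(); the excerpt below is quoted from include/linux/wait.h for illustration and is not part of this patch.

/* include/linux/wait.h (illustrative excerpt, not part of this diff):
 * nr_exclusive == 1 stops after one exclusive waiter; 0 wakes them all. */
#define wake_up(x)		__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_all(x)		__wake_up(x, TASK_NORMAL, 0, NULL)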