author    Tejun Heo <tj@kernel.org>  2011-03-23 10:37:00 +0100
committer Tejun Heo <tj@kernel.org>  2011-03-23 10:37:00 +0100
commit    e5c1902e9260a0075ea52cb5ef627a8d9aaede89 (patch)
tree      0fc5ec2460fecfe564323d89227bec9293238c2a /kernel/signal.c
parent    fe1bc6a0954611b806f9e158eb0817cf8ba21660 (diff)
signal: Fix premature completion of group stop when interfered by ptrace
task->signal->group_stop_count is used to track the progress of group
stop.  It's initialized to the number of tasks which need to stop for
the group stop to finish, and each stopping or trapping task decrements
it.  However, a task does not keep track of whether it has already
decremented the counter, so if it is woken up before the group stop is
complete and stops again, it can decrement the counter multiple times.

Please consider the following example code.

  #include <pthread.h>
  #include <signal.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  static void *worker(void *arg)
  {
          while (1)
                  ;
          return NULL;
  }

  int main(void)
  {
          pthread_t thread;
          pid_t pid;
          int i;

          pid = fork();
          if (!pid) {
                  for (i = 0; i < 5; i++)
                          pthread_create(&thread, NULL, worker, NULL);
                  while (1)
                          ;
                  return 0;
          }

          ptrace(PTRACE_ATTACH, pid, NULL, NULL);
          while (1) {
                  waitid(P_PID, pid, NULL, WSTOPPED);
                  ptrace(PTRACE_SINGLESTEP, pid, NULL, (void *)(long)SIGSTOP);
          }
          return 0;
  }

The child creates five threads and the parent continuously traps the
first thread; whenever the child gets a signal, SIGSTOP is delivered to
it.  If an external process sends SIGSTOP to the child, all the other
threads in the process should reliably stop.  However, due to the above
bug, the first thread often ends up consuming group_stop_count multiple
times, so SIGSTOP often ends up stopping none or only some of the other
four threads.

This patch fixes the bug by adding a new field, task->group_stop, which
is protected by siglock, and using the GROUP_STOP_CONSUME flag in it to
track which tasks have yet to consume group_stop_count.

task_clear_group_stop_pending() and task_participate_group_stop() are
added to help manipulate group stop states.  As ptrace_stop() now also
uses task_participate_group_stop(), it will set SIGNAL_STOP_STOPPED if
it completes a group stop.

There still are many issues regarding the interaction between group
stop and ptrace.  Patches to address them will follow.

- Oleg spotted a duplicate GROUP_STOP_CONSUME.  Dropped.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Roland McGrath <roland@redhat.com>
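The effect of the new flag is easiest to see in isolation.  The
following is a minimal user-space model of the bookkeeping, a sketch
for illustration only: struct sigstate, participate_old() and
participate_new() are hypothetical stand-ins that mirror the patch's
logic, not kernel code.

  #include <stdbool.h>
  #include <stdio.h>

  #define GROUP_STOP_CONSUME 0x01

  struct task {
          unsigned int group_stop;      /* models task->group_stop */
  };

  struct sigstate {
          int group_stop_count;         /* models signal->group_stop_count */
  };

  /* Old scheme: every stop decrements, even for a task that already did. */
  static void participate_old(struct sigstate *sig)
  {
          if (sig->group_stop_count > 0)
                  sig->group_stop_count--;
  }

  /* New scheme: only a task still holding GROUP_STOP_CONSUME decrements. */
  static void participate_new(struct task *t, struct sigstate *sig)
  {
          bool consume = t->group_stop & GROUP_STOP_CONSUME;

          t->group_stop &= ~GROUP_STOP_CONSUME;
          if (consume && sig->group_stop_count > 0)
                  sig->group_stop_count--;
  }

  int main(void)
  {
          struct sigstate old_sig = { .group_stop_count = 6 };
          struct sigstate new_sig = { .group_stop_count = 6 };
          struct task traced = { .group_stop = GROUP_STOP_CONSUME };
          int i;

          /* The ptraced thread stops three times before the stop completes. */
          for (i = 0; i < 3; i++) {
                  participate_old(&old_sig);
                  participate_new(&traced, &new_sig);
          }

          printf("unconditional decrement: %d of 6 slots left\n",
                 old_sig.group_stop_count);
          printf("with GROUP_STOP_CONSUME: %d of 6 slots left\n",
                 new_sig.group_stop_count);
          return 0;
  }

With an unconditional decrement the single traced thread drains three
of the six slots (3 left), while the flagged version consumes exactly
one (5 left), leaving the remaining slots for the threads that have not
actually stopped yet.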
Diffstat (limited to 'kernel/signal.c')
-rw-r--r--  kernel/signal.c | 62
1 file changed, 54 insertions(+), 8 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 95ac42d..ecb2008 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -223,6 +223,52 @@ static inline void print_dropped_signal(int sig)
 				current->comm, current->pid, sig);
 }
 
+/**
+ * task_clear_group_stop_pending - clear pending group stop
+ * @task: target task
+ *
+ * Clear group stop states for @task.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static void task_clear_group_stop_pending(struct task_struct *task)
+{
+	task->group_stop &= ~GROUP_STOP_CONSUME;
+}
+
+/**
+ * task_participate_group_stop - participate in a group stop
+ * @task: task participating in a group stop
+ *
+ * @task is participating in a group stop.  Group stop states are cleared
+ * and the group stop count is consumed if %GROUP_STOP_CONSUME was set.  If
+ * the consumption completes the group stop, the appropriate %SIGNAL_*
+ * flags are set.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ */
+static bool task_participate_group_stop(struct task_struct *task)
+{
+	struct signal_struct *sig = task->signal;
+	bool consume = task->group_stop & GROUP_STOP_CONSUME;
+
+	task_clear_group_stop_pending(task);
+
+	if (!consume)
+		return false;
+
+	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
+		sig->group_stop_count--;
+
+	if (!sig->group_stop_count) {
+		sig->flags = SIGNAL_STOP_STOPPED;
+		return true;
+	}
+	return false;
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
@@ -1645,7 +1691,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
 	 * we must participate in the bookkeeping.
 	 */
 	if (current->signal->group_stop_count > 0)
-		--current->signal->group_stop_count;
+		task_participate_group_stop(current);
 
 	current->last_siginfo = info;
 	current->exit_code = exit_code;
@@ -1730,6 +1776,7 @@ static int do_signal_stop(int signr)
 	int notify = 0;
 
 	if (!sig->group_stop_count) {
+		unsigned int gstop = GROUP_STOP_CONSUME;
 		struct task_struct *t;
 
 		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
@@ -1741,6 +1788,7 @@ static int do_signal_stop(int signr)
 		 */
 		sig->group_exit_code = signr;
 
+		current->group_stop = gstop;
 		sig->group_stop_count = 1;
 		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
@@ -1750,19 +1798,19 @@ static int do_signal_stop(int signr)
 			 */
 			if (!(t->flags & PF_EXITING) &&
 			    !task_is_stopped_or_traced(t)) {
+				t->group_stop = gstop;
 				sig->group_stop_count++;
 				signal_wake_up(t, 0);
-			}
+			} else
+				task_clear_group_stop_pending(t);
 	}
 	/*
 	 * If there are no other threads in the group, or if there is
 	 * a group stop in progress and we are the last to stop, report
 	 * to the parent. When ptraced, every thread reports itself.
 	 */
-	if (!--sig->group_stop_count) {
-		sig->flags = SIGNAL_STOP_STOPPED;
+	if (task_participate_group_stop(current))
 		notify = CLD_STOPPED;
-	}
 
 	if (task_ptrace(current))
 		notify = CLD_STOPPED;
@@ -2026,10 +2074,8 @@ void exit_signals(struct task_struct *tsk)
 		recalc_sigpending_and_wake(t);
 
 	if (unlikely(tsk->signal->group_stop_count) &&
-	    !--tsk->signal->group_stop_count) {
-		tsk->signal->flags = SIGNAL_STOP_STOPPED;
+	    task_participate_group_stop(tsk))
 		group_stop = CLD_STOPPED;
-	}
 
 out:
 	spin_unlock_irq(&tsk->sighand->siglock);
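Taken together, the hunks route all three places that previously
decremented group_stop_count directly (ptrace_stop(), do_signal_stop()
and exit_signals()) through task_participate_group_stop(), so a task
whose GROUP_STOP_CONSUME flag has already been cleared can stop or trap
again without draining the counter on behalf of threads that have not
stopped yet.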