From 2d398807bac92e2177282a8fc76d96dec9f340b7 Mon Sep 17 00:00:00 2001
From: Dmitry Kasatkin
Date: Fri, 18 Jan 2013 23:56:39 +0200
Subject: [PATCH]

--- yaml ---
r: 348949
b: refs/heads/master
c: a67adb997419fb53540d4a4f79c6471c60bc69b6
h: refs/heads/master
i:
  348947: 496dbb9b771627b706dc6d9236732f1e64e11360
v: v3
---
 [refs]                                    |  2 +-
 trunk/arch/x86/kernel/step.c              |  9 ++-
 trunk/include/linux/sched.h               | 11 +---
 trunk/kernel/ptrace.c                     | 72 +++++------------------
 trunk/kernel/sched/core.c                 |  3 +-
 trunk/kernel/signal.c                     | 19 +++---
 trunk/security/integrity/evm/evm_crypto.c |  4 +-
 7 files changed, 33 insertions(+), 87 deletions(-)

diff --git a/[refs] b/[refs]
index 330f7324b4a5..98b7413fc3f3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9067ac85d533651b98c2ff903182a20cbb361fcb
+refs/heads/master: a67adb997419fb53540d4a4f79c6471c60bc69b6
diff --git a/trunk/arch/x86/kernel/step.c b/trunk/arch/x86/kernel/step.c
index 9b4d51d0c0d0..cd3b2438a980 100644
--- a/trunk/arch/x86/kernel/step.c
+++ b/trunk/arch/x86/kernel/step.c
@@ -165,11 +165,10 @@ void set_task_blockstep(struct task_struct *task, bool on)
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
-	 *
-	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
-	 * task is current or it can't be running, otherwise we can race
-	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
-	 * PTRACE_KILL is not safe.
+	 * FIXME: this means that set/clear TIF_BLOCKSTEP is simply
+	 * wrong if task != current, SIGKILL can wakeup the stopped
+	 * tracee and set/clear can play with the running task, this
+	 * can confuse the next __switch_to_xtra().
	 */
	local_irq_disable();
	debugctl = get_debugctlmsr();
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index d2112477ff5e..6fc8f45de4e9 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -2714,16 +2714,7 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
 extern void recalc_sigpending_and_wake(struct task_struct *t);
 extern void recalc_sigpending(void);

-extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-
-static inline void signal_wake_up(struct task_struct *t, bool resume)
-{
-	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
-}
-static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
-{
-	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
-}
+extern void signal_wake_up(struct task_struct *t, int resume_stopped);

 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c
index 6cbeaae4406d..612a56126851 100644
--- a/trunk/kernel/ptrace.c
+++ b/trunk/kernel/ptrace.c
@@ -117,45 +117,11 @@ void __ptrace_unlink(struct task_struct *child)
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-		ptrace_signal_wake_up(child, true);
+		signal_wake_up(child, task_is_traced(child));

	spin_unlock(&child->sighand->siglock);
 }

-/* Ensure that nothing can wake it up, even SIGKILL */
-static bool ptrace_freeze_traced(struct task_struct *task)
-{
-	bool ret = false;
-
-	/* Lockless, nobody but us can set this flag */
-	if (task->jobctl & JOBCTL_LISTENING)
-		return ret;
-
-	spin_lock_irq(&task->sighand->siglock);
-	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-		task->state = __TASK_TRACED;
-		ret = true;
-	}
-	spin_unlock_irq(&task->sighand->siglock);
-
-	return ret;
-}
-
-static void ptrace_unfreeze_traced(struct task_struct *task)
-{
-	if (task->state != __TASK_TRACED)
-		return;
-
-	WARN_ON(!task->ptrace || task->parent != current);
-
-	spin_lock_irq(&task->sighand->siglock);
-	if (__fatal_signal_pending(task))
-		wake_up_state(task, __TASK_TRACED);
-	else
-		task->state = TASK_TRACED;
-	spin_unlock_irq(&task->sighand->siglock);
-}
-
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -185,29 +151,24 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
-	if (child->ptrace && child->parent == current) {
-		WARN_ON(child->state == __TASK_TRACED);
+	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
-		if (ignore_state || ptrace_freeze_traced(child))
+		spin_lock_irq(&child->sighand->siglock);
+		WARN_ON_ONCE(task_is_stopped(child));
+		if (ignore_state || (task_is_traced(child) &&
+				     !(child->jobctl & JOBCTL_LISTENING)))
			ret = 0;
+		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

-	if (!ret && !ignore_state) {
-		if (!wait_task_inactive(child, __TASK_TRACED)) {
-			/*
-			 * This can only happen if may_ptrace_stop() fails and
-			 * ptrace_stop() changes ->state back to TASK_RUNNING,
-			 * so we should not worry about leaking __TASK_TRACED.
-			 */
-			WARN_ON(child->state == __TASK_TRACED);
-			ret = -ESRCH;
-		}
-	}
+	if (!ret && !ignore_state)
+		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+
	/* All systems go.. */
	return ret;
 }

@@ -356,7 +317,7 @@ static int ptrace_attach(struct task_struct *task, long request,
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-		signal_wake_up_state(task, __TASK_STOPPED);
+		signal_wake_up(task, 1);

	spin_unlock(&task->sighand->siglock);

@@ -776,7 +737,7 @@ int ptrace_request(struct task_struct *child, long request,
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
@@ -802,7 +763,7 @@ int ptrace_request(struct task_struct *child, long request,
		 * start of this trap and now. Trigger re-trap.
		 */
		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			ptrace_signal_wake_up(child, true);
+			signal_wake_up(child, true);
		ret = 0;
	}
	unlock_task_sighand(child, &flags);
@@ -939,8 +900,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
-	if (ret || request != PTRACE_DETACH)
-		ptrace_unfreeze_traced(child);

  out_put_task_struct:
	put_task_struct(child);
@@ -1080,11 +1039,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
-	if (!ret) {
+	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);
-		if (ret || request != PTRACE_DETACH)
-			ptrace_unfreeze_traced(child);
-	}

  out_put_task_struct:
	put_task_struct(child);
diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c
index 26058d0bebba..257002c13bb0 100644
--- a/trunk/kernel/sched/core.c
+++ b/trunk/kernel/sched/core.c
@@ -1523,8 +1523,7 @@ static void try_to_wake_up_local(struct task_struct *p)
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
-	return try_to_wake_up(p, TASK_NORMAL, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);

diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c
index 3d09cf6cde75..53cd5c4d1172 100644
--- a/trunk/kernel/signal.c
+++ b/trunk/kernel/signal.c
@@ -680,17 +680,23 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up_state(struct task_struct *t, unsigned int state)
+void signal_wake_up(struct task_struct *t, int resume)
 {
+	unsigned int mask;
+
	set_tsk_thread_flag(t, TIF_SIGPENDING);
+
	/*
-	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
-	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+	mask = TASK_INTERRUPTIBLE;
+	if (resume)
+		mask |= TASK_WAKEKILL;
+	if (!wake_up_state(t, mask))
		kick_process(t);
 }

@@ -838,7 +844,7 @@ static void ptrace_trap_notify(struct task_struct *t)
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }

 /*
@@ -1794,10 +1800,6 @@ static inline int may_ptrace_stop(void)
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
-	 *
-	 * This is almost outdated, a task with the pending SIGKILL can't
-	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
-	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
@@ -1923,7 +1925,6 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

-		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
diff --git a/trunk/security/integrity/evm/evm_crypto.c b/trunk/security/integrity/evm/evm_crypto.c
index dfb26918699c..7dd538ef5b83 100644
--- a/trunk/security/integrity/evm/evm_crypto.c
+++ b/trunk/security/integrity/evm/evm_crypto.c
@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
		rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
					   &xattr_data,
					   sizeof(xattr_data), 0);
-	}
-	else if (rc == -ENODATA)
+	} else if (rc == -ENODATA && inode->i_op->removexattr) {
		rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+	}
	return rc;
 }
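
For readers following the ptrace_check_attach() hunks above: the -ESRCH path they restore is visible from user space whenever a ptrace request races with the tracee entering TASK_TRACED. The stand-alone user-space sketch below is not part of the patch; the demo program, its variable names, and the exact outcome of the race are illustrative assumptions. It only shows the contract a debugger has to respect: wait for the stop before issuing requests.

/*
 * Illustrative user-space sketch (not part of the patch above).
 * ptrace_check_attach() rejects requests against a tracee that has
 * not yet stopped in TASK_TRACED with -ESRCH, so a debugger must
 * waitpid() for the stop before peeking at the tracee.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	static long data = 42;	/* word for the parent to peek; fork() copies it */
	pid_t child = fork();

	if (child == 0) {
		/* Tracee: opt in to tracing, then stop ourselves. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	/*
	 * Racy request: the child may not have reached TASK_TRACED yet,
	 * so this peek can legitimately fail with ESRCH -- that is
	 * ptrace_check_attach() turning away a not-yet-stopped tracee.
	 */
	errno = 0;
	long early = ptrace(PTRACE_PEEKDATA, child, &data, NULL);
	if (early == -1 && errno != 0)
		fprintf(stderr, "early peek failed as expected: %m\n");

	/* Correct sequence: wait for the stop, then issue the request. */
	waitpid(child, NULL, 0);
	errno = 0;
	long word = ptrace(PTRACE_PEEKDATA, child, &data, NULL);
	printf("peek after waitpid: %ld (errno=%d)\n", word, errno);

	kill(child, SIGKILL);
	waitpid(child, NULL, 0);
	return 0;
}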