path: root/arch/um/sys-i386/tls.c
author	Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>	2006-03-31 02:30:24 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-31 12:18:52 -0800
commit	54d8d3b5a0ce1cdbad1d3154c9ea9732d394e9c7 (patch)
tree	129a29ab92fba7dc99229c87a38fe8df3ade7b15 /arch/um/sys-i386/tls.c
parent	dd77aec07aec5cb81aed3b4ef79c1ff8bd0e2a68 (diff)
[PATCH] uml: add arch_switch_to for newly forked thread
Newly forked threads have no arch_switch_to_skas() called before their first
run, because when schedule() switches to them they are resumed in the body of
thread_wait() inside fork_handler() rather than in switch_threads() in
switch_to_skas(). Compensate for this missing call.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Acked-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
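The tls.c hunk below guards load_TLS() with a check on to->mm, since kernel
threads have no userspace address space and programming the host's thread area
for them would target a nonexistent userspace process. As a rough, standalone
illustration of that guard pattern (not the kernel's actual code), here is a
minimal userspace sketch; struct task, do_load_tls() and switch_tls() are
hypothetical stand-ins for UML's real types and helpers.

	/* Minimal userspace sketch of the "skip TLS switching for kernel
	 * threads" guard added in this patch. The types and helpers here
	 * (struct task, do_load_tls) are hypothetical stand-ins, not the
	 * kernel's real ones. */
	#include <stdio.h>
	#include <stddef.h>

	struct mm_struct { int dummy; };

	struct task {
		const char *comm;       /* task name */
		struct mm_struct *mm;   /* NULL for kernel threads */
	};

	/* Stand-in for load_TLS(O_FORCE, to): pretend to program the host's
	 * thread area for the task we are switching to. */
	static int do_load_tls(struct task *to)
	{
		printf("loading TLS for %s\n", to->comm);
		return 0;
	}

	/* Mirrors the patched arch_switch_tls_skas(): kernel threads have no
	 * userspace TLS to restore, so report success without touching the
	 * host. */
	static int switch_tls(struct task *from, struct task *to)
	{
		(void)from;
		if (to->mm)
			return do_load_tls(to);
		return 0;
	}

	int main(void)
	{
		struct mm_struct mm = { 0 };
		struct task user_task = { "user-task", &mm };
		struct task kthread   = { "kthread", NULL };

		switch_tls(&kthread, &user_task);  /* loads TLS */
		switch_tls(&user_task, &kthread);  /* skipped: kernel thread */
		return 0;
	}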
Diffstat (limited to 'arch/um/sys-i386/tls.c')
-rw-r--r--	arch/um/sys-i386/tls.c	13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index e3c5bc5..2251654 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -70,8 +70,6 @@ static int get_free_idx(struct task_struct* task)
return -ESRCH;
}
-#define O_FORCE 1
-
static inline void clear_user_desc(struct user_desc* info)
{
/* Postcondition: LDT_empty(info) returns true. */
@@ -84,6 +82,8 @@ static inline void clear_user_desc(struct user_desc* info)
info->seg_not_present = 1;
}
+#define O_FORCE 1
+
static int load_TLS(int flags, struct task_struct *to)
{
int ret = 0;
@@ -162,7 +162,13 @@ void clear_flushed_tls(struct task_struct *task)
* SKAS patch. */
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
- return load_TLS(O_FORCE, to);
+ /* We have no need whatsoever to switch TLS for kernel threads; beyond
+ * that, that would also result in us calling os_set_thread_area with
+ * userspace_pid[cpu] == 0, which gives an error. */
+ if (likely(to->mm))
+ return load_TLS(O_FORCE, to);
+
+ return 0;
}
int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
@@ -324,3 +330,4 @@ int ptrace_get_thread_area(struct task_struct *child, int idx,
out:
return ret;
}
+