Diffstat (limited to 'arch')
 arch/i386/kernel/process.c   |  4 ++--
 arch/i386/kernel/vm86.c      |  4 ++--
 arch/x86_64/kernel/process.c | 12 ++++++------
 3 files changed, 10 insertions(+), 10 deletions(-)
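All three files receive the same substitution: segment-register saves drop the explicit "l" suffix in favor of a plain "mov", so the assembler sizes the store from its memory operand, and the i386 sites lose their *(int *)& casts, since a segment-register-to-memory move stores only the 16-bit selector. A minimal user-space sketch of the resulting pattern (hypothetical demo code, not part of this patch; the struct and field names are invented):

#include <stdio.h>

/* Hypothetical stand-in for two adjacent 16-bit selector fields,
 * shaped like the saved_fs/saved_gs pairs touched by this diff. */
struct sel_demo {
	unsigned short fs;
	unsigned short gs;	/* adjacent field; a 32-bit store to fs would spill into it */
};

int main(void)
{
	struct sel_demo d = { 0, 0 };

	/* Unsuffixed "mov": gas emits the 16-bit segment-to-memory store,
	 * sized by the unsigned short operand, so no cast is needed. */
	asm volatile("mov %%fs,%0" : "=m" (d.fs));
	asm volatile("mov %%gs,%0" : "=m" (d.gs));

	printf("fs=%#x gs=%#x\n", d.fs, d.gs);
	return 0;
}

This compiles with gcc on i386 or x86_64; in user space the printed selectors are simply whatever the C runtime left in %fs and %gs.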
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb38..85bd56d443141 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff77..d16cd3738a489 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 9922d2ba24a37..761b6d35e338d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
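A closing design note, inferred from the code itself (no commit message appears on this page): a move from a segment register to memory stores the 16-bit selector, so the unsuffixed "mov" lets gas pick the instruction size from its operand. The old "movl" spelling forced a 32-bit operand size, which is what the i386 *(int *)& casts were papering over, and which never matched the short-sized selector fields (es, ds, fsindex, gsindex) saved in the x86_64 hunks.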