From f6b2bc847641ea38e2655c8424fef5d2d19f35f9 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Tue, 29 Nov 2011 11:24:10 +0000
Subject: [PATCH] x86-64: Cleanup some assembly entry points

system_call_after_swapgs doesn't really benefit from forcing
alignment from it - quite the opposite, native code needlessly
so far got a big NOP instruction inserted in front of it. Xen
being the only user of the separate entry point can well live
with the branch going to three bytes into a cache line.

The compatibility mode ptregs entry points for one can make use
of the GLOBAL() macro, and should be suitably aligned. Their
shared continuation point (ia32_ptregs_common) otoh doesn't need
to be global at all, but should continue to be properly aligned.

Signed-off-by: Jan Beulich
Reviewed-by: Andi Kleen
Link: http://lkml.kernel.org/r/4ED4CEEA020000780006407D@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar
---
 arch/x86/ia32/ia32entry.S  | 7 ++++---
 arch/x86/kernel/entry_64.S | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 0d5c279f3732..3e274564f6bf 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -453,8 +453,8 @@ quiet_ni_syscall:
 	CFI_ENDPROC
 
 	.macro PTREGSCALL label, func, arg
-	.globl \label
-\label:
+	ALIGN
+GLOBAL(\label)
 	leaq \func(%rip),%rax
 	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
 	jmp  ia32_ptregs_common
@@ -471,7 +471,8 @@ quiet_ni_syscall:
 	PTREGSCALL stub32_vfork, sys_vfork, %rdi
 	PTREGSCALL stub32_iopl, sys_iopl, %rsi
 
-ENTRY(ia32_ptregs_common)
+	ALIGN
+ia32_ptregs_common:
 	popq %r11
 	CFI_ENDPROC
 	CFI_STARTPROC32 simple
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 75f72a50cf26..cfad7fce6163 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -465,7 +465,7 @@ ENTRY(system_call)
 	 * after the swapgs, so that it can do the swapgs
 	 * for the guest and jump here on syscall.
 	 */
-ENTRY(system_call_after_swapgs)
+GLOBAL(system_call_after_swapgs)
 
 	movq	%rsp,PER_CPU_VAR(old_rsp)
 	movq	PER_CPU_VAR(kernel_stack),%rsp
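
For context: the change hinges on how the ENTRY(), GLOBAL() and ALIGN
assembler macros differ. As a rough sketch of kernels from this era - the
exact definitions live in include/linux/linkage.h and
arch/x86/include/asm/linkage.h and should be checked against the tree you
are actually working on, so treat the bodies below as an approximation -
they expand along these lines:

	/* x86: pad to a 16-byte boundary using 0x90 (NOP) filler bytes */
	#define __ALIGN		.p2align 4, 0x90
	#define ALIGN		__ALIGN

	/* ENTRY(): global symbol with forced alignment; if the preceding
	 * code does not already end on the boundary, padding NOPs are
	 * emitted directly in front of the label. */
	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
		name:

	/* GLOBAL(): global symbol only - no forced alignment, and hence
	 * no padding NOPs in front of the label. */
	#define GLOBAL(name)	\
		.globl name;	\
		name:

Read against that: system_call_after_swapgs moves from ENTRY() to GLOBAL(),
dropping the forced alignment (and with it the big NOP the commit message
mentions) since Xen is the only user of the separate entry point; the
PTREGSCALL stubs replace their open-coded .globl/label pair with an explicit
ALIGN plus GLOBAL(); and ia32_ptregs_common keeps the ALIGN but is no longer
exported as a global symbol.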