x86/boot: Annotate local functions
authorJiri Slaby <jslaby@suse.cz>
Fri, 11 Oct 2019 11:50:47 +0000 (13:50 +0200)
committerBorislav Petkov <bp@suse.de>
Fri, 18 Oct 2019 08:29:34 +0000 (10:29 +0200)
.Lrelocated, .Lpaging_enabled, .Lno_longmode, and .Lin_pm32 are
self-standing local functions; annotate them as such and preserve "no
alignment".

The annotations do not generate anything yet.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-8-jslaby@suse.cz
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/pmjump.S

index 5e30eaaf8576fb7ce0cca4c94fa931888c812101..f9e2a80bd69945f28d9d9271d44a10ed9b5bd9e8 100644 (file)
@@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
        .text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -260,6 +260,7 @@ ENDPROC(efi32_stub_entry)
  */
        xorl    %ebx, %ebx
        jmp     *%eax
+SYM_FUNC_END(.Lrelocated)
 
 #ifdef CONFIG_EFI_STUB
        .data
index d98cd483377eb7461d99ba563c7aaac466674466..7afe6e06706620016e6bc0328b02f2c501aa4e86 100644 (file)
@@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
        .text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -540,6 +540,7 @@ ENDPROC(efi64_stub_entry)
  * Jump to the decompressed kernel.
  */
        jmp     *%rax
+SYM_FUNC_END(.Lrelocated)
 
 /*
  * Adjust the global offset table
@@ -635,9 +636,10 @@ ENTRY(trampoline_32bit_src)
        lret
 
        .code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
        /* Return from the trampoline */
        jmp     *%rdi
+SYM_FUNC_END(.Lpaging_enabled)
 
        /*
          * The trampoline code has a size limit.
@@ -647,11 +649,12 @@ ENTRY(trampoline_32bit_src)
        .org    trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
        .code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
        /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
        hlt
        jmp     1b
+SYM_FUNC_END(.Lno_longmode)
 
 #include "../../kernel/verify_cpu.S"
 
index ea88d52eeac70489b05dff97ac2016bc87034725..81658fe353808738bb2eb0996a61d0a520bc66ab 100644 (file)
@@ -46,7 +46,7 @@ ENDPROC(protected_mode_jump)
 
        .code32
        .section ".text32","ax"
-.Lin_pm32:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
        # Set up data segments for flat 32-bit mode
        movl    %ecx, %ds
        movl    %ecx, %es
@@ -72,4 +72,4 @@ ENDPROC(protected_mode_jump)
        lldt    %cx
 
        jmpl    *%eax                   # Jump to the 32-bit entrypoint
-ENDPROC(.Lin_pm32)
+SYM_FUNC_END(.Lin_pm32)