x86/asm/32: Add ENDs to some functions and relabel with SYM_CODE_*
authorJiri Slaby <jslaby@suse.cz>
Fri, 11 Oct 2019 11:51:05 +0000 (13:51 +0200)
committerBorislav Petkov <bp@suse.de>
Fri, 18 Oct 2019 09:58:33 +0000 (11:58 +0200)
All of these are functions that are invoked from elsewhere, but they are
not typical C functions, so annotate them using the new SYM_CODE_START.
None of them was balanced with any END, so mark their ends with
SYM_CODE_END as appropriate.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> [hibernate]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Len Brown <len.brown@intel.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-pm@vger.kernel.org
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pingfan Liu <kernelfans@gmail.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-26-jslaby@suse.cz
arch/x86/entry/entry_32.S
arch/x86/kernel/acpi/wakeup_32.S
arch/x86/kernel/ftrace_32.S
arch/x86/kernel/head_32.S
arch/x86/power/hibernate_asm_32.S
arch/x86/realmode/rm/trampoline_32.S
arch/x86/xen/xen-asm_32.S

index 4900a6a5e125438965e6d47ef36b506aec552e5e..64fe7aa50ad27d2d259a2492d396737048af4c30 100644 (file)
@@ -847,9 +847,10 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
  * Xen doesn't set %esp to be precisely what the normal SYSENTER
  * entry point expects, so fix it up before using the normal path.
  */
-ENTRY(xen_sysenter_target)
+SYM_CODE_START(xen_sysenter_target)
        addl    $5*4, %esp                      /* remove xen-provided frame */
        jmp     .Lsysenter_past_esp
+SYM_CODE_END(xen_sysenter_target)
 #endif
 
 /*
index 427249292aef0e6a989c3ad27393f87f7bac9548..daf88f8143c5fe90c83cecaeb58b11889788b23c 100644 (file)
@@ -9,8 +9,7 @@
        .code32
        ALIGN
 
-ENTRY(wakeup_pmode_return)
-wakeup_pmode_return:
+SYM_CODE_START(wakeup_pmode_return)
        movw    $__KERNEL_DS, %ax
        movw    %ax, %ss
        movw    %ax, %fs
@@ -39,6 +38,7 @@ wakeup_pmode_return:
        # jump to place where we left off
        movl    saved_eip, %eax
        jmp     *%eax
+SYM_CODE_END(wakeup_pmode_return)
 
 bogus_magic:
        jmp     bogus_magic
@@ -72,7 +72,7 @@ restore_registers:
        popfl
        ret
 
-ENTRY(do_suspend_lowlevel)
+SYM_CODE_START(do_suspend_lowlevel)
        call    save_processor_state
        call    save_registers
        pushl   $3
@@ -87,6 +87,7 @@ ret_point:
        call    restore_registers
        call    restore_processor_state
        ret
+SYM_CODE_END(do_suspend_lowlevel)
 
 .data
 ALIGN
index 219be1309c37d92ac1b20b17c49310174e89c07b..a43ed4c0402dc89ac19fa2698c27eb913565cfcd 100644 (file)
@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
        ret
 END(ftrace_caller)
 
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
        /*
         * We're here from an mcount/fentry CALL, and the stack frame looks like:
         *
@@ -163,6 +163,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        popl    %eax
 
        jmp     .Lftrace_ret
+SYM_CODE_END(ftrace_regs_caller)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
index e2b3e6cf86cab05a6a366aab4b93e7a7ada82a95..7029bbaccc4112df62710e55a522829da7ef9b1e 100644 (file)
@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
  * can.
  */
 __HEAD
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
        movl pa(initial_stack),%ecx
        
        /* test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
 #else
        jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
+SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
index 6fe383002125f6a8ec01144fda30a647c9f8cb34..a19ed3d231853f135c861b690aad3061545af455 100644 (file)
@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
        ret
 ENDPROC(swsusp_arch_suspend)
 
-ENTRY(restore_image)
+SYM_CODE_START(restore_image)
        /* prepare to jump to the image kernel */
        movl    restore_jump_address, %ebx
        movl    restore_cr3, %ebp
@@ -45,9 +45,10 @@ ENTRY(restore_image)
        /* jump to relocated restore code */
        movl    relocated_restore_code, %eax
        jmpl    *%eax
+SYM_CODE_END(restore_image)
 
 /* code below has been relocated to a safe page */
-ENTRY(core_restore_code)
+SYM_CODE_START(core_restore_code)
        movl    temp_pgt, %eax
        movl    %eax, %cr3
 
@@ -77,6 +78,7 @@ copy_loop:
 
 done:
        jmpl    *%ebx
+SYM_CODE_END(core_restore_code)
 
        /* code below belongs to the image kernel */
        .align PAGE_SIZE
index ff00594a2ed0897a0ecea9dbf58f289d5b0ccad2..3fad907a179f15ea64106dff118c13ec4e79b837 100644 (file)
@@ -29,7 +29,7 @@
        .code16
 
        .balign PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
        wbinvd                  # Needed for NUMA-Q should be harmless for others
 
        LJMPW_RM(1f)
@@ -54,11 +54,13 @@ ENTRY(trampoline_start)
        lmsw    %dx                     # into protected mode
 
        ljmpl   $__BOOT_CS, $pa_startup_32
+SYM_CODE_END(trampoline_start)
 
        .section ".text32","ax"
        .code32
-ENTRY(startup_32)                      # note: also used from wakeup_asm.S
+SYM_CODE_START(startup_32)                     # note: also used from wakeup_asm.S
        jmp     *%eax
+SYM_CODE_END(startup_32)
 
        .bss
        .balign 8
index c15db060a2425e45cded5b40e6363ebf29696cd6..8b8f8355b9381bdf0610d645f57a4723b3b8fbb7 100644 (file)
@@ -56,7 +56,7 @@
        _ASM_EXTABLE(1b,2b)
 .endm
 
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
        /* test eflags for special cases */
        testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
        jnz hyper_iret
@@ -122,6 +122,7 @@ xen_iret_end_crit:
 hyper_iret:
        /* put this out of line since its very rarely used */
        jmp hypercall_page + __HYPERVISOR_iret * 32
+SYM_CODE_END(xen_iret)
 
        .globl xen_iret_start_crit, xen_iret_end_crit
 
@@ -165,7 +166,7 @@ hyper_iret:
  * SAVE_ALL state before going on, since it's usermode state which we
  * eventually need to restore.
  */
-ENTRY(xen_iret_crit_fixup)
+SYM_CODE_START(xen_iret_crit_fixup)
        /*
         * Paranoia: Make sure we're really coming from kernel space.
         * One could imagine a case where userspace jumps into the
@@ -204,4 +205,4 @@ ENTRY(xen_iret_crit_fixup)
 
        lea 4(%edi), %esp               /* point esp to new frame */
 2:     jmp xen_do_upcall
-
+SYM_CODE_END(xen_iret_crit_fixup)