select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
+ select HAVE_MEMBLOCK
+ select NO_BOOTMEM
select BUG
select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select BROKEN_RODATA
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_ARCH_HARDENED_USERCOPY
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS
--- /dev/null
+/*
+ * Copyright (C) 2016 Helge Deller <deller@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PARISC_DWARF_H
+#define _ASM_PARISC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_REGISTER .cfi_register
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_UNDEFINED .cfi_undefined
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PARISC_DWARF_H */
#ifndef __ASM_PARISC_LINKAGE_H
#define __ASM_PARISC_LINKAGE_H
+#include <asm/dwarf.h>
+
#ifndef __ALIGN
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
* In parisc assembly a semicolon marks a comment while an
* exclamation mark is used to separate independent lines.
*/
+#define ASM_NL !
+
#ifdef __ASSEMBLY__
#define ENTRY(name) \
END(name)
#endif
+#define ENTRY_CFI(name) \
+ ENTRY(name) ASM_NL\
+ CFI_STARTPROC
+
+#define ENDPROC_CFI(name) \
+ ENDPROC(name) ASM_NL\
+ CFI_ENDPROC
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_LINKAGE_H */
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
#else
#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
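For orientation, a sketch rather than part of the patch: the same header derives the mapped size from this order, so the two branches above give 1 << 25 = 32 MB and 1 << 24 = 16 MB of initially mapped kernel memory, and a later hunk in this series panics during early boot if _end lies beyond it.

    #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)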
#include <linux/bug.h>
#include <linux/string.h>
+#include <linux/thread_info.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define clear_user lclear_user
#define __clear_user lclear_user
-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len);
-#define __copy_to_user copy_to_user
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len);
+unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
+ unsigned long len);
+unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
+ unsigned long len);
+unsigned long copy_in_user(void __user *dst, const void __user *src,
+ unsigned long len);
#define __copy_in_user copy_in_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
{
- int sz = __compiletime_object_size(to);
- unsigned long ret = n;
+ int sz = __compiletime_object_size(to);
+ unsigned long ret = n;
- if (likely(sz == -1 || sz >= n))
- ret = __copy_from_user(to, from, n);
- else if (!__builtin_constant_p(n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ ret = __copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
- __bad_copy_user();
+ __bad_copy_user();
if (unlikely(ret))
memset(to + (n - ret), 0, ret);
- return ret;
+
+ return ret;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = __copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
}
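To make the hardened-usercopy behaviour concrete, here is a minimal hypothetical caller (demo_req and demo_set_name are invented for illustration, not part of the patch): with a runtime length the likely branch is taken and check_object_size() validates the destination before copying, while a constant length larger than a compile-time-known object aborts the build via __bad_copy_user().

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    struct demo_req {
            char name[16];
    };

    /* hypothetical helper, not part of the patch */
    static long demo_set_name(struct demo_req *req,
                              const void __user *ubuf, unsigned long len)
    {
            if (len > sizeof(req->name))
                    return -EINVAL;
            /* sz is either unknown (negative) or >= len here, so the copy
             * is vetted by check_object_size() under CONFIG_HARDENED_USERCOPY
             */
            if (copy_from_user(req->name, ubuf, len))
                    return -EFAULT; /* short copy; the uncopied tail was zeroed */
            return 0;
    }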
struct pt_regs;
* copy_thread moved args into task save area.
*/
-ENTRY(ret_from_kernel_thread)
+ENTRY_CFI(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
copy %r31, %r2
b finish_child_return
nop
-ENDPROC(ret_from_kernel_thread)
+ENDPROC_CFI(ret_from_kernel_thread)
/*
* struct task_struct *next)
*
* switch kernel stacks and return prev */
-ENTRY(_switch_to)
+ENTRY_CFI(_switch_to)
STREG %r2, -RP_OFFSET(%r30)
callee_save_float
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
-ENDPROC(_switch_to)
+ENDPROC_CFI(_switch_to)
/*
* Common rfi return path for interruptions, kernel execve, and
.align PAGE_SIZE
-ENTRY(syscall_exit_rfi)
+ENTRY_CFI(syscall_exit_rfi)
mfctl %cr30,%r16
LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
ldo TASK_REGS(%r16),%r16
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
-ENDPROC(syscall_exit_rfi)
+ENDPROC_CFI(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
-ENTRY(intr_save) /* for os_hpmc */
+ENTRY_CFI(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b handle_interruption
ldo R%intr_check_sig(%r2), %r2
-ENDPROC(intr_save)
+ENDPROC_CFI(intr_save)
/*
.endm
.macro fork_like name
-ENTRY(sys_\name\()_wrapper)
+ENTRY_CFI(sys_\name\()_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
ldil L%sys_\name, %r31
be R%sys_\name(%sr4,%r31)
STREG %r28, PT_CR27(%r1)
-ENDPROC(sys_\name\()_wrapper)
+ENDPROC_CFI(sys_\name\()_wrapper)
.endm
fork_like clone
fork_like vfork
/* Set the return value for the child */
-ENTRY(child_return)
+ENTRY_CFI(child_return)
BL schedule_tail, %r2
nop
finish_child_return:
reg_restore %r1
b syscall_exit
copy %r0,%r28
-ENDPROC(child_return)
+ENDPROC_CFI(child_return)
-ENTRY(sys_rt_sigreturn_wrapper)
+ENTRY_CFI(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
*/
bv %r0(%r2)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
-ENDPROC(sys_rt_sigreturn_wrapper)
+ENDPROC_CFI(sys_rt_sigreturn_wrapper)
-ENTRY(syscall_exit)
+ENTRY_CFI(syscall_exit)
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
#else
nop
#endif
-ENDPROC(syscall_exit)
+ENDPROC_CFI(syscall_exit)
#ifdef CONFIG_FUNCTION_TRACER
.align 8
.globl return_to_handler
.type return_to_handler, @function
-ENTRY(return_to_handler)
+ENTRY_CFI(return_to_handler)
.proc
.callinfo caller,frame=FRAME_SIZE
.entry
LDREGM -FRAME_SIZE(%sp),%r3
.exit
.procend
-ENDPROC(return_to_handler)
+ENDPROC_CFI(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
unsigned long new_stack) */
-ENTRY(call_on_stack)
+ENTRY_CFI(call_on_stack)
copy %sp, %r1
/* Regarding the HPPA calling conventions for function pointers,
bv (%rp)
LDREG -68(%sp), %sp
# endif /* CONFIG_64BIT */
-ENDPROC(call_on_stack)
+ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
-get_register:
+ENTRY_CFI(get_register)
/*
* get_register is used by the non access tlb miss handlers to
* copy the value of the general register specified in r8 into
copy %r30,%r1
bv %r0(%r25) /* r31 */
copy %r31,%r1
+ENDPROC_CFI(get_register)
-set_register:
+ENTRY_CFI(set_register)
/*
* set_register is used by the non access tlb miss handlers to
* copy the value of r1 into the general register specified in
copy %r1,%r30
bv %r0(%r25) /* r31 */
copy %r1,%r31
+ENDPROC_CFI(set_register)
*/
.level 1.1
- .data
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <linux/linkage.h>
+#include <linux/init.h>
/*
* stack for os_hpmc, the HPMC handler.
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
+ __PAGE_ALIGNED_BSS
.align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
+ __PAGE_ALIGNED_BSS
.align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
+ .section .bss
.align 8
hpmc_raddr:
.block 128
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
+ .section .bss
.align 8
ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
.text
.import intr_save, code
-ENTRY(os_hpmc)
+ENTRY_CFI(os_hpmc)
.os_hpmc:
/*
b .
nop
-ENDPROC(os_hpmc)
+ENDPROC_CFI(os_hpmc)
.os_hpmc_end:
- nop
-.data
-.align 4
+
+
+ __INITRODATA
.export os_hpmc_size
os_hpmc_size:
.word .os_hpmc_end-.os_hpmc
long status;
struct pdc_system_map_addr_info addr_result;
- dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
+ dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __func__);
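The switch to kmalloc_array() matters because it rejects requests whose byte count would overflow, where an open-coded kmalloc(num_addrs * sizeof(...)) would silently wrap. A simplified sketch of the guard the real helper in <linux/slab.h> applies:

    #include <linux/slab.h>

    /* simplified sketch of kmalloc_array()'s overflow guard */
    static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL; /* n * size would wrap around */
            return kmalloc(n * size, flags);
    }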
.text
.align 128
-ENTRY(flush_tlb_all_local)
+ENTRY_CFI(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_tlb_all_local)
+ENDPROC_CFI(flush_tlb_all_local)
.import cache_info,data
-ENTRY(flush_instruction_cache_local)
+ENTRY_CFI(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_instruction_cache_local)
+ENDPROC_CFI(flush_instruction_cache_local)
.import cache_info, data
-ENTRY(flush_data_cache_local)
+ENTRY_CFI(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_data_cache_local)
+ENDPROC_CFI(flush_data_cache_local)
.align 16
/* Clear page using kernel mapping. */
-ENTRY(clear_page_asm)
+ENTRY_CFI(clear_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(clear_page_asm)
+ENDPROC_CFI(clear_page_asm)
/* Copy page using kernel mapping. */
-ENTRY(copy_page_asm)
+ENTRY_CFI(copy_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(copy_page_asm)
+ENDPROC_CFI(copy_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
.endm
/*
- * We can't do this since copy_user_page is used to bring in
- * file data that might have instructions. Since the data would
- * then need to be flushed out so the i-fetch can see it, it
- * makes more sense to just copy through the kernel translation
- * and flush it.
+ * copy_user_page_asm() performs a page copy using mappings
+ * equivalent to the user page mappings. It can be used to
+ * implement copy_user_page() but unfortunately both the `from'
+ * and `to' pages need to be flushed through mappings equivalent
+ * to the user mappings after the copy because the kernel accesses
+ * the `from' page through the kmap kernel mapping and the `to'
+ * page needs to be flushed since code can be copied. As a
+ * result, this implementation is less efficient than the simpler
+ * copy using the kernel mapping. It only needs the `from' page
+ * to be flushed via the user mapping. The kunmap routines handle
+ * the flushes needed for the kernel mapping.
*
* I'm still keeping this around because it may be possible to
* use it if more information is passed into copy_user_page().
*
*/
-ENTRY(copy_user_page_asm)
+ENTRY_CFI(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(copy_user_page_asm)
+ENDPROC_CFI(copy_user_page_asm)
-ENTRY(clear_user_page_asm)
+ENTRY_CFI(clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(clear_user_page_asm)
+ENDPROC_CFI(clear_user_page_asm)
-ENTRY(flush_dcache_page_asm)
+ENTRY_CFI(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_dcache_page_asm)
+ENDPROC_CFI(flush_dcache_page_asm)
-ENTRY(flush_icache_page_asm)
+ENTRY_CFI(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_icache_page_asm)
+ENDPROC_CFI(flush_icache_page_asm)
-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY_CFI(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_kernel_dcache_page_asm)
+ENDPROC_CFI(flush_kernel_dcache_page_asm)
-ENTRY(purge_kernel_dcache_page_asm)
+ENTRY_CFI(purge_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(purge_kernel_dcache_page_asm)
+ENDPROC_CFI(purge_kernel_dcache_page_asm)
-ENTRY(flush_user_dcache_range_asm)
+ENTRY_CFI(flush_user_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_user_dcache_range_asm)
+ENDPROC_CFI(flush_user_dcache_range_asm)
-ENTRY(flush_kernel_dcache_range_asm)
+ENTRY_CFI(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_kernel_dcache_range_asm)
+ENDPROC_CFI(flush_kernel_dcache_range_asm)
-ENTRY(flush_user_icache_range_asm)
+ENTRY_CFI(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_user_icache_range_asm)
+ENDPROC_CFI(flush_user_icache_range_asm)
-ENTRY(flush_kernel_icache_page)
+ENTRY_CFI(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(flush_kernel_icache_page)
+ENDPROC_CFI(flush_kernel_icache_page)
-ENTRY(flush_kernel_icache_range_asm)
+ENTRY_CFI(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
nop
.exit
.procend
-ENDPROC(flush_kernel_icache_range_asm)
+ENDPROC_CFI(flush_kernel_icache_range_asm)
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
.align 256
-ENTRY(disable_sr_hashing_asm)
+ENTRY_CFI(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
-ENDPROC(disable_sr_hashing_asm)
+ENDPROC_CFI(disable_sr_hashing_asm)
.end
* iodc_fn is the IODC function to call
*/
-ENTRY(real32_call_asm)
+ENTRY_CFI(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
-ENDPROC(real32_call_asm)
+ENDPROC_CFI(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
.text
-save_control_regs:
+ENTRY_CFI(save_control_regs)
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
PUSH_CR(%cr25, %r28)
PUSH_CR(%cr15, %r28)
bv 0(%r2)
nop
+ENDPROC_CFI(save_control_regs)
-restore_control_regs:
+ENTRY_CFI(restore_control_regs)
load32 PA(save_cr_end), %r26
POP_CR(%cr15, %r26)
POP_CR(%cr31, %r26)
POP_CR(%cr24, %r26)
bv 0(%r2)
nop
+ENDPROC_CFI(restore_control_regs)
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
.text
.align 128
-rfi_virt2real:
+ENTRY_CFI(rfi_virt2real)
/* switch to real mode... */
rsm PSW_SM_I,%r0
load32 PA(rfi_v2r_1), %r1
tophys_r1 %r2
bv 0(%r2)
nop
+ENDPROC_CFI(rfi_virt2real)
.text
.align 128
-rfi_real2virt:
+ENTRY_CFI(rfi_real2virt)
rsm PSW_SM_I,%r0
load32 (rfi_r2v_1), %r1
nop
tovirt_r1 %r2
bv 0(%r2)
nop
+ENDPROC_CFI(rfi_real2virt)
#ifdef CONFIG_64BIT
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
-ENTRY(real64_call_asm)
+ENTRY_CFI(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
-ENDPROC(real64_call_asm)
+ENDPROC_CFI(real64_call_asm)
#endif
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
-ENTRY(__canonicalize_funcptr_for_compare)
+ENTRY_CFI(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
-ENDPROC(__canonicalize_funcptr_for_compare)
+ENDPROC_CFI(__canonicalize_funcptr_for_compare)
#include <linux/export.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/machdep.h> /* for pa7300lc_init() proto */
#endif
printk(KERN_CONT ".\n");
+ /*
+ * Check if initial kernel page mappings are sufficient.
+ * panic early if not, else we may access kernel functions
+ * and variables which can't be reached.
+ */
+ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+ panic("KERNEL_INITIAL_ORDER too small!");
pdc_console_init();
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- if (cpu != 0 && cpu < parisc_max_cpus)
- smp_boot_one_cpu(cpu, tidle);
+ if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
+ return -ENOSYS;
return cpu_online(cpu) ? 0 : -ENOSYS;
}
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
- /* With multiple 64bit CPUs online, the cr16's are not syncronized. */
- if (cpu != 0)
- clear_sched_clock_stable();
-#endif
-
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
/* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
- /* bootmap is allocated in setup_bootmem() directly behind bss. */
-
. = ALIGN(HUGEPAGE_SIZE);
_end = . ;
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY(fixup_get_user_skip_1)
+ENTRY_CFI(fixup_get_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
-ENDPROC(fixup_get_user_skip_1)
+ENDPROC_CFI(fixup_get_user_skip_1)
-ENTRY(fixup_get_user_skip_2)
+ENTRY_CFI(fixup_get_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
-ENDPROC(fixup_get_user_skip_2)
+ENDPROC_CFI(fixup_get_user_skip_2)
/* put_user() fixups, store -EFAULT in r8 */
-ENTRY(fixup_put_user_skip_1)
+ENTRY_CFI(fixup_put_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_1)
+ENDPROC_CFI(fixup_put_user_skip_1)
-ENTRY(fixup_put_user_skip_2)
+ENTRY_CFI(fixup_put_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
-ENDPROC(fixup_put_user_skip_2)
+ENDPROC_CFI(fixup_put_user_skip_2)
* otherwise, returns number of bytes not transferred.
*/
-ENTRY(lclear_user)
+ENTRY_CFI(lclear_user)
.proc
.callinfo NO_CALLS
.entry
bv %r0(%r2)
copy %r25,%r28
.exit
-ENDPROC(lclear_user)
+ENDPROC_CFI(lclear_user)
.section .fixup,"ax"
2: fixup_branch $lclu_done
* else strlen + 1 (i.e. includes zero byte).
*/
-ENTRY(lstrnlen_user)
+ENTRY_CFI(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
$lslen_nzero:
b $lslen_done
ldo 1(%r26),%r26 /* special case for N == 0 */
-ENDPROC(lstrnlen_user)
+ENDPROC_CFI(lstrnlen_user)
.section .fixup,"ax"
3: fixup_branch $lslen_done
}
#ifdef __KERNEL__
-unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
+unsigned long __copy_to_user(void __user *dst, const void *src,
+ unsigned long len)
{
mtsp(get_kernel_space(), 1);
mtsp(get_user_space(), 2);
return pa_memcpy((void __force *)dst, src, len);
}
+EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__copy_from_user);
-unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
+unsigned long __copy_from_user(void *dst, const void __user *src,
+ unsigned long len)
{
mtsp(get_user_space(), 1);
mtsp(get_kernel_space(), 2);
return pa_memcpy(dst, (void __force *)src, len);
}
+EXPORT_SYMBOL(__copy_from_user);
unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
{
return dst;
}
-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(memcpy);
return 0;
}
+/*
+ * parisc hardware trap list
+ *
+ * Documented in section 3 "Addressing and Access Control" of the
+ * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
+ * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
+ *
+ * For implementation see handle_interruption() in traps.c
+ */
+static const char * const trap_description[] = {
+	[1] = "High-priority machine check (HPMC)",
+	[2] = "Power failure interrupt",
+	[3] = "Recovery counter trap",
+	[5] = "Low-priority machine check",
+	[6] = "Instruction TLB miss fault",
+	[7] = "Instruction access rights / protection trap",
+	[8] = "Illegal instruction trap",
+	[9] = "Break instruction trap",
+	[10] = "Privileged operation trap",
+	[11] = "Privileged register trap",
+	[12] = "Overflow trap",
+	[13] = "Conditional trap",
+	[14] = "FP Assist Exception trap",
+	[15] = "Data TLB miss fault",
+	[16] = "Non-access ITLB miss fault",
+	[17] = "Non-access DTLB miss fault",
+	[18] = "Data memory protection/unaligned access trap",
+	[19] = "Data memory break trap",
+	[20] = "TLB dirty bit trap",
+	[21] = "Page reference trap",
+	[22] = "Assist emulation trap",
+	[25] = "Taken branch trap",
+	[26] = "Data memory access rights trap",
+	[27] = "Data memory protection ID trap",
+	[28] = "Unaligned data reference trap",
+};
+
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
unsigned long address, struct task_struct *tsk,
struct vm_area_struct *vma)
{
+ const char *trap_name = NULL;
+
if (!unhandled_signal(tsk, SIGSEGV))
return;
pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
tsk->comm, code, address);
print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+
+ if (code < ARRAY_SIZE(trap_description))
+ trap_name = trap_description[code];
+ pr_warn(KERN_CONT " trap #%lu: %s%c", code,
+ trap_name ? trap_name : "unknown",
+ vma ? ',' : '\n');
+
if (vma)
- pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ pr_warn(KERN_CONT " vm_start = 0x%08lx, vm_end = 0x%08lx\n",
vma->vm_start, vma->vm_end);
show_regs(regs);
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
+/*
+ * get_memblock() allocates pages via memblock.
+ * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
+ * doesn't allocate from bottom to top which is needed because we only created
+ * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
+ */
+static void * __init get_memblock(unsigned long size)
+{
+ static phys_addr_t search_addr __initdata;
+ phys_addr_t phys;
+
+ if (!search_addr)
+ search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
+ search_addr = ALIGN(search_addr, size);
+ while (!memblock_is_region_memory(search_addr, size) ||
+ memblock_is_region_reserved(search_addr, size)) {
+ search_addr += size;
+ }
+ phys = search_addr;
+
+ if (phys)
+ memblock_reserve(phys, size);
+ else
+ panic("get_memblock() failed.\n");
+
+ return __va(phys);
+}
+
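A hypothetical early-boot sequence (early_alloc_demo and its output are invented) illustrating the bottom-up behaviour: each call reserves the first free, sufficiently aligned region at or above _end, and the while loop skips regions already reserved, including earlier get_memblock() results, so page-table allocations stay inside the initially mapped area.

    static void __init early_alloc_demo(void)
    {
            /* first fit above _end, aligned to PAGE_SIZE << PMD_ORDER */
            pmd_t *pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
            /* skips the pmd reservation and lands on the next free page */
            pte_t *pg_table = (pte_t *) get_memblock(PAGE_SIZE);

            pr_info("pmd @ %p, pte table @ %p\n", pmd, pg_table);
    }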
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !CONFIG_64BIT */
static void __init setup_bootmem(void)
{
- unsigned long bootmap_size;
unsigned long mem_max;
- unsigned long bootmap_pages;
- unsigned long bootmap_start_pfn;
- unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
int npmem_holes;
}
#endif
- if (npmem_ranges > 1) {
-
- /* Print the memory ranges */
- printk(KERN_INFO "Memory Ranges:\n");
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long start;
- unsigned long size;
- size = (pmem_ranges[i].pages << PAGE_SHIFT);
- start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
- printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
- i,start, start + (size - 1), size >> 20);
- }
- }
-
- sysram_resource_count = npmem_ranges;
- for (i = 0; i < sysram_resource_count; i++) {
- struct resource *res = &sysram_resources[i];
+ /* Print the memory ranges */
+ pr_info("Memory Ranges:\n");
+ for (i = 0; i < npmem_ranges; i++) {
+ struct resource *res = &sysram_resources[i];
+ unsigned long start;
+ unsigned long size;
+ size = (pmem_ranges[i].pages << PAGE_SHIFT);
+ start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
+ pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
+ i, start, start + (size - 1), size >> 20);
+ /* request memory resource */
res->name = "System RAM";
- res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
- res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
+ res->start = start;
+ res->end = start + size - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
+ sysram_resource_count = npmem_ranges;
+
/*
* For 32 bit kernels we limit the amount of memory we can
* support, in order to preserve enough kernel address space
}
#endif
- bootmap_pages = 0;
- for (i = 0; i < npmem_ranges; i++)
- bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);
-
- bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;
-
#ifdef CONFIG_DISCONTIGMEM
for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bootmem_node_data[i];
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
/*
* Initialize and free the full range of memory in each range.
- * Note that the only writing these routines do are to the bootmap,
- * and we've made sure to locate the bootmap properly so that they
- * won't be writing over anything important.
*/
- bootmap_pfn = bootmap_start_pfn;
max_pfn = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long start_pfn;
unsigned long npages;
+ unsigned long start;
+ unsigned long size;
start_pfn = pmem_ranges[i].start_pfn;
npages = pmem_ranges[i].pages;
- bootmap_size = init_bootmem_node(NODE_DATA(i),
- bootmap_pfn,
- start_pfn,
- (start_pfn + npages) );
- free_bootmem_node(NODE_DATA(i),
- (start_pfn << PAGE_SHIFT),
- (npages << PAGE_SHIFT) );
- bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = start_pfn << PAGE_SHIFT;
+ size = npages << PAGE_SHIFT;
+
+ /* add system RAM memblock */
+ memblock_add(start, size);
+
if ((start_pfn + npages) > max_pfn)
max_pfn = start_pfn + npages;
}
*/
max_low_pfn = max_pfn;
- /* bootmap sizing messed up? */
- BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
-
/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
#define PDC_CONSOLE_IO_IODC_SIZE 32768
- reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free +
- PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
- (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
- BOOTMEM_DEFAULT);
- reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
+ PDC_CONSOLE_IO_IODC_SIZE));
+ memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
+ (unsigned long)(_end - KERNEL_BINARY_TEXT_START));
#ifndef CONFIG_DISCONTIGMEM
/* reserve the holes */
for (i = 0; i < npmem_holes; i++) {
- reserve_bootmem_node(NODE_DATA(0),
- (pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT),
- BOOTMEM_DEFAULT);
+ memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
+ (pmem_holes[i].pages << PAGE_SHIFT));
}
#endif
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
- reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
- initrd_reserve, BOOTMEM_DEFAULT);
+ memblock_reserve(__pa(initrd_start), initrd_reserve);
}
}
#endif
*/
if (!pmd) {
- pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+ pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
pmd = (pmd_t *) __pa(pmd);
}
pg_table = (pte_t *)pmd_address(*pmd);
if (!pg_table) {
- pg_table = (pte_t *)
- alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+ pg_table = (pte_t *) get_memblock(PAGE_SIZE);
pg_table = (pte_t *) __pa(pg_table);
}
}
#endif
- empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+ empty_zero_page = get_memblock(PAGE_SIZE);
}
static void __init gateway_init(void)
discard it in modules) */
#define __init __section(.init.text) __cold notrace
#define __initdata __section(.init.data)
-#define __initconst __constsection(.init.rodata)
+#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
#define __exit_call __used __section(.exitcall.exit)
-/*
- * Some architecture have tool chains which do not handle rodata attributes
- * correctly. For those disable special sections for const, so that other
- * architectures can annotate correctly.
- */
-#ifdef CONFIG_BROKEN_RODATA
-#define __constsection(x)
-#else
-#define __constsection(x) __section(x)
-#endif
-
/*
* modpost check for section mismatches during the kernel build.
* A section mismatch happens when there are references from a
*/
#define __ref __section(.ref.text) noinline
#define __refdata __section(.ref.data)
-#define __refconst __constsection(.ref.rodata)
+#define __refconst __section(.ref.rodata)
#ifdef MODULE
#define __exitused
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(.meminit.text) __cold notrace
#define __meminitdata __section(.meminit.data)
-#define __meminitconst __constsection(.meminit.rodata)
+#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
#define __memexitdata __section(.memexit.data)
-#define __memexitconst __constsection(.memexit.rodata)
+#define __memexitconst __section(.memexit.rodata)
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
depends on SMP
bool
-# Can be selected by architectures with broken toolchains
-# that get confused by correct const<->read_only section
-# mappings
-config BROKEN_RODATA
- bool
-
config ASN1
tristate
help