s390: adapt entry.S to the move of thread_struct
author     Martin Schwidefsky <schwidefsky@de.ibm.com>
           Mon, 20 Jul 2015 08:01:46 +0000 (10:01 +0200)
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>
           Mon, 20 Jul 2015 11:22:18 +0000 (13:22 +0200)
git commit 0c8c0f03e3a292e031596484275c14cf39c0ab7a
"x86/fpu, sched: Dynamically allocate 'struct fpu'"
moved the thread_struct to the end of the task_struct.

This causes some of the offsets used in entry.S to overflow their
instruction operand field. To fix this, use aghi to create a
dedicated pointer for the thread_struct.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/traps.c
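
Background on the overflow: several instruction formats used in entry.S,
e.g. the SS-format mvc and the RS-format lctl, encode only a 12-bit
displacement, so a memory operand can sit at most 4095 bytes above its
base register. With thread_struct at the end of task_struct, offsets such
as offsetof(struct task_struct, thread.ksp) can exceed that limit. Below
is a minimal, self-contained sketch of the problem; the layout and the
8192-byte filler are hypothetical, not the real kernel layout:

    /* overflow_sketch.c - hypothetical layout, for illustration only */
    #include <stddef.h>
    #include <stdio.h>

    struct thread_struct { unsigned long ksp; };

    struct task_struct {
            char other_fields[8192];  /* assumed filler pushing thread past 4K */
            struct thread_struct thread;
    };

    int main(void)
    {
            size_t off = offsetof(struct task_struct, thread.ksp);

            /* 8192 does not fit in a 12-bit displacement field (max 4095).
             * The fix first computes task + __TASK_thread into a register
             * with aghi (16-bit signed immediate, so the offset fits) and
             * then addresses thread_struct members with small offsets. */
            printf("thread.ksp offset: %zu, 12-bit max: 4095\n", off);
            return 0;
    }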

diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d0901147ba359fa03e2e4e026699413fa2..a2da259d932741614c4f6b78642c491b3829b223 100644
 
 int main(void)
 {
-       DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-       DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-       DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-       BLANK();
+       DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+       DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
        DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
        BLANK();
-       DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-       DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-       DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+       DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+       DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+       DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+       DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+       DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
        BLANK();
        DEFINE(__TI_task, offsetof(struct thread_info, task));
        DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-       DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
        DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
        DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
        DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
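
For context, asm-offsets.c is compiled to assembly only and never linked;
the DEFINE macro comes from include/linux/kbuild.h and plants marker lines
in the generated .s file, which Kbuild rewrites into #define constants in
asm-offsets.h. That is what lets entry.S refer to __TASK_thread and friends
symbolically. A simplified sketch of the mechanism:

    /* simplified from include/linux/kbuild.h */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define BLANK() \
            asm volatile("\n->" : : )

    /* Compiling with -S leaves marker lines such as
     *     ->__TASK_thread <value> offsetof(struct task_struct, thread)
     * in the assembly output; a sed script rewrites each one into
     *     #define __TASK_thread <value>
     * so entry.S sees plain numeric constants. */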
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4ff66d492ce2a04421b85fa0cc0c34..84062e7a77dad75c50fa60822255ee2b20b34181 100644
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
-       lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
-       lg      %r5,__THREAD_info(%r3)          # get thread_info of next
+       lgr     %r1,%r2
+       aghi    %r1,__TASK_thread               # thread_struct of prev task
+       lg      %r4,__TASK_thread_info(%r2)     # get thread_info of prev
+       lg      %r5,__TASK_thread_info(%r3)     # get thread_info of next
+       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
+       lgr     %r1,%r3
+       aghi    %r1,__TASK_thread               # thread_struct of next task
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r5,__LC_THREAD_INFO            # store thread info of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
+       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-       lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        br      %r14
 
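At the C level, the reworked __switch_to amounts to the following sketch
(reusing the hypothetical structs from the overflow sketch above; the real
routine also updates the lowcore current/thread_info/stack fields and the
pid in control register 4):

    /* sketch: each lgr/aghi pair materializes &task->thread in %r1 so
     * that __THREAD_ksp is a small displacement again */
    void switch_sketch(struct task_struct *prev, struct task_struct *next,
                       unsigned long sp)
    {
            struct thread_struct *pt = &prev->thread;  /* lgr %r1,%r2; aghi */
            struct thread_struct *nt = &next->thread;  /* lgr %r1,%r3; aghi */

            pt->ksp = sp;   /* stg %r15,__THREAD_ksp(%r1): save prev's stack */
            sp = nt->ksp;   /* lg  %r15,__THREAD_ksp(%r1): fetch next's stack */
            (void)sp;       /* next's gprs are then reloaded from this stack */
    }

Note that the load of next's kernel stack pointer moves up in the diff: it
now uses %r1 (next's thread_struct pointer) as its base rather than a
too-large offset from the task pointer in %r3. The pgm_check_handler hunk
below applies the same trick, adding __TASK_thread directly to the task
pointer in %r14 before the __THREAD_trap_tdb access.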
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
        LAST_BREAK %r14
        lg      %r15,__LC_KERNEL_STACK
        lg      %r14,__TI_task(%r12)
+       aghi    %r14,__TASK_thread      # pointer to thread_struct
        lghi    %r13,__LC_PGM_TDB
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f5345538471cea4cd62620439c88a7f867..7bea81d8a3635025b0f372c3f908ade71b188e1b 100644
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
        }
 
        /* get vector interrupt code from fpc */
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
        switch (vic) {
        case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
 
        location = get_trap_ip(regs);
 
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        /* Check for vector register enablement */
        if (MACHINE_HAS_VX && !current->thread.vxrs &&
            (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
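
On the "=m" to "=Q" change: stfpc is an S-format instruction, so its memory
operand can only be a base register plus a 12-bit displacement, with no
index register. GCC's s390 "Q" constraint requests exactly that operand
shape; the looser "m" also allows indexed and long-displacement forms that
stfpc cannot encode, and with thread_struct now past the 4K mark inside
task_struct, "m" could pick such a form for current->thread.fp_regs.fpc.
A minimal sketch (s390 only; read_fpc is an illustrative name, not a
kernel helper):

    /* store the floating-point control register; "=Q" keeps the operand
     * encodable by the S-format stfpc instruction */
    static inline unsigned int read_fpc(void)
    {
            unsigned int fpc;

            asm volatile("stfpc %0" : "=Q" (fpc));
            return fpc;
    }

    /* usage, mirroring vector_exception() above:
     *     vic = (read_fpc() & 0xf00) >> 8;    vector interrupt code */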