OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
- OFFSET(TASK_THREAD_STATUS, task_struct, thread.status);
+ OFFSET(TASK_THREAD_SUM, task_struct, thread.sum);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
DEFINE(TASK_THREAD_S11_RA,
       offsetof(struct task_struct, thread.s[11])
       - offsetof(struct task_struct, thread.ra)
);
- DEFINE(TASK_THREAD_STATUS_RA,
-        offsetof(struct task_struct, thread.status)
+ DEFINE(TASK_THREAD_SUM_RA,
+        offsetof(struct task_struct, thread.sum)
         - offsetof(struct task_struct, thread.ra)
);
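
The OFFSET/DEFINE changes above assume a matching field rename in the RISC-V struct thread_struct (arch/riscv/include/asm/processor.h). A minimal sketch of the assumed shape, with surrounding members elided and the comment added here purely for illustration:

struct thread_struct {
	/* ... ra, sp, s[12] and other switch state elided ... */
	unsigned long sum;	/* CSR_STATUS snapshot saved at __switch_to */
	/* ... */
};
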
REG_S s11, TASK_THREAD_S11_RA(a3)
/* save the user space access flag */
- li s0, SR_SUM
- csrr s1, CSR_STATUS
- REG_S s1, TASK_THREAD_STATUS_RA(a3)
+ csrr s0, CSR_STATUS
+ REG_S s0, TASK_THREAD_SUM_RA(a3)
/* Save the kernel shadow call stack pointer */
scs_save_current
/* Restore context from next->thread */
- REG_L s0, TASK_THREAD_STATUS_RA(a4)
+ REG_L s0, TASK_THREAD_SUM_RA(a4)
+ li s1, SR_SUM
+ and s0, s0, s1
csrs CSR_STATUS, s0
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
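
In C terms, the new save/restore sequence is equivalent to the sketch below; csr_read()/csr_set(), CSR_STATUS and SR_SUM are the existing definitions from asm/csr.h, while the helper itself is made up for illustration and is not part of the patch:

#include <linux/sched.h>	/* struct task_struct */
#include <asm/csr.h>		/* csr_read/csr_set, CSR_STATUS, SR_SUM */

/* Illustrative equivalent of the assembly above. */
static inline void switch_sum_state(struct task_struct *prev,
				    struct task_struct *next)
{
	/* Save the outgoing task's whole CSR_STATUS into thread.sum... */
	prev->thread.sum = csr_read(CSR_STATUS);

	/*
	 * ...but only propagate the user-memory access bit (SR_SUM) to the
	 * incoming task: csrs sets bits and never clears them, so restoring
	 * the unmasked value would OR stale status bits into CSR_STATUS.
	 */
	csr_set(CSR_STATUS, next->thread.sum & SR_SUM);
}
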