x86: rework __per_cpu_load adjustments
[linux-2.6-block.git] / arch/x86/include/asm/stackprotector.h
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1

#include <asm/tsc.h>
#include <asm/pda.h>

/*
 * Initialize the stackprotector canary value.
 *
 * NOTE: this must only be called from functions that never return,
 * and it must always be inlined.
 */
static __always_inline void boot_init_stack_canary(void)
{
	u64 canary;
	u64 tsc;

	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement.
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);

	/*
	 * We use both the random pool and the current TSC as sources
	 * of randomness. The TSC only matters for very early init;
	 * there it already has some randomness on most systems. Later
	 * on during bootup the random pool has true entropy too.
	 */
	get_random_bytes(&canary, sizeof(canary));
	tsc = __native_read_tsc();
	canary += tsc + (tsc << 32UL);

	current->stack_canary = canary;
	write_pda(stack_canary, canary);
}

#endif
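
As a rough userspace analogue of the BUILD_BUG_ON() layout check above (struct example_pda and its members are invented for illustration; the kernel checks struct x8664_pda, whose stack_canary gcc expects to find at %gs:40 when -fstack-protector is enabled):

#include <stddef.h>
#include <stdint.h>

struct example_pda {
	uint64_t fields_before[5];	/* 5 * 8 bytes, so the next member lands at offset 40 */
	uint64_t stack_canary;		/* gcc's stack protector reads the canary at this fixed offset */
};

/* Fails at compile time if the layout ever drifts, like BUILD_BUG_ON() does. */
_Static_assert(offsetof(struct example_pda, stack_canary) == 40,
	       "stack_canary must sit at offset 40");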
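
And a minimal userspace sketch of the canary-mixing step itself, assuming a /dev/urandom read and an rdtsc helper as stand-ins for the kernel's get_random_bytes() and __native_read_tsc() (not the kernel code, just the same idea: mix pool randomness with the TSC so that very early boot, where the entropy pool is still weak, still gets a per-boot value):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for __native_read_tsc(): read the x86 time stamp counter. */
static inline uint64_t read_tsc(void)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t canary = 0, tsc;
	FILE *f = fopen("/dev/urandom", "rb");	/* stand-in for get_random_bytes() */

	if (!f || fread(&canary, sizeof(canary), 1, f) != 1) {
		perror("urandom");
		return 1;
	}
	fclose(f);

	tsc = read_tsc();
	canary += tsc + (tsc << 32);	/* same mixing step as boot_init_stack_canary() */

	printf("canary: %#018llx\n", (unsigned long long)canary);
	return 0;
}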