/*
 * include/asm-x86_64/pda.h
 *
 * Per-CPU Data Area ("PDA") for x86-64.  The kernel keeps one of these
 * per processor and points %gs at it while kernel code runs.
 */
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

10/* Per processor datastructure. %gs points to it while the kernel runs */
11struct x8664_pda {
29a9af60
AV
12 struct task_struct *pcurrent; /* 0 Current process */
13 unsigned long data_offset; /* 8 Per cpu data offset from linker
14 address */
15 unsigned long kernelstack; /* 16 top of kernel stack for current */
16 unsigned long oldrsp; /* 24 user rsp for system call */
17 int irqcount; /* 32 Irq nesting counter. Starts with -1 */
18 int cpunumber; /* 36 Logical CPU number */
0a425405
AV
19#ifdef CONFIG_CC_STACKPROTECTOR
20 unsigned long stack_canary; /* 40 stack canary value */
21 /* gcc-ABI: this canary MUST be at
22 offset 40!!! */
23#endif
24 char *irqstackptr;
69d81fcd 25 int nodenumber; /* number of current node */
1da177e4
LT
26 unsigned int __softirq_pending;
27 unsigned int __nmi_count; /* number of NMI on this CPUs */
a15da49d
AK
28 short mmu_state;
29 short isidle;
df92004c 30 struct mm_struct *active_mm;
1da177e4 31 unsigned apic_timer_irqs;
b9169116 32} ____cacheline_aligned_in_smp;
1da177e4 33
365ba917
RT
34extern struct x8664_pda *_cpu_pda[];
35extern struct x8664_pda boot_cpu_pda[];
df79efde 36
365ba917 37#define cpu_pda(i) (_cpu_pda[i])
1da177e4
LT
38
39/*
40 * There is no fast way to get the base address of the PDA, all the accesses
41 * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
42 */
fd167e42 43extern void __bad_pda_field(void) __attribute__((noreturn));
1da177e4 44
c1a9d41f
AK
45/*
46 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
47 * all PDA accesses so it gets read/write dependencies right.
48 */
53ee11ae
AK
49extern struct x8664_pda _proxy_pda;
50
1da177e4
LT
51#define pda_offset(field) offsetof(struct x8664_pda, field)
52
c1a9d41f
AK
53#define pda_to_op(op,field,val) do { \
54 typedef typeof(_proxy_pda.field) T__; \
55 if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
56 switch (sizeof(_proxy_pda.field)) { \
57 case 2: \
58 asm(op "w %1,%%gs:%c2" : \
59 "+m" (_proxy_pda.field) : \
60 "ri" ((T__)val), \
61 "i"(pda_offset(field))); \
62 break; \
63 case 4: \
64 asm(op "l %1,%%gs:%c2" : \
65 "+m" (_proxy_pda.field) : \
66 "ri" ((T__)val), \
67 "i" (pda_offset(field))); \
68 break; \
69 case 8: \
70 asm(op "q %1,%%gs:%c2": \
71 "+m" (_proxy_pda.field) : \
72 "ri" ((T__)val), \
73 "i"(pda_offset(field))); \
74 break; \
75 default: \
76 __bad_pda_field(); \
77 } \
1da177e4
LT
78 } while (0)
79
c1a9d41f
AK
80#define pda_from_op(op,field) ({ \
81 typeof(_proxy_pda.field) ret__; \
82 switch (sizeof(_proxy_pda.field)) { \
83 case 2: \
84 asm(op "w %%gs:%c1,%0" : \
85 "=r" (ret__) : \
86 "i" (pda_offset(field)), \
87 "m" (_proxy_pda.field)); \
88 break; \
89 case 4: \
90 asm(op "l %%gs:%c1,%0": \
91 "=r" (ret__): \
92 "i" (pda_offset(field)), \
93 "m" (_proxy_pda.field)); \
94 break; \
95 case 8: \
96 asm(op "q %%gs:%c1,%0": \
97 "=r" (ret__) : \
98 "i" (pda_offset(field)), \
99 "m" (_proxy_pda.field)); \
100 break; \
101 default: \
102 __bad_pda_field(); \
103 } \
1da177e4
LT
104 ret__; })
105
1da177e4
LT
106#define read_pda(field) pda_from_op("mov",field)
107#define write_pda(field,val) pda_to_op("mov",field,val)
108#define add_pda(field,val) pda_to_op("add",field,val)
109#define sub_pda(field,val) pda_to_op("sub",field,val)
3f74478b 110#define or_pda(field,val) pda_to_op("or",field,val)
1da177e4 111
9446868b
AK
112/* This is not atomic against other CPUs -- CPU preemption needs to be off */
113#define test_and_clear_bit_pda(bit,field) ({ \
114 int old__; \
115 asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
116 : "=r" (old__), "+m" (_proxy_pda.field) \
117 : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
118 old__; \
119})
120
1da177e4
LT
121#endif
122
123#define PDA_STACKOFFSET (5*8)
124
125#endif