#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H

#include <linux/sched.h>

/* Second argument to futex syscall */

#define FUTEX_WAIT		0
#define FUTEX_WAKE		1
#define FUTEX_FD		2
#define FUTEX_REQUEUE		3
#define FUTEX_CMP_REQUEUE	4
#define FUTEX_WAKE_OP		5

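/*
 * An illustrative wait/wake pairing built on the opcodes above; a
 * minimal sketch, assuming a futex() syscall wrapper and the names
 * "uaddr", "val", "new_val" and "timeout", none of which this header
 * defines:
 *
 *	while (*uaddr == val)
 *		futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0);
 *	...
 *	*uaddr = new_val;
 *	futex(uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * FUTEX_WAIT blocks only if *uaddr still equals val when the kernel
 * re-checks it, which closes the race between the user-space test
 * and going to sleep.
 */
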
/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 */

/*
 * Per-lock list entry - embedded in user-space locks, somewhere close
 * to the futex field. (Note: user-space uses a doubly-linked list to
 * achieve O(1) list add and remove, but the kernel only needs to know
 * about the forward link.)
 *
 * NOTE: this structure is part of the syscall ABI, and must not be
 * changed.
 */
struct robust_list {
	struct robust_list __user *next;
};

/*
 * Per-thread list head:
 *
 * NOTE: this structure is part of the syscall ABI, and must only be
 * changed if the change is first communicated with the glibc folks.
 * (When an incompatible change is done, we'll increase the structure
 * size, which glibc will detect.)
 */
struct robust_list_head {
	/*
	 * The head of the list. Points back to itself if empty:
	 */
	struct robust_list list;

	/*
	 * This relative offset is set by user-space; it gives the kernel
	 * the relative position of the futex field to examine. This way
	 * we keep user-space flexible to freely shape its data structure,
	 * without hardcoding any particular offset into the kernel:
	 */
	long futex_offset;

	/*
	 * The death of the thread may race with user-space setting
	 * up a lock's links. To handle this race, user-space first
	 * sets this field to the address of the to-be-taken lock,
	 * then acquires the lock, then adds the lock to the list,
	 * and then clears this field. Hence the kernel will always
	 * have full knowledge of all locks that the thread _might_
	 * have taken. We check the owner TID in any case, so only
	 * truly owned locks will be handled.
	 */
	struct robust_list __user *list_op_pending;
};

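/*
 * A minimal sketch of how user-space might use the two structures
 * above; "my_lock", "head", offsetof() from <stddef.h> and the
 * set_robust_list() registration step are illustrative assumptions,
 * not part of this header:
 *
 *	struct my_lock {
 *		struct robust_list list;
 *		unsigned int futex;
 *	};
 *
 *	head.list.next = &head.list;
 *	head.futex_offset = offsetof(struct my_lock, futex) -
 *			    offsetof(struct my_lock, list);
 *	head.list_op_pending = NULL;
 *	set_robust_list(&head, sizeof(head));
 *
 * futex_offset is the distance from a list entry to its futex word,
 * so the kernel can locate the word from the forward link alone.
 * Per the comment above, user-space stores the entry's address in
 * list_op_pending before each acquire and clears it only after the
 * entry is linked in.
 */
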
/*
 * Are there any waiters for this robust futex:
 */
#define FUTEX_WAITERS		0x80000000

/*
 * The kernel signals via this bit that a thread holding a futex
 * has exited without unlocking the futex. The kernel also does
 * a FUTEX_WAKE on such futexes, after setting the bit, to wake
 * up any possible waiters:
 */
#define FUTEX_OWNER_DIED	0x40000000

/*
 * Reserved bit:
 */
#define FUTEX_OWNER_PENDING	0x20000000

/*
 * The rest of the robust-futex field is for the TID:
 */
#define FUTEX_TID_MASK		0x1fffffff

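/*
 * Taken together, the definitions above split the 32-bit futex word
 * into a TID plus status bits.  An illustrative decode ("val" is an
 * assumed variable holding the futex word):
 *
 *	tid = val & FUTEX_TID_MASK;
 *	waiters = val & FUTEX_WAITERS;
 *	dead_owner = val & FUTEX_OWNER_DIED;
 *
 * For example, a futex held by TID 1234 with waiters queued would
 * hold FUTEX_WAITERS | 1234 == 0x800004d2.
 */
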
/*
 * A limit of one million locks held per thread (!) ought to be enough
 * for some time. This also protects against a deliberately circular
 * list. Not worth introducing an rlimit for this:
 */
#define ROBUST_LIST_LIMIT	1048576

long do_futex(unsigned long uaddr, int op, int val,
		unsigned long timeout, unsigned long uaddr2, int val2,
		int val3);

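/*
 * A sketch of how the multiplexed arguments are interpreted; the
 * exact meaning depends on "op".  For FUTEX_CMP_REQUEUE, for
 * instance, "val" is the number of waiters to wake, "val2" the
 * maximum number to requeue onto "uaddr2", and "val3" the value
 * "uaddr" is expected to still contain ("expected" below is an
 * assumed variable, the operands are example values):
 *
 *	do_futex(uaddr, FUTEX_CMP_REQUEUE, 1, timeout, uaddr2,
 *		 INT_MAX, expected);
 */
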
extern int handle_futex_death(unsigned int __user *uaddr,
			      struct task_struct *curr);

#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
#else
static inline void exit_robust_list(struct task_struct *curr)
{
}
#endif

#define FUTEX_OP_SET		0	/* *(int *)UADDR2 = OPARG; */
#define FUTEX_OP_ADD		1	/* *(int *)UADDR2 += OPARG; */
#define FUTEX_OP_OR		2	/* *(int *)UADDR2 |= OPARG; */
#define FUTEX_OP_ANDN		3	/* *(int *)UADDR2 &= ~OPARG; */
#define FUTEX_OP_XOR		4	/* *(int *)UADDR2 ^= OPARG; */

#define FUTEX_OP_OPARG_SHIFT	8	/* Use (1 << OPARG) instead of OPARG. */

#define FUTEX_OP_CMP_EQ		0	/* if (oldval == CMPARG) wake */
#define FUTEX_OP_CMP_NE		1	/* if (oldval != CMPARG) wake */
#define FUTEX_OP_CMP_LT		2	/* if (oldval < CMPARG) wake */
#define FUTEX_OP_CMP_LE		3	/* if (oldval <= CMPARG) wake */
#define FUTEX_OP_CMP_GT		4	/* if (oldval > CMPARG) wake */
#define FUTEX_OP_CMP_GE		5	/* if (oldval >= CMPARG) wake */

/* FUTEX_WAKE_OP will perform atomically
	int oldval = *(int *)UADDR2;
	*(int *)UADDR2 = oldval OP OPARG;
	if (oldval CMP CMPARG)
		wake UADDR2;  */

#define FUTEX_OP(op, oparg, cmp, cmparg) \
  (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
   | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

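/*
 * An illustrative encoding using the macro above (example operands
 * only): set *UADDR2 to 1 and wake if the old value was 0:
 *
 *	FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0)
 *
 * packs to (0 << 28) | (0 << 24) | (1 << 12) | 0 == 0x00001000.
 */
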
#endif