/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * host_supports_tls starts out as -1 so that, if needed, we can detect that
 * it has not been set up yet.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

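/*
 * Install one TLS descriptor into the host process that runs userspace for
 * the current CPU (via PTRACE_SET_THREAD_AREA, see os_set_thread_area()).
 */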
static int do_set_thread_area(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_set_thread_area(info, userspace_pid[cpu]);
        put_cpu();

        if (ret)
                printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
                       "index = %d\n", ret, info->entry_number);

        return ret;
}

/*
 * get_free_idx: find a not-yet-used TLS descriptor index for
 * sys_set_thread_area().
 * XXX: Consider leaving one slot free for glibc usage in the first place. That
 * would have to be done here (and by changing the GDT_ENTRY_TLS_* macros) and
 * nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
        struct thread_struct *t = &task->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
        /* Postcondition: LDT_empty(info) returns true. */
        memset(info, 0, sizeof(*info));

        /*
         * See LDT_empty() and the i386 sys_get_thread_area code: together
         * with the memset above, these two bits yield an empty user_desc.
         */
        info->read_exec_only = 1;
        info->seg_not_present = 1;
}

#define O_FORCE 1

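/*
 * Flush this task's TLS descriptors to the host process.  Each tls_array slot
 * records whether a descriptor is present and whether it has been flushed to
 * the host yet; already-flushed entries are normally skipped.  With O_FORCE
 * (used on context switch), present entries are rewritten even if they are
 * already marked flushed.
 */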
static int load_TLS(int flags, struct task_struct *to)
{
        int ret = 0;
        int idx;

        for (idx = GDT_ENTRY_TLS_MIN; idx <= GDT_ENTRY_TLS_MAX; idx++) {
                struct uml_tls_struct *curr =
                        &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

                /*
                 * If the entry is not present: when it hasn't been flushed
                 * yet, clear the local copy so that the empty descriptor is
                 * written to the host below; when it has already been
                 * flushed, it must already be empty on the host, so skip it.
                 */
                if (!curr->present) {
                        if (!curr->flushed) {
                                clear_user_desc(&curr->tls);
                                curr->tls.entry_number = idx;
                        } else {
                                WARN_ON(!LDT_empty(&curr->tls));
                                continue;
                        }
                }

                if (!(flags & O_FORCE) && curr->flushed)
                        continue;

                ret = do_set_thread_area(&curr->tls);
                if (ret)
                        goto out;

                curr->flushed = 1;
        }
out:
        return ret;
}

/*
 * Check whether we need to flush anything to the host for this task, i.e.
 * whether any of its TLS descriptors have not been flushed yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
        int i;
        int ret = 0;

        for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct *curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * We can't test curr->present: a descriptor that previously
                 * held a value may still need to be cleared on the host.
                 */
                if (curr->flushed)
                        continue;
                ret = 1;
                break;
        }
        return ret;
}

/*
 * On a newly forked process the TLS descriptors haven't been flushed to the
 * host yet, so mark them as unflushed here; the first switch_to() will then
 * do the actual flush.
 */
void clear_flushed_tls(struct task_struct *task)
{
        int i;

        for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct *curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * Only present entries need to be marked unflushed; an entry
                 * that was never installed on the host keeps whatever flushed
                 * state it already had.
                 */
                if (!curr->present)
                        continue;

                curr->flushed = 0;
        }
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm are
 * backed by a common host process, so switching TLS on the host is needed in
 * SKAS0 too.
 *
 * However, if each guest thread had its own host process (as was discussed
 * for SMP support), this would not be needed.
 *
 * Nor will it be needed if (and when) support for the host SKAS patch is
 * added.
 */

int arch_switch_tls(struct task_struct *to)
{
        if (!host_supports_tls)
                return 0;

        /*
         * There is no need whatsoever to switch TLS for kernel threads;
         * besides, doing so would end up calling os_set_thread_area() with
         * userspace_pid[cpu] == 0, which fails.
         */
        if (likely(to->mm))
                return load_TLS(O_FORCE, to);

        return 0;
}

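/*
 * Record one TLS descriptor in the task's tls_array; "flushed" tells whether
 * the descriptor has already been written to the host.
 */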
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
                         int idx, int flushed)
{
        struct thread_struct *t = &task->thread;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

        return 0;
}

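/*
 * Set up the TLS descriptor passed on clone() (CLONE_SETTLS) for a newly
 * created task: it is recorded as not yet flushed and reaches the host on
 * the first switch to the new task.
 */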
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
        struct user_desc info;
        int idx, ret = -EFAULT;

        if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
                goto out;

        ret = -EINVAL;
        if (LDT_empty(&info))
                goto out;

        idx = info.entry_number;

        ret = set_tls_entry(new, &info, idx, 0);
out:
        return ret;
}

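/*
 * Read one TLS descriptor back from the task's tls_array.  Slots that were
 * never set are reported as empty descriptors, matching what native i386
 * get_thread_area() returns.
 */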
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
                         int idx)
{
        struct thread_struct *t = &task->thread;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
                goto clear;

        *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
        /*
         * Temporary debugging check, to make sure that things have been
         * flushed. This could be triggered if load_TLS() failed.
         */
        if (unlikely(task == current &&
                     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
                                "without flushed TLS\n", current->pid);
        }

        return 0;
clear:
        /*
         * When the TLS entry has not been set, the values returned to user
         * space from the tls_array are 0 (the GDT entries are cleared at
         * boot, see the original arch/i386/kernel/head.S:cpu_gdt_table).
         * Emulate that here.
         */
        clear_user_desc(info);
        info->entry_number = idx;
        goto out;
}

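/*
 * set_thread_area() for the current task: pick a free slot if the caller
 * passed entry_number == -1, install the descriptor on the host right away
 * and record it as flushed.
 *
 * For illustration only (not part of this file), a typical user-space caller
 * might do roughly the following, then read back d.entry_number to learn
 * which GDT slot was used:
 *
 *        struct user_desc d = {
 *                .entry_number   = -1,
 *                .base_addr      = (unsigned long) tls_block,
 *                .limit          = 0xfffff,
 *                .seg_32bit      = 1,
 *                .limit_in_pages = 1,
 *                .useable        = 1,
 *        };
 *        syscall(SYS_set_thread_area, &d);
 */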
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        idx = info.entry_number;

        if (idx == -1) {
                idx = get_free_idx(current);
                if (idx < 0)
                        return idx;
                info.entry_number = idx;
                /* Tell user space which slot we chose for it. */
                if (put_user(idx, &user_desc->entry_number))
                        return -EFAULT;
        }

        ret = do_set_thread_area(&info);
        if (ret)
                return ret;
        return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: no error handling is done on the deferred load, which differs from
 * i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;

        if (!host_supports_tls)
                return -EIO;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        return set_tls_entry(child, &info, idx, 0);
}

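/*
 * get_thread_area() for the current task: look up the slot named by
 * user_desc->entry_number and copy the stored (or empty) descriptor back to
 * user space.
 */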
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (get_user(idx, &user_desc->entry_number))
                return -EFAULT;

        ret = get_tls_entry(current, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;

out:
        return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;
        int ret;

        if (!host_supports_tls)
                return -EIO;

        ret = get_tls_entry(child, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;
out:
        return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
        check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
        if (host_supports_tls) {
                printk(KERN_INFO "Host TLS support detected\n");
                printk(KERN_INFO "Detected host type: ");
                switch (host_gdt_entry_tls_min) {
                case GDT_ENTRY_TLS_MIN_I386:
                        printk(KERN_CONT "i386");
                        break;
                case GDT_ENTRY_TLS_MIN_X86_64:
                        printk(KERN_CONT "x86_64");
                        break;
                }
                printk(KERN_CONT " (GDT indexes %d to %d)\n",
                       host_gdt_entry_tls_min,
                       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1);
        } else {
                printk(KERN_ERR "  Host TLS support NOT detected! "
                                "TLS support inside UML will not work\n");
        }
        return 0;
}

__initcall(__setup_host_supports_tls);