/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfreds@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
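
/*
 * For reference, a sketch of the classic real-mode stack switch that
 * relies on that one-instruction interrupt shadow (illustrative 8086
 * assembly, not taken from any particular guest):
 *
 *	mov	ss, ax		real hardware holds off interrupts here,
 *	mov	sp, bx		so this pair executes atomically
 *
 * Under emulation the first instruction can fault or be preempted, so
 * an interrupt may observe the half-switched ss:sp pair.
 */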

#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

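/*
 * Decoding the masks above: SAFE_MASK (0xDD5) selects CF, PF, AF, ZF,
 * SF, TF, DF and OF, the flags vm86 code may change freely.
 * RETURN_MASK (0xDFF) additionally passes the reserved low bits
 * through; both leave out IF (bit 9), which get_vflags() below
 * synthesizes from the virtual VIF bit instead.
 */
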
#define VM86_REGS_PART2 orig_eax
#define VM86_REGS_SIZE1 \
        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_to_user(&current->thread.vm86_info->regs, regs, VM86_REGS_SIZE1);
	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	loadsegment(fs, current->thread.saved_fs);
	loadsegment(gs, current->thread.saved_gs);
	ret = KVM86->regs32;
	return ret;
}

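/*
 * The 32 ptes walked below cover 0xA0000-0xBFFFF, the 128K legacy
 * video window whose writes the VM86_SCREEN_BITMAP logic traps via
 * write-protected pages.
 */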
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/* NOTE: on old vm86 stuff this will return the error
			   from access_ok(), because the subfunction is
			   interpreted as an (invalid) address of a vm86_struct.
			   So the installation check works.
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
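
/*
 * A minimal userspace sketch of entering v86 mode through the vm86plus
 * interface above (hypothetical and untested; it assumes the glibc
 * vm86() wrapper and that real-mode code and stack pages are already
 * mapped at the addresses used):
 *
 *	#include <sys/vm86.h>
 *	#include <string.h>
 *
 *	static struct vm86plus_struct v86;
 *
 *	int run_realmode(void)
 *	{
 *		memset(&v86, 0, sizeof(v86));
 *		v86.regs.cs  = 0x1000;		code at 1000:0000
 *		v86.regs.eip = 0x0000;
 *		v86.regs.ss  = 0x2000;		stack at 2000:fffe
 *		v86.regs.esp = 0xfffe;
 *		v86.cpu_type = CPU_586;
 *		return vm86(VM86_ENTER, &v86);	decode with VM86_TYPE()
 *	}
 */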


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;

/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because starting with Linux 2.1.x they are no longer saved/restored
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc. flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	savesegment(gs, tsk->thread.saved_gs);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);
	__asm__ __volatile__(
		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (tsk->thread_info) : "ax");
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the instruction sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled (POPF restores the flags image
 * that was saved while IF was clear), but you ended up with
 * interrupts enabled.
 * ( I was testing my own changes, but the only bug I
 *   could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

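/*
 * Note on the byte-at-a-time accesses above: every caller passes a
 * 16-bit sp/ip as `ptr`, so the cursor wraps at the 64K segment
 * boundary just like a real-mode SP or IP would. A single multi-byte
 * get_user/put_user at base + ptr would run past the end of the
 * segment instead of wrapping, which is presumably why the accesses
 * are split up.
 */
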
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
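
/*
 * For clarity, the frame do_int() builds matches what a real CPU
 * pushes for a software interrupt in real mode (16-bit slots):
 *
 *	ss:sp+4		FLAGS image from get_vflags()
 *	ss:sp+2		CS, the return segment
 *	ss:sp+0		IP, the return offset
 *
 * with the handler's cs:ip fetched from the interrupt vector table
 * entry at linear address i * 4.
 */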

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let the calling routine handle this */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->eflags;

	csp = (unsigned char __user *) (regs->cs << 4);
	ssp = (unsigned char __user *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:      /* 32-bit data */     data32=1; break;
			case 0x67:      /* 32-bit address */  break;
			case 0x2e:      /* CS */              break;
			case 0x3e:      /* DS */              break;
			case 0x26:      /* ES */              break;
			case 0x36:      /* SS */              break;
			case 0x65:      /* GS */              break;
			case 0x64:      /* FS */              break;
			case 0xf2:      /* repnz */           break;
			case 0xf3:      /* rep */             break;
			default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}


	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}


	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}
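
/*
 * Typical userspace use of the IRQ subfunctions above, as a
 * hypothetical sketch (error handling omitted; constants are from
 * <asm/vm86.h>):
 *
 *	vm86(VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 3);
 *		claims IRQ 3, delivered as SIGUSR1; needs CAP_SYS_ADMIN
 *	if (vm86(VM86_GET_AND_RESET_IRQ, 3))
 *		the IRQ fired and the kernel has re-enabled the line
 *	vm86(VM86_FREE_IRQ, 3);
 */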