/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfreds@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
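/*
 * (Editorial illustration of the first problem above, not from the
 * original source: real-mode code often switches stacks with the bare
 * two-instruction sequence
 *
 *	mov	%ax,%ss
 *	mov	%bx,%sp
 *
 * On real hardware the write to %ss holds off interrupts until the
 * following instruction has completed, so the pair is effectively
 * atomic. Under emulation the second instruction can fault first,
 * leaving a transiently inconsistent ss:sp visible.)
 */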


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))
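
/*
 * (Editorial note: these aliases rely on the little-endian layout of
 * the x86 registers in memory. With regs->eax == 0x12345678, AL(regs)
 * reads 0x78 and AH(regs) reads 0x56; IP()/SP() likewise read and
 * write just the low 16 bits of eip/esp in place.)
 */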

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
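
/*
 * (Decoding the two magic masks, from the architectural EFLAGS layout:
 * SAFE_MASK 0xDD5 = CF|PF|AF|ZF|SF|TF|DF|OF - the flags a vm86 task
 * may modify directly; IF, IOPL and NT are deliberately excluded.
 * RETURN_MASK 0xDFF is the same plus the reserved low bits, and still
 * leaves out IF (bit 9), whose virtual value is merged back in from
 * VEFLAGS by get_vflags() below.)
 */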

#define VM86_REGS_PART2 orig_eax
#define VM86_REGS_SIZE1 \
        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
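
/*
 * (VM86_REGS_SIZE1 is the classic cast-a-null-pointer spelling of
 * offsetof(struct kernel_vm86_regs, orig_eax); the register image is
 * copied to and from userspace in two pieces split at that member.)
 */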

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_to_user(&current->thread.vm86_info->regs,regs, VM86_REGS_SIZE1);
	tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
		&regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
	tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	loadsegment(fs, current->thread.saved_fs);
	loadsegment(gs, current->thread.saved_gs);
	ret = KVM86->regs32;
	return ret;
}

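/*
 * (Editorial note: the function below write-protects the 32 PTEs
 * mapping 0xA0000-0xBFFFF, i.e. the 128 KiB legacy VGA window, in
 * support of the VM86_SCREEN_BITMAP feature - subsequent writes to
 * screen memory will fault and can then be tracked.)
 */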
static void mark_screen_rdonly(struct task_struct * tsk)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, *mapped;
	int i;

	preempt_disable();
	spin_lock(&tsk->mm->page_table_lock);
	pgd = pgd_offset(tsk->mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = mapped = pte_offset_map(pmd, 0xA0000);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap(mapped);
out:
	spin_unlock(&tsk->mm->page_table_lock);
	preempt_enable();
	flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/* NOTE: on old vm86 stuff this will return the error
			   from verify_area(), because the subfunction is
			   interpreted as an (invalid) address of a vm86_struct.
			   So the installation check works.
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
	tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
		(long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;

	/* we are clearing fs,gs later just before "jmp resume_userspace",
	 * because starting with Linux 2.1.x they are no longer saved/restored
	 */

	/*
	 * The eflags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure the interrupt etc. flags
	 * are inherited from protected mode.
	 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;

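	/*
	 * (Background for the switch below: v86mask picks the
	 * CPU-generation-dependent EFLAGS bits the vm86 task is allowed
	 * to play with - none for a 286, NT and IOPL from the 386 on,
	 * plus AC on a 486, plus the CPUID-detection ID bit on anything
	 * newer.)
	 */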
	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

	/*
	 * Save old state, set default return value (%eax) to 0
	 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk);
	__asm__ __volatile__(
		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (tsk->thread_info) : "ax");
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

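/*
 * (Note on the virtual interrupt protocol implemented below: the
 * guest's IF lives in VEFLAGS as VIF. A 'sti' merely sets VIF, but if
 * VIP - virtual interrupt pending, presumably set by the userspace
 * monitor when it has an event queued - is also set, we drop back to
 * 32-bit userspace with VM86_STI so the event can be delivered.)
 */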
static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the instruction sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}
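
/*
 * (How the asm above works: 'btl' copies bit 'nr' of the bitmap into
 * the carry flag, and 'sbbl %0,%0' subtracts a register from itself
 * with borrow, yielding 0 if the bit was clear and -1 if it was set -
 * a branch-free truth value.)
 */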

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
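
/*
 * (Why these go byte by byte: 'ptr' is a 16-bit variable at every call
 * site, so stepping it between single-byte accesses wraps at 64K just
 * like a real-mode stack or instruction pointer would. The macros also
 * only touch local copies of sp/ip, so a fault jumps to err_label
 * before any change has been committed to the register image.)
 */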

/* There are so many possible reasons for this function to return
 * VM86_INTx that adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
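
/*
 * (Real-mode addressing background for do_int() below: a linear
 * address is segment * 16 + offset - hence the 'regs->ss << 4' stack
 * base passed in by the callers - and the interrupt vector table sits
 * at linear address 0 with four bytes per vector, which is what the
 * 'i << 2' lookup reads to obtain the handler's segment:offset.)
 */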
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
		return 0;
	}
	if (trapno !=1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	return; } while (0)

	csp = (unsigned char __user *) (regs->cs << 4);
	ssp = (unsigned char __user *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
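	/*
	 * (The loop below consumes any instruction prefixes preceding
	 * the trapping opcode: 0x66 switches the emulated pushf/popf/
	 * iret to 32-bit operands, while the address-size, segment
	 * override and rep prefixes are recognized but have no effect
	 * on the instructions emulated here.)
	 */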
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:      /* 32-bit data */     data32=1; break;
			case 0x67:      /* 32-bit address */  break;
			case 0x2e:      /* CS */              break;
			case 0x3e:      /* DS */              break;
			case 0x26:      /* ES */              break;
			case 0x36:      /* SS */              break;
			case 0x65:      /* GS */              break;
			case 0x64:      /* FS */              break;
			case 0xf2:      /* repnz */           break;
			case 0xf3:      /* rep */             break;
			default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags=popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* int xx */
	case 0xcd: {
		int intno=popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip=popl(ssp, sp, simulate_sigsegv);
			newcs=popl(ssp, sp, simulate_sigsegv);
			newflags=popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault
	 *        handler. The correct context for the signal
	 *        handler should be a mixture of the two, but
	 *        how do we get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq(intno);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}
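
/*
 * (Life cycle sketch of the mechanism above: when the IRQ fires, the
 * handler latches it in 'irqbits', optionally signals the owning task,
 * and disables the IRQ line. The task later collects it with the
 * VM86_GET_AND_RESET_IRQ subfunction - get_and_reset_irq() below -
 * which clears the latched bit and re-enables the line.)
 */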

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	spin_unlock_irqrestore(&irqbits_lock, flags);
	if (!bit)
		return 0;
	enable_irq(irqnumber);
	return 1;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}
804 |