/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *               stack - Manfred Spraul <manfred@colorfullife.com>
 *
 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *               them correctly. Now the emulation will be in a
 *               consistent state after stackfaults - Kasper Dupont
 *               <kasperd@daimi.au.dk>
 *
 * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *               <kasperd@daimi.au.dk>
 *
 * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *               caused by Kasper Dupont's changes - Stas Sergeev
 *
 *  4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *               Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *               Kasper Dupont <kasperd@daimi.au.dk>
 *
 *  9 apr 2002 - Changed stack access macros to jump to a label
 *               instead of returning to userspace. This simplifies
 *               do_int, and is needed by handle_vm86_fault. Kasper
 *               Dupont <kasperd@daimi.au.dk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

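/*
 * For illustration (not from the original source): real-mode code
 * commonly relies on that one-instruction interrupt shadow when
 * switching stacks, e.g.:
 *
 *	cli
 *	mov	ss, ax		; interrupts stay off for one more insn,
 *	mov	sp, bx		; so this pair is effectively atomic
 *	sti
 *
 * Under v86 emulation either instruction may fault or be emulated,
 * so that shadow cannot be reproduced exactly.
 */
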
/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->v86flags))
#define VEFLAGS	(current->thread.vm86->v86flags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

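/*
 * Decoded for reference: SAFE_MASK (0xDD5) covers CF, PF, AF, ZF, SF,
 * TF, DF and OF -- the flags vm86 code may freely modify. RETURN_MASK
 * (0xDFF) additionally includes the fixed reserved low bits (1, 3, 5).
 * Neither includes IF (bit 9): the interrupt flag is always
 * virtualized through VIF in VEFLAGS, so e.g.
 *
 *	set_flags(regs->pt.flags, flags, SAFE_MASK);
 *
 * updates only the safe bits and leaves IF/IOPL/VM untouched.
 */
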
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	int err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->vm86_info) {
		pr_alert("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
	user = vm86->vm86_info;

	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &tsk->thread);
	vm86->saved_sp0 = 0;
	put_cpu();

	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));

	lazy_load_gs(vm86->regs32.gs);

	regs->pt.ax = retval;
}

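/*
 * Write-protect the 32 pages (128K) that map the VGA window at
 * 0xA0000, so that when VM86_SCREEN_BITMAP is requested, writes to
 * screen memory fault and modified pages can be tracked in the
 * user-supplied screen_bitmap.
 */
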
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}

static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
	return do_sys_vm86((struct vm86plus_struct __user *) v86, false);
}

SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
}

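/*
 * A minimal userspace sketch of the entry path above (illustrative,
 * not part of the original source; assumes <asm/vm86.h> and a raw
 * syscall(SYS_vm86, ...) wrapper; the segment/stack values are
 * hypothetical):
 *
 *	struct vm86plus_struct v86 = {0};
 *
 *	// returns 0 here; the old one-argument entry would instead
 *	// fail access_ok() on this small integer, so userspace can
 *	// probe for vm86plus support this way
 *	if (syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) != 0)
 *		return -1;
 *
 *	v86.regs.cs  = 0x1000;	// real-mode code segment
 *	v86.regs.eip = 0x0000;
 *	v86.regs.ss  = 0x2000;	// real-mode stack
 *	v86.regs.esp = 0xfffe;
 *	syscall(SYS_vm86, VM86_ENTER, &v86);	// returns via save_v86_state()
 */
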
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	struct kernel_vm86_regs vm86regs;
	struct pt_regs *regs = current_pt_regs();
	unsigned long err = 0;

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, v86, plus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		return -EFAULT;

	memset(&vm86regs, 0, sizeof(vm86regs));
	get_user_try {
		unsigned short seg;
		get_user_ex(vm86regs.pt.bx, &v86->regs.ebx);
		get_user_ex(vm86regs.pt.cx, &v86->regs.ecx);
		get_user_ex(vm86regs.pt.dx, &v86->regs.edx);
		get_user_ex(vm86regs.pt.si, &v86->regs.esi);
		get_user_ex(vm86regs.pt.di, &v86->regs.edi);
		get_user_ex(vm86regs.pt.bp, &v86->regs.ebp);
		get_user_ex(vm86regs.pt.ax, &v86->regs.eax);
		get_user_ex(vm86regs.pt.ip, &v86->regs.eip);
		get_user_ex(seg, &v86->regs.cs);
		vm86regs.pt.cs = seg;
		get_user_ex(vm86regs.pt.flags, &v86->regs.eflags);
		get_user_ex(vm86regs.pt.sp, &v86->regs.esp);
		get_user_ex(seg, &v86->regs.ss);
		vm86regs.pt.ss = seg;
		get_user_ex(vm86regs.es, &v86->regs.es);
		get_user_ex(vm86regs.ds, &v86->regs.ds);
		get_user_ex(vm86regs.fs, &v86->regs.fs);
		get_user_ex(vm86regs.gs, &v86->regs.gs);

		get_user_ex(vm86->flags, &v86->flags);
		get_user_ex(vm86->screen_bitmap, &v86->screen_bitmap);
		get_user_ex(vm86->cpu_type, &v86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&vm86->int_revectored, &v86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&vm86->int21_revectored, &v86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&vm86->vm86plus, &v86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		vm86->vm86plus.is_vm86pus = 1;
	} else
		memset(&vm86->vm86plus, 0,
		       sizeof(struct vm86plus_info_struct));

	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
	vm86->vm86_info = v86;

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = vm86regs.pt.flags;
	vm86regs.pt.flags &= SAFE_MASK;
	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
	vm86regs.pt.flags |= X86_VM_MASK;

	vm86regs.pt.orig_ax = regs->orig_ax;

	switch (vm86->cpu_type) {
	case CPU_286:
		vm86->v86mask = 0;
		break;
	case CPU_386:
		vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	/*
	 * Save old state
	 */
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(vm86->regs32.gs);

	tss = &per_cpu(cpu_tss, get_cpu());
	/* make room for real-mode segments */
	tsk->thread.sp0 += 16;
	if (static_cpu_has_safe(X86_FEATURE_SEP))
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	if (vm86->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
	force_iret();
	return regs->ax;
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

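/*
 * A worked trace of that sequence under the emulation (illustrative),
 * starting with VIF set:
 *
 *	CLI	-> clear_IF():  VIF cleared in VEFLAGS
 *	PUSHF	-> get_vflags() pushes an image whose IF reflects VIF (0)
 *	STI	-> set_IF():    VIF set again
 *	POPF	-> set_vflags_*() sees IF clear in the popped image and
 *		   must call clear_IF(); without that call VIF stayed set
 *		   and the guest wrongly ran with interrupts enabled.
 */
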
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	/* bt loads bit 'nr' into CF; sbb turns CF into 0 or -1 */
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

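/*
 * Note that these macros advance only their local 'ptr' argument,
 * one byte at a time; callers update the register image themselves:
 * do_int() does SP(regs) -= 6 after three pushw()s, and
 * handle_vm86_fault() adjusts SP(regs) by 2, 4, 6 or 12 as needed.
 */
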
/* There are so many possible reasons for this function to return
 * VM86_INTx that adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;
	struct vm86 *vm86 = current->thread.vm86;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &vm86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	/* emulate the real-mode INT: push flags, cs, ip; vector through the IVT */
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	save_v86_state(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	struct vm86 *vm86 = current->thread.vm86;

	if (vm86->vm86plus.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			save_v86_state(regs, VM86_TRAP + (trapno << 8));
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;
	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;

#define CHECK_IF_IN_TRAP \
	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		goto vm86_fault_return;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (vmpi->vm86dbg_active) {
			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
				save_v86_state(regs, VM86_INTx + (intno << 8));
				return;
			}
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		goto check_vip;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		goto vm86_fault_return;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		goto vm86_fault_return;

	default:
		save_v86_state(regs, VM86_UNKNOWN);
	}

	return;

check_vip:
	if (VEFLAGS & X86_EFLAGS_VIP) {
		save_v86_state(regs, VM86_STI);
		return;
	}

vm86_fault_return:
	if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
		save_v86_state(regs, VM86_PICRETURN);
		return;
	}
	if (orig_flags & X86_EFLAGS_TF)
		handle_vm86_trap(regs, 0, X86_TRAP_DB);
	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	save_v86_state(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME		"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

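/*
 * A sketch of the userspace side of this IRQ-passing protocol
 * (illustrative, not from the original source; handle_device() is a
 * hypothetical placeholder):
 *
 *	// route IRQ 5 to SIGUSR1 (requires CAP_SYS_ADMIN)
 *	syscall(SYS_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 5);
 *	...
 *	// on signal delivery (or when polling): consume the pending
 *	// bit, which also re-enables the interrupt line
 *	if (syscall(SYS_vm86, VM86_GET_AND_RESET_IRQ, 5))
 *		handle_device();
 *	...
 *	syscall(SYS_vm86, VM86_FREE_IRQ, 5);
 */
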
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}