/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
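
/*
 * Example of what the "mov ss,xx" shadow protects on real hardware:
 *
 *	mov	ss, ax		; interrupts held off for one instruction
 *	mov	sp, bx		; so ss:sp is never seen half-updated
 *
 * Under emulation the "mov sp" may fault or be interrupted first, so a
 * vm86 task can in principle observe a torn ss:sp pair.
 */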
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
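
/*
 * These rely on x86 being little-endian: byte 0 of the saved 32-bit
 * pt.ax is AL and byte 1 is AH, and IP/SP are simply the low 16 bits
 * of the saved ip/sp.
 */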
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->v86flags))
#define VEFLAGS	(current->thread.vm86->v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
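
/*
 * Decoding the masks: SAFE_MASK (0xDD5) is CF (bit 0), PF (2), AF (4),
 * ZF (6), SF (7), TF (8), DF (10) and OF (11) -- the flags a vm86 task
 * may change without affecting protected-mode state.  RETURN_MASK
 * (0xDFF) additionally passes through the fixed/reserved low bits
 * (1, 3, 5).  Note that IF (bit 9) is in neither mask: the real IF is
 * never exposed -- get_vflags() substitutes VIF for it.
 */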
struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();
	if (!vm86 || !vm86->vm86_info) {
		pr_alert("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
	user = vm86->vm86_info;

	if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &tsk->thread);
	vm86->saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	lazy_load_gs(ret->gs);

	return ret;
}
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}
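
/*
 * The constants above: 0xA0000 is the base of the legacy VGA graphics
 * window, and write-protecting 32 PTEs of 4 KiB covers the 128 KiB
 * range 0xA0000-0xBFFFF, so stores to screen memory by the vm86 task
 * fault first and dirty video pages can be tracked via screen_bitmap.
 */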
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
			struct kernel_vm86_struct *info);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */

	return do_sys_vm86((struct vm86plus_struct __user *) v86, false, &info);
}
SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */

	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 *  from access_ok(), because the subfunction is
		 *  interpreted as (invalid) address to vm86_struct.
		 *  So the installation check works.
		 */
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true, &info);
}
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
			struct kernel_vm86_struct *info)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	unsigned long err = 0;

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, v86, plus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct)))
		return -EFAULT;

	memset(info, 0, sizeof(*info));
	get_user_try {
		unsigned short seg;
		get_user_ex(info->regs.pt.bx, &v86->regs.ebx);
		get_user_ex(info->regs.pt.cx, &v86->regs.ecx);
		get_user_ex(info->regs.pt.dx, &v86->regs.edx);
		get_user_ex(info->regs.pt.si, &v86->regs.esi);
		get_user_ex(info->regs.pt.di, &v86->regs.edi);
		get_user_ex(info->regs.pt.bp, &v86->regs.ebp);
		get_user_ex(info->regs.pt.ax, &v86->regs.eax);
		get_user_ex(info->regs.pt.ip, &v86->regs.eip);
		get_user_ex(seg, &v86->regs.cs);
		info->regs.pt.cs = seg;
		get_user_ex(info->regs.pt.flags, &v86->regs.eflags);
		get_user_ex(info->regs.pt.sp, &v86->regs.esp);
		get_user_ex(seg, &v86->regs.ss);
		info->regs.pt.ss = seg;
		get_user_ex(info->regs.es, &v86->regs.es);
		get_user_ex(info->regs.ds, &v86->regs.ds);
		get_user_ex(info->regs.fs, &v86->regs.fs);
		get_user_ex(info->regs.gs, &v86->regs.gs);

		get_user_ex(info->flags, &v86->flags);
		get_user_ex(info->screen_bitmap, &v86->screen_bitmap);
		get_user_ex(info->cpu_type, &v86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;
	if (copy_from_user(&info->int_revectored, &v86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&info->int21_revectored, &v86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&info->vm86plus, &v86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		info->vm86plus.is_vm86pus = 1;
	}

	info->regs32 = current_pt_regs();
	vm86->vm86_info = v86;

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	info->regs.pt.orig_ax = info->regs32->orig_ax;
	switch (info->cpu_type) {
	case CPU_286:
		vm86->v86mask = 0;
		break;
	case CPU_386:
		vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}
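
	/*
	 * v86mask selects the EFLAGS bits that exist on the emulated CPU
	 * generation: none on a 286, NT and IOPL from the 386 on, AC added
	 * by the 486, and ID (the CPUID-detection bit) on later CPUs.  A
	 * DOS program probing for a 486 by toggling AC therefore sees the
	 * behaviour of the configured cpu_type, not of the host.
	 */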
	/*
	 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
	 */
	info->regs32->ax = VM86_SIGNAL;
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(info->regs32->gs);

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	vm86->screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);
	/* call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
	if (unlikely(current->audit_context))
		__audit_syscall_exit(1, 0);
#endif

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov  %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	unreachable();	/* we never return here */
}
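
/*
 * Note that the asm above never returns in the normal sense: it points
 * %esp at &info->regs (the vm86 register image built on this kernel
 * stack) and jumps into the common exit path, so the final IRET loads
 * that image and, with X86_VM_MASK set in EFLAGS, drops the CPU into
 * virtual-8086 mode.  Control comes back only via a fault or interrupt
 * and eventually save_v86_state().
 */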
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}
static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */
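
/*
 * A worked trace of that sequence in terms of the virtual flags:
 *
 *	cli	-> clear_IF():  VIF := 0
 *	pushf	-> get_vflags() pushes an image with IF copied from VIF (0)
 *	sti	-> set_IF():    VIF := 1
 *	popf	-> set_vflags_*() sees IF clear in the popped image, so it
 *		   must call clear_IF() to end with VIF = 0 again
 *
 * Without the clear_IF() call the popf leaves VIF set: exactly the bug
 * described above.
 */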
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}
static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->v86mask);
}
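
/*
 * So the image a vm86 task reads back is: the hardware flags filtered
 * through RETURN_MASK, IF substituted from VIF, IOPL forced to 3, and
 * the CPU-generation bits supplied from VEFLAGS under v86mask.
 */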
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)
#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})
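
/*
 * These macros deliberately access the stack one byte at a time: "ptr"
 * is a 16-bit quantity, so the arithmetic on it wraps at the 64 KiB
 * segment boundary just as a real-mode stack does.  E.g. a pushw with
 * sp == 0x0001 stores the high byte at ss:0x0000 and the low byte at
 * ss:0xffff rather than straddling past the segment.
 */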
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}
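
/*
 * This mirrors a real-mode INT: the handler address is fetched from
 * the interrupt vector table at linear address i * 4 (offset word,
 * then segment word), FLAGS/CS/IP are pushed on the vm86 stack, and
 * TF and IF (plus AC, here) are cleared before cs:ip is redirected --
 * unless the vector points at the BIOS or is revectored, in which
 * case the interrupt is bounced to the 32-bit monitor instead.
 */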
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   the path where we call save_v86_state() and change
			   the stack pointer to KVM86->regs32 */
			set_thread_flag(TIF_NOTIFY_RESUME);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);
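
	/*
	 * Example of what the prefix scan handles: in 16-bit code "pushfd"
	 * is encoded as 66 9c, so the 0x66 prefix sets data32 and the 0x9c
	 * opcode below is then emulated as a 32-bit pushf.
	 */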
	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}
	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}
	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;
	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;
simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;
#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \
	| (1 << SIGUNUSED))
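
/*
 * ALLOWED_SIGS is a bitmask indexed by signal number; bit 0 is set so
 * that a "signal" of 0 (meaning: don't send one, just poll) passes the
 * (1 << sig) & ALLOWED_SIGS test in VM86_REQUEST_IRQ below.
 */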
static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}
static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}
void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}
static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}