/*
 * vm86.c: A vm86 emulator. The main purpose of this emulator is to do as
 * little work as possible.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005-2006, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#define HIGHMEM		(1 << 20)	/* 1MB */
#define MASK16(v)	((v) & 0xFFFF)

static unsigned prev_eip = 0;
enum vm86_mode mode = VM86_REAL;

static struct regs saved_rm_regs;

static char *states[] = {
	"<VM86_REAL>",
	"<VM86_REAL_TO_PROTECTED>",
	"<VM86_PROTECTED_TO_REAL>",
	"<VM86_PROTECTED>"
};

static char *rnames[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di" };

#define PDE_PS			(1 << 7)
#define PT_ENTRY_PRESENT	0x1

/* We only support access to <=4G physical memory due to 1:1 mapping */
static uint64_t
guest_linear_to_phys(uint32_t base)
{
	uint32_t gcr3 = oldctx.cr3;
	uint64_t l2_mfn, l1_mfn, l0_mfn;

	if (!(oldctx.cr0 & CR0_PG))
		return base;

	if (!(oldctx.cr4 & CR4_PAE)) {
		l1_mfn = ((uint32_t *)(long)gcr3)[(base >> 22) & 0x3ff];
		if (!(l1_mfn & PT_ENTRY_PRESENT))
			panic("l2 entry not present\n");

		if ((oldctx.cr4 & CR4_PSE) && (l1_mfn & PDE_PS)) {
			l0_mfn = l1_mfn & 0xffc00000;
			return l0_mfn + (base & 0x3fffff);
		}
		l1_mfn &= 0xfffff000;

		l0_mfn = ((uint32_t *)(long)l1_mfn)[(base >> 12) & 0x3ff];
		if (!(l0_mfn & PT_ENTRY_PRESENT))
			panic("l1 entry not present\n");
		l0_mfn &= 0xfffff000;

		return l0_mfn + (base & 0xfff);
	}

	l2_mfn = ((uint64_t *)(long)gcr3)[(base >> 30) & 0x3];
	if (!(l2_mfn & PT_ENTRY_PRESENT))
		panic("l3 entry not present\n");
	l2_mfn &= 0xffffff000ULL;

	if (l2_mfn & 0xf00000000ULL) {
		printf("l2 page above 4G\n");
		cpuid_addr_value(l2_mfn + 8 * ((base >> 21) & 0x1ff), &l1_mfn);
	} else
		l1_mfn = ((uint64_t *)(long)l2_mfn)[(base >> 21) & 0x1ff];
	if (!(l1_mfn & PT_ENTRY_PRESENT))
		panic("l2 entry not present\n");

	if (l1_mfn & PDE_PS) { /* CR4.PSE is ignored in PAE mode */
		l0_mfn = l1_mfn & 0xfffe00000ULL;
		return l0_mfn + (base & 0x1fffff);
	}

	l1_mfn &= 0xffffff000ULL;

	if (l1_mfn & 0xf00000000ULL) {
		printf("l1 page above 4G\n");
		cpuid_addr_value(l1_mfn + 8 * ((base >> 12) & 0x1ff), &l0_mfn);
	} else
		l0_mfn = ((uint64_t *)(long)l1_mfn)[(base >> 12) & 0x1ff];
	if (!(l0_mfn & PT_ENTRY_PRESENT))
		panic("l1 entry not present\n");
	l0_mfn &= 0xffffff000ULL;

	return l0_mfn + (base & 0xfff);
}
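/*
 * Illustrative sketch of the index arithmetic used by the walk above
 * (hypothetical helper, not called anywhere).  Without PAE a 32-bit
 * linear address splits 10/10/12 across directory index, table index
 * and page offset; with PAE it splits 2/9/9/12.
 */
static inline unsigned
linear_split_nonpae(unsigned linear, unsigned *pde_idx, unsigned *pte_idx)
{
	*pde_idx = (linear >> 22) & 0x3ff;	/* page directory index */
	*pte_idx = (linear >> 12) & 0x3ff;	/* page table index */
	return linear & 0xfff;			/* offset within the page */
}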
static unsigned
address(struct regs *regs, unsigned seg, unsigned off)
{
	uint64_t gdt_phys_base;
	unsigned long long entry;
	unsigned seg_base, seg_limit;
	unsigned entry_low, entry_high;

	if (seg == 0) {
		if (mode == VM86_REAL || mode == VM86_REAL_TO_PROTECTED)
			return off;
		else
			panic("segment is zero, but not in real mode!\n");
	}

	if (mode == VM86_REAL || seg > oldctx.gdtr_limit ||
	    (mode == VM86_REAL_TO_PROTECTED && regs->cs == seg))
		return ((seg & 0xFFFF) << 4) + off;

	gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
	if (gdt_phys_base != (uint32_t)gdt_phys_base) {
		printf("gdt base address above 4G\n");
		cpuid_addr_value(gdt_phys_base + 8 * (seg >> 3), &entry);
	} else
		entry = ((unsigned long long *)(long)gdt_phys_base)[seg >> 3];

	entry_high = entry >> 32;
	entry_low = entry & 0xFFFFFFFF;

	seg_base = (entry_high & 0xFF000000) | ((entry >> 16) & 0xFFFFFF);
	seg_limit = (entry_high & 0xF0000) | (entry_low & 0xFFFF);

	if (entry_high & 0x8000 &&				/* P bit */
	    ((entry_high & 0x800000 && off >> 12 <= seg_limit) ||	/* G bit */
	     (!(entry_high & 0x800000) && off <= seg_limit)))
		return seg_base + off;

	panic("should never reach here in function address():\n\t"
	      "entry=0x%08x%08x, mode=%d, seg=0x%08x, offset=0x%08x\n",
	      entry_high, entry_low, mode, seg, off);
}
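/*
 * Worked example for the real-mode branch above (hypothetical helper,
 * unused): the effective address is (seg << 4) + off, so the reset
 * vector 0xF000:0xFFF0 maps to linear 0xFFFF0.
 */
static inline unsigned
real_mode_address(unsigned seg, unsigned off)
{
	return ((seg & 0xFFFF) << 4) + off;
}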
static void
trace(struct regs *regs, int adjust, char *fmt, ...)
{
	unsigned off = regs->eip - adjust;
	va_list ap;

	if ((traceset & (1 << mode)) &&
	    (mode == VM86_REAL_TO_PROTECTED || mode == VM86_REAL)) {
		/* 16-bit, seg:off addressing */
		unsigned addr = address(regs, regs->cs, off);
		printf("0x%08x: 0x%x:0x%04x ", addr, regs->cs, off);
		printf("(%d) ", mode);
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf("\n");
	}
	if ((traceset & (1 << mode)) &&
	    (mode == VM86_PROTECTED_TO_REAL || mode == VM86_PROTECTED)) {
		/* 16-bit, gdt addressing */
		unsigned addr = address(regs, regs->cs, off);
		printf("0x%08x: 0x%x:0x%08x ", addr, regs->cs, off);
		printf("(%d) ", mode);
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
		printf("\n");
	}
}

static inline unsigned
read32(unsigned addr)
{
	return *(unsigned long *) addr;
}

static inline unsigned
read16(unsigned addr)
{
	return *(unsigned short *) addr;
}

static inline unsigned
read8(unsigned addr)
{
	return *(unsigned char *) addr;
}

static inline void
write32(unsigned addr, unsigned value)
{
	*(unsigned long *) addr = value;
}

static inline void
write16(unsigned addr, unsigned value)
{
	*(unsigned short *) addr = value;
}

static inline void
write8(unsigned addr, unsigned value)
{
	*(unsigned char *) addr = value;
}

static inline void
push32(struct regs *regs, unsigned value)
{
	regs->uesp -= 4;
	write32(address(regs, regs->uss, MASK16(regs->uesp)), value);
}

static inline void
push16(struct regs *regs, unsigned value)
{
	regs->uesp -= 2;
	write16(address(regs, regs->uss, MASK16(regs->uesp)), value);
}

static inline unsigned
pop32(struct regs *regs)
{
	unsigned value = read32(address(regs, regs->uss, MASK16(regs->uesp)));
	regs->uesp += 4;
	return value;
}

static inline unsigned
pop16(struct regs *regs)
{
	unsigned value = read16(address(regs, regs->uss, MASK16(regs->uesp)));
	regs->uesp += 2;
	return value;
}

static inline unsigned
fetch32(struct regs *regs)
{
	unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
	regs->eip += 4;
	return read32(addr);
}

static inline unsigned
fetch16(struct regs *regs)
{
	unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
	regs->eip += 2;
	return read16(addr);
}

static inline unsigned
fetch8(struct regs *regs)
{
	unsigned addr = address(regs, regs->cs, MASK16(regs->eip));
	regs->eip++;
	return read8(addr);
}

static unsigned
getreg32(struct regs *regs, int r)
{
	switch (r & 7) {
	case 0: return regs->eax;
	case 1: return regs->ecx;
	case 2: return regs->edx;
	case 3: return regs->ebx;
	case 4: return regs->uesp;
	case 5: return regs->ebp;
	case 6: return regs->esi;
	case 7: return regs->edi;
	}
	return ~0;
}

static unsigned
getreg16(struct regs *regs, int r)
{
	return MASK16(getreg32(regs, r));
}

static unsigned
getreg8(struct regs *regs, int r)
{
	switch (r & 7) {
	case 0: return regs->eax & 0xFF; /* al */
	case 1: return regs->ecx & 0xFF; /* cl */
	case 2: return regs->edx & 0xFF; /* dl */
	case 3: return regs->ebx & 0xFF; /* bl */
	case 4: return (regs->eax >> 8) & 0xFF; /* ah */
	case 5: return (regs->ecx >> 8) & 0xFF; /* ch */
	case 6: return (regs->edx >> 8) & 0xFF; /* dh */
	case 7: return (regs->ebx >> 8) & 0xFF; /* bh */
	}
	return ~0;
}

static void
setreg32(struct regs *regs, int r, unsigned v)
{
	switch (r & 7) {
	case 0: regs->eax = v; break;
	case 1: regs->ecx = v; break;
	case 2: regs->edx = v; break;
	case 3: regs->ebx = v; break;
	case 4: regs->uesp = v; break;
	case 5: regs->ebp = v; break;
	case 6: regs->esi = v; break;
	case 7: regs->edi = v; break;
	}
}

static void
setreg16(struct regs *regs, int r, unsigned v)
{
	setreg32(regs, r, (getreg32(regs, r) & ~0xFFFF) | MASK16(v));
}

static void
setreg8(struct regs *regs, int r, unsigned v)
{
	v &= 0xFF;
	switch (r & 7) {
	case 0: regs->eax = (regs->eax & ~0xFF) | v; break;
	case 1: regs->ecx = (regs->ecx & ~0xFF) | v; break;
	case 2: regs->edx = (regs->edx & ~0xFF) | v; break;
	case 3: regs->ebx = (regs->ebx & ~0xFF) | v; break;
	case 4: regs->eax = (regs->eax & ~0xFF00) | (v << 8); break;
	case 5: regs->ecx = (regs->ecx & ~0xFF00) | (v << 8); break;
	case 6: regs->edx = (regs->edx & ~0xFF00) | (v << 8); break;
	case 7: regs->ebx = (regs->ebx & ~0xFF00) | (v << 8); break;
	}
}
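/*
 * Sketch of the sub-register aliasing encoded in getreg8()/setreg8()
 * above (hypothetical helper, unused): indices 0-3 select the low
 * bytes al/cl/dl/bl, indices 4-7 the high bytes ah/ch/dh/bh of the
 * same four registers.
 */
static inline unsigned
byte_reg_shift(int r)
{
	return (r & 4) ? 8 : 0;	/* bits 7..0 vs. bits 15..8 */
}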
static unsigned
segment(unsigned prefix, struct regs *regs, unsigned seg)
{
	if (prefix & SEG_ES)
		seg = regs->ves;
	if (prefix & SEG_DS)
		seg = regs->vds;
	if (prefix & SEG_CS)
		seg = regs->cs;
	if (prefix & SEG_SS)
		seg = regs->uss;
	if (prefix & SEG_FS)
		seg = regs->vfs;
	if (prefix & SEG_GS)
		seg = regs->vgs;

	return seg;
}

static unsigned
sib(struct regs *regs, int mod, unsigned byte)
{
	unsigned scale = (byte >> 6) & 3;
	int index = (byte >> 3) & 7;
	int base = byte & 7;
	unsigned addr = 0;

	switch (mod) {
	case 0:
		if (base == 5)
			addr = fetch32(regs);
		else
			addr = getreg32(regs, base);
		break;
	case 1:
		addr = getreg32(regs, base) + (char) fetch8(regs);
		break;
	case 2:
		addr = getreg32(regs, base) + fetch32(regs);
		break;
	}

	if (index != 4)
		addr += getreg32(regs, index) << scale;

	return addr;
}

/*
 * Operand (modrm) decode
 */
static unsigned
operand(unsigned prefix, struct regs *regs, unsigned modrm)
{
	int mod, disp = 0, seg;

	seg = segment(prefix, regs, regs->vds);

	if (prefix & ADDR32) { /* 32-bit addressing */
		switch ((mod = (modrm >> 6) & 3)) {
		case 0:
			switch (modrm & 7) {
			case 0: return address(regs, seg, regs->eax);
			case 1: return address(regs, seg, regs->ecx);
			case 2: return address(regs, seg, regs->edx);
			case 3: return address(regs, seg, regs->ebx);
			case 4: return address(regs, seg,
					       sib(regs, mod, fetch8(regs)));
			case 5: return address(regs, seg, fetch32(regs));
			case 6: return address(regs, seg, regs->esi);
			case 7: return address(regs, seg, regs->edi);
			}
			break;
		case 1:
		case 2:
			if ((modrm & 7) != 4) {
				if (mod == 1)
					disp = (char) fetch8(regs);
				else
					disp = (int) fetch32(regs);
			}
			switch (modrm & 7) {
			case 0: return address(regs, seg, regs->eax + disp);
			case 1: return address(regs, seg, regs->ecx + disp);
			case 2: return address(regs, seg, regs->edx + disp);
			case 3: return address(regs, seg, regs->ebx + disp);
			case 4: return address(regs, seg,
					       sib(regs, mod, fetch8(regs)));
			case 5: return address(regs, seg, regs->ebp + disp);
			case 6: return address(regs, seg, regs->esi + disp);
			case 7: return address(regs, seg, regs->edi + disp);
			}
			break;
		case 3: /* register */
			return getreg32(regs, modrm);
		}
	} else { /* 16-bit addressing */
		switch ((mod = (modrm >> 6) & 3)) {
		case 0:
			switch (modrm & 7) {
			case 0: return address(regs, seg, MASK16(regs->ebx) +
					       MASK16(regs->esi));
			case 1: return address(regs, seg, MASK16(regs->ebx) +
					       MASK16(regs->edi));
			case 2: return address(regs, seg, MASK16(regs->ebp) +
					       MASK16(regs->esi));
			case 3: return address(regs, seg, MASK16(regs->ebp) +
					       MASK16(regs->edi));
			case 4: return address(regs, seg, MASK16(regs->esi));
			case 5: return address(regs, seg, MASK16(regs->edi));
			case 6: return address(regs, seg, fetch16(regs));
			case 7: return address(regs, seg, MASK16(regs->ebx));
			}
			break;
		case 1:
		case 2:
			if (mod == 1)
				disp = (char) fetch8(regs);
			else
				disp = (int) fetch16(regs);
			switch (modrm & 7) {
			case 0: return address(regs, seg, MASK16(regs->ebx) +
					       MASK16(regs->esi) + disp);
			case 1: return address(regs, seg, MASK16(regs->ebx) +
					       MASK16(regs->edi) + disp);
			case 2: return address(regs, seg, MASK16(regs->ebp) +
					       MASK16(regs->esi) + disp);
			case 3: return address(regs, seg, MASK16(regs->ebp) +
					       MASK16(regs->edi) + disp);
			case 4: return address(regs, seg,
					       MASK16(regs->esi) + disp);
			case 5: return address(regs, seg,
					       MASK16(regs->edi) + disp);
			case 6: return address(regs, seg,
					       MASK16(regs->ebp) + disp);
			case 7: return address(regs, seg,
					       MASK16(regs->ebx) + disp);
			}
			break;
		case 3: /* register */
			return getreg16(regs, modrm);
		}
	}

	return 0; /* never reached */
}
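/*
 * Illustrative decomposition of the ModRM byte consumed by operand()
 * (hypothetical helper, not used by the decoder above).
 */
static inline void
modrm_split(unsigned modrm, unsigned *mod, unsigned *reg, unsigned *rm)
{
	*mod = (modrm >> 6) & 3;	/* 00/01/10: memory, 11: register */
	*reg = (modrm >> 3) & 7;	/* register or opcode extension */
	*rm  = modrm & 7;		/* base register or memory form */
}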
static int
lidt(struct regs *regs, unsigned prefix, unsigned modrm)
{
	unsigned eip = regs->eip - 3;
	unsigned addr = operand(prefix, regs, modrm);

	oldctx.idtr_limit = ((struct dtr *) addr)->size;
	if ((prefix & DATA32) == 0)
		oldctx.idtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
	else
		oldctx.idtr_base = ((struct dtr *) addr)->base;
	TRACE((regs, regs->eip - eip, "lidt 0x%x <%d, 0x%x>",
	       addr, oldctx.idtr_limit, oldctx.idtr_base));

	return 1;
}
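/*
 * The lidt/lgdt operand is a 6-byte pseudo-descriptor: a 16-bit limit
 * followed by a 32-bit base, which is what the struct dtr casts above
 * rely on.  A hedged, hand-rolled equivalent (illustrative, unused):
 */
static inline void
read_pseudo_descriptor(unsigned addr, unsigned *limit, unsigned *base)
{
	*limit = read16(addr);		/* bytes 0-1: table limit */
	*base = read32(addr + 2);	/* bytes 2-5: linear base */
}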
static int
lgdt(struct regs *regs, unsigned prefix, unsigned modrm)
{
	unsigned eip = regs->eip - 3;
	unsigned addr = operand(prefix, regs, modrm);

	oldctx.gdtr_limit = ((struct dtr *) addr)->size;
	if ((prefix & DATA32) == 0)
		oldctx.gdtr_base = ((struct dtr *) addr)->base & 0xFFFFFF;
	else
		oldctx.gdtr_base = ((struct dtr *) addr)->base;
	TRACE((regs, regs->eip - eip, "lgdt 0x%x <%d, 0x%x>",
	       addr, oldctx.gdtr_limit, oldctx.gdtr_base));

	return 1;
}

/*
 * Modify CR0 through the lmsw instruction.
 */
static int
lmsw(struct regs *regs, unsigned prefix, unsigned modrm)
{
	unsigned eip = regs->eip - 3;
	unsigned ax = operand(prefix, regs, modrm) & 0xF;
	unsigned cr0 = (oldctx.cr0 & 0xFFFFFFF0) | ax;

	TRACE((regs, regs->eip - eip, "lmsw 0x%x", ax));
	oldctx.cr0 = cr0 | CR0_PE | CR0_NE;
	if (cr0 & CR0_PE)
		set_mode(regs, VM86_REAL_TO_PROTECTED);

	return 1;
}

/*
 * We need to handle moves that address memory beyond the 64KB segment
 * limit that VM8086 mode enforces.
 */
static int
movr(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned modrm = fetch8(regs);
	unsigned addr = operand(prefix, regs, modrm);
	unsigned val, r = (modrm >> 3) & 7;

	if ((modrm & 0xC0) == 0xC0) {
		/*
		 * Emulate all guest instructions in protected-to-real mode.
		 */
		if (mode != VM86_PROTECTED_TO_REAL)
			return 0;
	}

	switch (opc) {
	case 0x88: /* addr32 mov r8, r/m8 */
		val = getreg8(regs, r);
		TRACE((regs, regs->eip - eip,
		       "movb %%%s, *0x%x", rnames[r], addr));
		write8(addr, val);
		return 1;

	case 0x8A: /* addr32 mov r/m8, r8 */
		TRACE((regs, regs->eip - eip,
		       "movb *0x%x, %%%s", addr, rnames[r]));
		setreg8(regs, r, read8(addr));
		return 1;

	case 0x89: /* addr32 mov r16, r/m16 */
		val = getreg32(regs, r);
		if ((modrm & 0xC0) == 0xC0) {
			if (prefix & DATA32)
				setreg32(regs, modrm & 7, val);
			else
				setreg16(regs, modrm & 7, MASK16(val));
			return 1;
		}
		if (prefix & DATA32) {
			TRACE((regs, regs->eip - eip,
			       "movl %%e%s, *0x%x", rnames[r], addr));
			write32(addr, val);
		} else {
			TRACE((regs, regs->eip - eip,
			       "movw %%%s, *0x%x", rnames[r], addr));
			write16(addr, MASK16(val));
		}
		return 1;

	case 0x8B: /* addr32 mov r/m16, r16 */
		if ((modrm & 0xC0) == 0xC0) {
			if (prefix & DATA32)
				setreg32(regs, r, addr);
			else
				setreg16(regs, r, MASK16(addr));
			return 1;
		}
		if (prefix & DATA32) {
			TRACE((regs, regs->eip - eip,
			       "movl *0x%x, %%e%s", addr, rnames[r]));
			setreg32(regs, r, read32(addr));
		} else {
			TRACE((regs, regs->eip - eip,
			       "movw *0x%x, %%%s", addr, rnames[r]));
			setreg16(regs, r, read16(addr));
		}
		return 1;

	case 0xC6: /* addr32 movb $imm, r/m8 */
		if ((modrm >> 3) & 7) /* opcode extension must be 0 */
			return 0;
		val = fetch8(regs);
		write8(addr, val);
		TRACE((regs, regs->eip - eip, "movb $0x%x, *0x%x",
		       val, addr));
		return 1;
	}
	return 0;
}

/*
 * We need to handle string moves that address memory beyond the 64KB segment
 * limit that VM8086 mode enforces.
 */
static int
movs(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned sseg = segment(prefix, regs, regs->vds);
	unsigned dseg = regs->ves;
	unsigned saddr, daddr;
	unsigned count = 1;
	int incr = ((regs->eflags & EFLAGS_DF) == 0) ? 1 : -1;

	saddr = address(regs, sseg, regs->esi);
	daddr = address(regs, dseg, regs->edi);

	if ((prefix & REP) != 0) {
		count = regs->ecx;
		regs->ecx = 0;
	}

	switch (opc) {
	case 0xA4: /* movsb */
		regs->esi += (incr * count);
		regs->edi += (incr * count);

		while (count-- != 0) {
			write8(daddr, read8(saddr));
			daddr += incr;
			saddr += incr;
		}
		TRACE((regs, regs->eip - eip, "movsb (%%esi),%%es:(%%edi)"));
		break;

	case 0xA5: /* movsw */
		if ((prefix & DATA32) == 0) {
			regs->esi += 2 * (incr * count);
			regs->edi += 2 * (incr * count);

			while (count-- != 0) {
				write16(daddr, read16(saddr));
				daddr += 2 * incr;
				saddr += 2 * incr;
			}
		} else {
			regs->esi += 4 * (incr * count);
			regs->edi += 4 * (incr * count);

			while (count-- != 0) {
				write32(daddr, read32(saddr));
				daddr += 4 * incr;
				saddr += 4 * incr;
			}
		}
		TRACE((regs, regs->eip - eip, "movsw %s(%%esi),%%es:(%%edi)",
		       (prefix & DATA32) ? "data32 " : ""));
		break;
	}
	return 1;
}

static int
lods(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned seg = segment(prefix, regs, regs->vds);
	unsigned addr = address(regs, seg, regs->esi);
	unsigned count = 1;
	int incr = ((regs->eflags & EFLAGS_DF) == 0) ? 1 : -1;

	if ((prefix & REP) != 0) {
		count = regs->ecx;
		regs->ecx = 0;
	}

	switch (opc) {
	case 0xAD: /* lodsw */
		if ((prefix & DATA32) == 0) {
			regs->esi += 2 * (incr * count);
			while (count-- != 0) {
				setreg16(regs, 0, read16(addr));
				addr += 2 * incr;
			}
			TRACE((regs, regs->eip - eip, "lodsw (%%esi),%%ax"));
		} else {
			regs->esi += 4 * (incr * count);
			while (count-- != 0) {
				setreg32(regs, 0, read32(addr));
				addr += 4 * incr;
			}
			TRACE((regs, regs->eip - eip, "lodsl (%%esi),%%eax"));
		}
		break;
	}
	return 1;
}

/*
 * Move to and from a control register.
 */
static int
movcr(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 2;
	unsigned modrm = fetch8(regs);
	unsigned cr = (modrm >> 3) & 7;

	if ((modrm & 0xC0) != 0xC0) /* only registers */
		return 0;

	switch (opc) {
	case 0x20: /* mov Rd, Cd */
		TRACE((regs, regs->eip - eip, "movl %%cr%d, %%eax", cr));
		switch (cr) {
		case 0:
			setreg32(regs, modrm,
				 oldctx.cr0 & ~(CR0_PE | CR0_NE));
			break;
		case 2:
			setreg32(regs, modrm, get_cr2());
			break;
		case 3:
			setreg32(regs, modrm, oldctx.cr3);
			break;
		case 4:
			setreg32(regs, modrm, oldctx.cr4);
			break;
		}
		break;
	case 0x22: /* mov Cd, Rd */
		TRACE((regs, regs->eip - eip, "movl %%eax, %%cr%d", cr));
		switch (cr) {
		case 0:
			oldctx.cr0 = getreg32(regs, modrm) | (CR0_PE | CR0_NE);
			if (getreg32(regs, modrm) & CR0_PE)
				set_mode(regs, VM86_REAL_TO_PROTECTED);
			// set_mode(regs, VM86_REAL);
			break;
		case 3:
			oldctx.cr3 = getreg32(regs, modrm);
			break;
		case 4:
			oldctx.cr4 = getreg32(regs, modrm);
			break;
		}
		break;
	}

	return 1;
}

static inline void set_eflags_ZF(unsigned mask, unsigned v1, struct regs *regs)
{
	if ((v1 & mask) == 0)
		regs->eflags |= EFLAGS_ZF;
	else
		regs->eflags &= ~EFLAGS_ZF;
}

static void set_eflags_add(unsigned hi_bit_mask, unsigned v1, unsigned v2,
			   unsigned result, struct regs *regs)
{
	int bit_count;
	unsigned tmp;
	unsigned full_mask;
	unsigned nonsign_mask;

	/* Carry out of high order bit? */
	if ( v1 & v2 & hi_bit_mask )
		regs->eflags |= EFLAGS_CF;
	else
		regs->eflags &= ~EFLAGS_CF;

	/* Even parity in least significant byte? */
	tmp = result & 0xFF;
	for (bit_count = 0; tmp != 0; bit_count++)
		tmp &= (tmp - 1); /* clear the lowest set bit */
	if (bit_count & 1)
		regs->eflags &= ~EFLAGS_PF;
	else
		regs->eflags |= EFLAGS_PF;

	/* Carry out of least significant BCD digit? */
	if ( v1 & v2 & (1<<3) )
		regs->eflags |= EFLAGS_AF;
	else
		regs->eflags &= ~EFLAGS_AF;

	/* Result is zero? */
	full_mask = (hi_bit_mask - 1) | hi_bit_mask;
	set_eflags_ZF(full_mask, result, regs);

	/* Sign of result? */
	if ( result & hi_bit_mask )
		regs->eflags |= EFLAGS_SF;
	else
		regs->eflags &= ~EFLAGS_SF;

	/* Carry out of highest non-sign bit? */
	nonsign_mask = (hi_bit_mask >> 1) & ~hi_bit_mask;
	if ( v1 & v2 & nonsign_mask )
		regs->eflags |= EFLAGS_OF;
	else
		regs->eflags &= ~EFLAGS_OF;
}
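/*
 * Equivalent parity fold for the PF computation above (illustrative
 * only, unused): PF is set when the low byte of the result contains
 * an even number of set bits.
 */
static inline int
even_parity_low_byte(unsigned result)
{
	unsigned p = result & 0xFF;

	p ^= p >> 4;
	p ^= p >> 2;
	p ^= p >> 1;
	return !(p & 1);	/* 1 when parity is even */
}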
/*
 * We need to handle cmp opcodes that address memory beyond the 64KB
 * segment limit that VM8086 mode enforces.
 */
static int
cmp(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned modrm = fetch8(regs);
	unsigned addr = operand(prefix, regs, modrm);
	unsigned diff, val, r = (modrm >> 3) & 7;

	if ((modrm & 0xC0) == 0xC0) /* no registers */
		return 0;

	switch (opc) {
	case 0x39: /* addr32 cmp r16, r/m16 */
		val = getreg32(regs, r);
		if (prefix & DATA32) {
			diff = read32(addr) - val;
			set_eflags_ZF(~0, diff, regs);
			TRACE((regs, regs->eip - eip,
			       "cmp %%e%s, *0x%x (0x%x)",
			       rnames[r], addr, diff));
		} else {
			diff = read16(addr) - val;
			set_eflags_ZF(0xFFFF, diff, regs);
			TRACE((regs, regs->eip - eip,
			       "cmp %%%s, *0x%x (0x%x)",
			       rnames[r], addr, diff));
		}
		break;

	/* other cmp opcodes ... */
	}
	return 1;
}

/*
 * We need to handle test opcodes that address memory beyond the 64KB
 * segment limit that VM8086 mode enforces.
 */
static int
test(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned modrm = fetch8(regs);
	unsigned addr = operand(prefix, regs, modrm);
	unsigned diff, val;

	if ((modrm & 0xC0) == 0xC0) /* no registers */
		return 0;

	switch (opc) {
	case 0xF6: /* testb $imm, r/m8 */
		if ((modrm >> 3) & 7) /* opcode extension must be 0 */
			return 0;
		val = fetch8(regs);
		diff = read8(addr) & val;
		set_eflags_ZF(0xFF, diff, regs);

		TRACE((regs, regs->eip - eip, "testb $0x%x, *0x%x (0x%x)",
		       val, addr, diff));
		break;

	/* other test opcodes ... */
	}
	return 1;
}

/*
 * We need to handle add opcodes that address memory beyond the 64KB
 * segment limit that VM8086 mode enforces.
 */
static int
add(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned modrm = fetch8(regs);
	unsigned addr = operand(prefix, regs, modrm);
	unsigned r = (modrm >> 3) & 7;
	unsigned val1 = 0, val2 = 0, result = 0;
	unsigned hi_bit;

	if ((modrm & 0xC0) == 0xC0) /* no registers */
		return 0;

	switch (opc) {
	case 0x00: /* addr32 add r8, r/m8 */
		val1 = getreg8(regs, r);
		val2 = read8(addr);
		result = val1 + val2;
		write8(addr, result);
		TRACE((regs, regs->eip - eip,
		       "addb %%%s, *0x%x", rnames[r], addr));
		break;

	case 0x01: /* addr32 add r16, r/m16 */
		if (prefix & DATA32) {
			val1 = getreg32(regs, r);
			val2 = read32(addr);
			result = val1 + val2;
			write32(addr, result);
			TRACE((regs, regs->eip - eip,
			       "addl %%e%s, *0x%x", rnames[r], addr));
		} else {
			val1 = getreg16(regs, r);
			val2 = read16(addr);
			result = val1 + val2;
			write16(addr, result);
			TRACE((regs, regs->eip - eip,
			       "addw %%%s, *0x%x", rnames[r], addr));
		}
		break;

	case 0x03: /* addr32 add r/m16, r16 */
		if (prefix & DATA32) {
			val1 = getreg32(regs, r);
			val2 = read32(addr);
			result = val1 + val2;
			setreg32(regs, r, result);
			TRACE((regs, regs->eip - eip,
			       "addl *0x%x, %%e%s", addr, rnames[r]));
		} else {
			val1 = getreg16(regs, r);
			val2 = read16(addr);
			result = val1 + val2;
			setreg16(regs, r, result);
			TRACE((regs, regs->eip - eip,
			       "addw *0x%x, %%%s", addr, rnames[r]));
		}
		break;
	}

	hi_bit = (prefix & DATA32) ? (1<<31) : (1<<15);
	set_eflags_add(hi_bit, val1, val2, result, regs);

	return 1;
}

/*
 * We need to handle pop opcodes that address memory beyond the 64KB
 * segment limit that VM8086 mode enforces.
 */
static int
pop(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned eip = regs->eip - 1;
	unsigned modrm = fetch8(regs);
	unsigned addr = operand(prefix, regs, modrm);

	if ((modrm & 0xC0) == 0xC0) /* no registers */
		return 0;

	switch (opc) {
	case 0x8F: /* pop r/m16 */
		if ((modrm >> 3) & 7) /* opcode extension must be 0 */
			return 0;
		if (prefix & DATA32)
			write32(addr, pop32(regs));
		else
			write16(addr, pop16(regs));
		TRACE((regs, regs->eip - eip, "pop *0x%x", addr));
		break;

	/* other pop opcodes ... */
	}
	return 1;
}

static int
mov_to_seg(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned modrm = fetch8(regs);

	/*
	 * Emulate segment loads in:
	 * 1) real->protected mode.
	 * 2) protected->real mode.
	 */
	if (mode != VM86_REAL_TO_PROTECTED &&
	    mode != VM86_PROTECTED_TO_REAL)
		return 0;

	/* Register source only. */
	if ((modrm & 0xC0) != 0xC0)
		goto fail;

	switch ((modrm & 0x38) >> 3) {
	case 0: /* %es */
		regs->ves = getreg16(regs, modrm);
		if (mode == VM86_PROTECTED_TO_REAL)
			return 1;
		saved_rm_regs.ves = 0;
		oldctx.es_sel = regs->ves;
		return 1;

	case 2: /* %ss */
		regs->uss = getreg16(regs, modrm);
		if (mode == VM86_PROTECTED_TO_REAL)
			return 1;
		saved_rm_regs.uss = 0;
		oldctx.ss_sel = regs->uss;
		return 1;

	case 3: /* %ds */
		regs->vds = getreg16(regs, modrm);
		if (mode == VM86_PROTECTED_TO_REAL)
			return 1;
		saved_rm_regs.vds = 0;
		oldctx.ds_sel = regs->vds;
		return 1;

	case 4: /* %fs */
		regs->vfs = getreg16(regs, modrm);
		if (mode == VM86_PROTECTED_TO_REAL)
			return 1;
		saved_rm_regs.vfs = 0;
		oldctx.fs_sel = regs->vfs;
		return 1;

	case 5: /* %gs */
		regs->vgs = getreg16(regs, modrm);
		if (mode == VM86_PROTECTED_TO_REAL)
			return 1;
		saved_rm_regs.vgs = 0;
		oldctx.gs_sel = regs->vgs;
		return 1;
	}

fail:
	printf("%s:%d: missed opcode %02x %02x\n",
	       __FUNCTION__, __LINE__, opc, modrm);
	return 0;
}

/*
 * Emulate a segment load in protected mode
 */
static int
load_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
{
	uint64_t gdt_phys_base;
	unsigned long long entry;

	/* protected mode: use seg as index into gdt */
	if (sel > oldctx.gdtr_limit)
		return 0;

	if (sel == 0) {
		arbytes->fields.null_bit = 1;
		return 1;
	}

	gdt_phys_base = guest_linear_to_phys(oldctx.gdtr_base);
	if (gdt_phys_base != (uint32_t)gdt_phys_base) {
		printf("gdt base address above 4G\n");
		cpuid_addr_value(gdt_phys_base + 8 * (sel >> 3), &entry);
	} else
		entry = ((unsigned long long *)(long)gdt_phys_base)[sel >> 3];

	/* Check the P bit first */
	if (!((entry >> (15+32)) & 0x1) && sel != 0)
		return 0;

	*base = (((entry >> (56-24)) & 0xFF000000) |
		 ((entry >> (32-16)) & 0x00FF0000) |
		 ((entry >> 16) & 0x0000FFFF));
	*limit = (((entry >> (48-16)) & 0x000F0000) |
		  (entry & 0x0000FFFF));

	arbytes->fields.seg_type = (entry >> (8+32)) & 0xF;	/* TYPE */
	arbytes->fields.s = (entry >> (12+32)) & 0x1;		/* S */
	if (arbytes->fields.s)
		arbytes->fields.seg_type |= 1;			/* accessed */
	arbytes->fields.dpl = (entry >> (13+32)) & 0x3;		/* DPL */
	arbytes->fields.p = (entry >> (15+32)) & 0x1;		/* P */
	arbytes->fields.avl = (entry >> (20+32)) & 0x1;		/* AVL */
	arbytes->fields.default_ops_size = (entry >> (22+32)) & 0x1; /* D */

	if (entry & (1ULL << (23+32))) {			/* G */
		arbytes->fields.g = 1;
		*limit = (*limit << 12) | 0xFFF;
	}

	return 1;
}
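/*
 * Worked example for the field extraction above (illustrative, unused):
 * the common flat 4GB code descriptor 0x00CF9A000000FFFF decodes to
 * base 0, limit 0xFFFFF with G=1 (scaled to 4GB-1), type 0xA
 * (execute/read), S=1, DPL=0, P=1, D=1.
 */
static inline void
decode_descriptor_example(void)
{
	unsigned long long entry = 0x00CF9A000000FFFFULL;
	unsigned base = (((entry >> (56-24)) & 0xFF000000) |
			 ((entry >> (32-16)) & 0x00FF0000) |
			 ((entry >> 16) & 0x0000FFFF));
	unsigned limit = (((entry >> (48-16)) & 0x000F0000) |
			  (entry & 0x0000FFFF));

	(void) base;	/* == 0x00000000 */
	(void) limit;	/* == 0x000FFFFF before granularity scaling */
}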
/*
 * Emulate a protected mode segment load, falling back to clearing it if
 * the descriptor was invalid.
 */
static void
load_or_clear_seg(unsigned long sel, uint32_t *base, uint32_t *limit, union vmcs_arbytes *arbytes)
{
	if (!load_seg(sel, base, limit, arbytes))
		load_seg(0, base, limit, arbytes);
}

static unsigned char rm_irqbase[2];

/*
 * Transition to protected mode
 */
static void
protected_mode(struct regs *regs)
{
	extern char stack_top[];

	oldctx.rm_irqbase[0] = rm_irqbase[0];
	oldctx.rm_irqbase[1] = rm_irqbase[1];

	regs->eflags &= ~(EFLAGS_TF|EFLAGS_VM);

	oldctx.eip = regs->eip;
	oldctx.esp = regs->uesp;
	oldctx.eflags = regs->eflags;

	/* reload all segment registers */
	if (!load_seg(regs->cs, &oldctx.cs_base,
		      &oldctx.cs_limit, &oldctx.cs_arbytes))
		panic("Invalid %%cs=0x%x for protected mode\n", regs->cs);
	oldctx.cs_sel = regs->cs;

	load_or_clear_seg(oldctx.es_sel, &oldctx.es_base,
			  &oldctx.es_limit, &oldctx.es_arbytes);
	load_or_clear_seg(oldctx.ss_sel, &oldctx.ss_base,
			  &oldctx.ss_limit, &oldctx.ss_arbytes);
	load_or_clear_seg(oldctx.ds_sel, &oldctx.ds_base,
			  &oldctx.ds_limit, &oldctx.ds_arbytes);
	load_or_clear_seg(oldctx.fs_sel, &oldctx.fs_base,
			  &oldctx.fs_limit, &oldctx.fs_arbytes);
	load_or_clear_seg(oldctx.gs_sel, &oldctx.gs_base,
			  &oldctx.gs_limit, &oldctx.gs_arbytes);

	/* initialize jump environment to warp back to protected mode */
	regs->uss = DATA_SELECTOR;
	regs->uesp = (unsigned long)stack_top;
	regs->cs = CODE_SELECTOR;
	regs->eip = (unsigned long)switch_to_protected_mode;

	/* this should get us into 32-bit mode */
}

/*
 * Start real-mode emulation
 */
static void
real_mode(struct regs *regs)
{
	regs->eflags |= EFLAGS_VM | 0x02;

	/*
	 * When we transition from protected to real mode and we
	 * have not reloaded the segment descriptors yet, they are
	 * interpreted as if they were still in protected mode.
	 * We emulate this behavior by assuming that these memory
	 * references are below 1MB and set %ss, %ds, %es accordingly.
	 */
	if (regs->uss != 0) {
		if (regs->uss >= HIGHMEM)
			panic("%%ss 0x%lx higher than 1MB", regs->uss);
		regs->uss = address(regs, regs->uss, 0) >> 4;
	} else
		regs->uss = saved_rm_regs.uss;
	if (regs->vds != 0) {
		if (regs->vds >= HIGHMEM)
			panic("%%ds 0x%lx higher than 1MB", regs->vds);
		regs->vds = address(regs, regs->vds, 0) >> 4;
	} else
		regs->vds = saved_rm_regs.vds;
	if (regs->ves != 0) {
		if (regs->ves >= HIGHMEM)
			panic("%%es 0x%lx higher than 1MB", regs->ves);
		regs->ves = address(regs, regs->ves, 0) >> 4;
	} else
		regs->ves = saved_rm_regs.ves;

	/* this should get us into 16-bit mode */
}
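/*
 * Hedged restatement of the %ss/%ds/%es fixup above (hypothetical
 * helper, unused): a descriptor base below 1MB is turned back into a
 * real-mode segment value by dropping the low four bits, so that
 * segment << 4 reproduces the base.
 */
static inline unsigned
base_to_rm_segment(unsigned base)
{
	return base >> 4;	/* e.g. base 0x12340 -> segment 0x1234 */
}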
/*
 * This is the smarts of the emulator and handles the mode transitions. The
 * emulator handles 4 different modes. 1) VM86_REAL: emulated real mode,
 * where we handle only those instructions that are not supported under
 * VM8086. 2) VM86_REAL_TO_PROTECTED: going from real mode to protected
 * mode; here we single step through the instructions until we reload the
 * new %cs (some OSes do a lot of computations before reloading %cs). 3)
 * VM86_PROTECTED_TO_REAL: going from protected to real mode; in this case
 * we emulate the instructions by hand. Finally, 4) VM86_PROTECTED: we
 * transitioned to protected mode and should abandon the emulator. No
 * instructions are emulated when in VM86_PROTECTED mode.
 */
void
set_mode(struct regs *regs, enum vm86_mode newmode)
{
	switch (newmode) {
	case VM86_REAL:
		if (mode == VM86_PROTECTED_TO_REAL ||
		    mode == VM86_REAL_TO_PROTECTED) {
			regs->eflags &= ~EFLAGS_TF;
			real_mode(regs);
		} else if (mode != VM86_REAL)
			panic("unexpected real mode transition");
		break;

	case VM86_REAL_TO_PROTECTED:
		if (mode == VM86_REAL) {
			regs->eflags |= EFLAGS_TF;
			saved_rm_regs.vds = regs->vds;
			saved_rm_regs.ves = regs->ves;
			saved_rm_regs.vfs = regs->vfs;
			saved_rm_regs.vgs = regs->vgs;
			saved_rm_regs.uss = regs->uss;
		} else if (mode != VM86_REAL_TO_PROTECTED)
			panic("unexpected real-to-protected mode transition");
		break;

	case VM86_PROTECTED_TO_REAL:
		if (mode != VM86_PROTECTED)
			panic("unexpected protected-to-real mode transition");
		break;

	case VM86_PROTECTED:
		if (mode != VM86_REAL_TO_PROTECTED)
			panic("unexpected protected mode transition");
		protected_mode(regs);
		break;
	}

	mode = newmode;
	if (mode != VM86_PROTECTED)
		TRACE((regs, 0, states[mode]));
}
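/*
 * The panics above enforce this transition diagram (illustrative
 * predicate, not used by set_mode()):
 *
 *   VM86_REAL -> VM86_REAL_TO_PROTECTED -> VM86_PROTECTED
 *   VM86_PROTECTED -> VM86_PROTECTED_TO_REAL -> VM86_REAL
 */
static inline int
transition_ok(enum vm86_mode from, enum vm86_mode to)
{
	switch (to) {
	case VM86_REAL:
		return from == VM86_REAL || from == VM86_REAL_TO_PROTECTED ||
		       from == VM86_PROTECTED_TO_REAL;
	case VM86_REAL_TO_PROTECTED:
		return from == VM86_REAL || from == VM86_REAL_TO_PROTECTED;
	case VM86_PROTECTED_TO_REAL:
		return from == VM86_PROTECTED;
	case VM86_PROTECTED:
		return from == VM86_REAL_TO_PROTECTED;
	}
	return 0;
}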
static void
jmpl(struct regs *regs, int prefix)
{
	unsigned n = regs->eip;
	unsigned cs, eip;

	eip = (prefix & DATA32) ? fetch32(regs) : fetch16(regs);
	cs = fetch16(regs);

	TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));

	regs->cs = cs;
	regs->eip = eip;

	if (mode == VM86_REAL_TO_PROTECTED)		/* jump to protected mode */
		set_mode(regs, VM86_PROTECTED);
	else if (mode == VM86_PROTECTED_TO_REAL)	/* jump to real mode */
		set_mode(regs, VM86_REAL);
}

static void
jmpl_indirect(struct regs *regs, int prefix, unsigned modrm)
{
	unsigned n = regs->eip;
	unsigned cs, eip;
	unsigned addr;

	addr = operand(prefix, regs, modrm);

	eip = (prefix & DATA32) ? read32(addr) : read16(addr);
	addr += (prefix & DATA32) ? 4 : 2;
	cs = read16(addr);

	TRACE((regs, (regs->eip - n) + 1, "jmpl 0x%x:0x%x", cs, eip));

	regs->cs = cs;
	regs->eip = eip;

	if (mode == VM86_REAL_TO_PROTECTED)		/* jump to protected mode */
		set_mode(regs, VM86_PROTECTED);
	else if (mode == VM86_PROTECTED_TO_REAL)	/* jump to real mode */
		set_mode(regs, VM86_REAL);
}

static void
retl(struct regs *regs, int prefix)
{
	unsigned cs, eip;

	if (prefix & DATA32) {
		eip = pop32(regs);
		cs = MASK16(pop32(regs));
	} else {
		eip = pop16(regs);
		cs = pop16(regs);
	}

	TRACE((regs, 1, "retl (to 0x%x:0x%x)", cs, eip));

	regs->cs = cs;
	regs->eip = eip;

	if (mode == VM86_REAL_TO_PROTECTED)		/* jump to protected mode */
		set_mode(regs, VM86_PROTECTED);
	else if (mode == VM86_PROTECTED_TO_REAL)	/* jump to real mode */
		set_mode(regs, VM86_REAL);
}

static void
interrupt(struct regs *regs, int n)
{
	TRACE((regs, 0, "external interrupt %d", n));
	push16(regs, regs->eflags);
	push16(regs, regs->cs);
	push16(regs, regs->eip);
	regs->eflags &= ~EFLAGS_IF;
	regs->eip = read16(address(regs, 0, n * 4));
	regs->cs = read16(address(regs, 0, n * 4 + 2));
}
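/*
 * Worked example for the vector dispatch above (illustrative, unused):
 * the real-mode IVT lives at linear 0 with four bytes per vector,
 * offset word first, then segment word.  For int 0x10 the entry is
 * read from linear 0x40 and 0x42.
 */
static inline unsigned
ivt_offset_addr(int vector)
{
	return vector * 4;	/* the %cs word follows at vector * 4 + 2 */
}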
/*
 * Most port I/O operations are passed unmodified. We do have to be
 * careful and make sure the emulated program isn't remapping the
 * interrupt vectors. The following simple state machine catches
 * these attempts and rewrites them.
 */
static int
outbyte(struct regs *regs, unsigned prefix, unsigned opc)
{
	static char icw2[2] = { 0 };
	unsigned port, al;

	switch (opc) {
	case 0xE6: /* outb port, al */
		port = fetch8(regs);
		break;
	case 0xEE: /* outb (%dx), al */
		port = MASK16(regs->edx);
		break;
	default:
		return 0;
	}

	al = regs->eax & 0xFF;

	switch (port) {
	case PIC_MASTER + PIC_CMD:
		if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
			icw2[0] = 1;
		break;
	case PIC_MASTER + PIC_IMR:
		if (icw2[0]) {
			icw2[0] = 0;
			printf("Remapping master: ICW2 0x%x -> 0x%x\n",
			       al, NR_EXCEPTION_HANDLER);
			al = NR_EXCEPTION_HANDLER;
		}
		break;

	case PIC_SLAVE + PIC_CMD:
		if (al & (1 << 4)) /* A0=0,D4=1 -> ICW1 */
			icw2[1] = 1;
		break;
	case PIC_SLAVE + PIC_IMR:
		if (icw2[1]) {
			icw2[1] = 0;
			printf("Remapping slave: ICW2 0x%x -> 0x%x\n",
			       al, NR_EXCEPTION_HANDLER+8);
			al = NR_EXCEPTION_HANDLER+8;
		}
		break;
	}

	outb(port, al);
	return 1;
}
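/*
 * Sketch of the ICW handshake tracked above (hedged; predicate is
 * hypothetical and unused): a write to a PIC command port with bit 4
 * set starts initialization (ICW1), and the next write to the matching
 * data port carries ICW2, the vector base the emulator must rewrite.
 */
static inline int
write_is_icw1(int is_cmd_port, unsigned val)
{
	return is_cmd_port && (val & (1 << 4)) != 0;
}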
static int
inbyte(struct regs *regs, unsigned prefix, unsigned opc)
{
	unsigned port;

	switch (opc) {
	case 0xE4: /* inb al, port */
		port = fetch8(regs);
		break;
	case 0xEC: /* inb al, (%dx) */
		port = MASK16(regs->edx);
		break;
	default:
		return 0;
	}

	regs->eax = (regs->eax & ~0xFF) | inb(port);
	return 1;
}

static void
pushrm(struct regs *regs, int prefix, unsigned modrm)
{
	unsigned n = regs->eip;
	unsigned addr, data;

	addr = operand(prefix, regs, modrm);

	if (prefix & DATA32) {
		data = read32(addr);
		push32(regs, data);
	} else {
		data = read16(addr);
		push16(regs, data);
	}

	TRACE((regs, (regs->eip - n) + 1, "push *0x%x", addr));
}

enum { OPC_INVALID, OPC_EMULATED };

#define rdmsr(msr,val1,val2)				\
	__asm__ __volatile__(				\
		"rdmsr"					\
		: "=a" (val1), "=d" (val2)		\
		: "c" (msr))

#define wrmsr(msr,val1,val2)				\
	__asm__ __volatile__(				\
		"wrmsr"					\
		: /* no outputs */			\
		: "c" (msr), "a" (val1), "d" (val2))
/*
 * Emulate a single instruction, including all its prefixes. We only implement
 * a small subset of the opcodes, and not all opcodes are implemented for each
 * of the four modes we can operate in.
 */
static int
opcode(struct regs *regs)
{
	unsigned eip = regs->eip;
	unsigned opc, modrm, disp;
	unsigned prefix = 0;

	if (mode == VM86_PROTECTED_TO_REAL &&
	    oldctx.cs_arbytes.fields.default_ops_size) {
		prefix |= DATA32;
		prefix |= ADDR32;
	}

	for (;;) {
		switch ((opc = fetch8(regs))) {

		case 0x00: /* addr32 add r8, r/m8 */
		case 0x01: /* addr32 add r16, r/m16 */
		case 0x03: /* addr32 add r/m16, r16 */
			if (mode != VM86_REAL && mode != VM86_REAL_TO_PROTECTED)
				goto invalid;
			if ((prefix & ADDR32) == 0)
				goto invalid;
			if (!add(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x07: /* pop %es */
			regs->ves = (prefix & DATA32) ?
				pop32(regs) : pop16(regs);
			TRACE((regs, regs->eip - eip, "pop %%es"));
			if (mode == VM86_REAL_TO_PROTECTED) {
				saved_rm_regs.ves = 0;
				oldctx.es_sel = regs->ves;
			}
			return OPC_EMULATED;

		case 0x0F: /* two byte opcode */
			if (mode == VM86_PROTECTED)
				goto invalid;
			switch ((opc = fetch8(regs))) {
			case 0x01:
				switch (((modrm = fetch8(regs)) >> 3) & 7) {
				case 2: /* lgdt */
					if (!lgdt(regs, prefix, modrm))
						goto invalid;
					return OPC_EMULATED;
				case 3: /* lidt */
					if (!lidt(regs, prefix, modrm))
						goto invalid;
					return OPC_EMULATED;
				case 6: /* lmsw */
					if (!lmsw(regs, prefix, modrm))
						goto invalid;
					return OPC_EMULATED;
				case 7: /* invlpg */
					TRACE((regs, regs->eip - eip, "invlpg"));
					return OPC_EMULATED;
				default:
					goto invalid;
				}
			case 0x06: /* clts */
				oldctx.cr0 &= ~CR0_TS;
				return OPC_EMULATED;
			case 0x09: /* wbinvd */
				return OPC_EMULATED;
			case 0x20: /* mov Rd, Cd */
			case 0x22: /* mov Cd, Rd */
				if (!movcr(regs, prefix, opc))
					goto invalid;
				return OPC_EMULATED;
			case 0x30: /* WRMSR */
				wrmsr(regs->ecx, regs->eax, regs->edx);
				return OPC_EMULATED;
			case 0x32: /* RDMSR */
				rdmsr(regs->ecx, regs->eax, regs->edx);
				return OPC_EMULATED;
			default:
				goto invalid;
			}

		case 0x1F: /* pop %ds */
			regs->vds = (prefix & DATA32) ?
				pop32(regs) : pop16(regs);
			TRACE((regs, regs->eip - eip, "pop %%ds"));
			if (mode == VM86_REAL_TO_PROTECTED) {
				saved_rm_regs.vds = 0;
				oldctx.ds_sel = regs->vds;
			}
			return OPC_EMULATED;

		case 0x26: /* %es: prefix */
			TRACE((regs, regs->eip - eip, "%%es:"));
			prefix |= SEG_ES;
			continue;

		case 0x2E: /* %cs: prefix */
			TRACE((regs, regs->eip - eip, "%%cs:"));
			prefix |= SEG_CS;
			continue;

		case 0x36: /* %ss: prefix */
			TRACE((regs, regs->eip - eip, "%%ss:"));
			prefix |= SEG_SS;
			continue;

		case 0x39: /* addr32 cmp r16, r/m16 */
		case 0x3B: /* addr32 cmp r/m16, r16 */
			if (mode == VM86_PROTECTED_TO_REAL || !(prefix & ADDR32))
				goto invalid;
			if (!cmp(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x3E: /* %ds: prefix */
			TRACE((regs, regs->eip - eip, "%%ds:"));
			prefix |= SEG_DS;
			continue;

		case 0x64: /* %fs: prefix */
			TRACE((regs, regs->eip - eip, "%%fs:"));
			prefix |= SEG_FS;
			continue;

		case 0x65: /* %gs: prefix */
			TRACE((regs, regs->eip - eip, "%%gs:"));
			prefix |= SEG_GS;
			continue;

		case 0x66: /* data size prefix */
			if (mode == VM86_PROTECTED_TO_REAL &&
			    oldctx.cs_arbytes.fields.default_ops_size) {
				TRACE((regs, regs->eip - eip, "data16"));
				prefix &= ~DATA32;
			} else {
				TRACE((regs, regs->eip - eip, "data32"));
				prefix |= DATA32;
			}
			continue;

		case 0x67: /* address size prefix */
			if (mode == VM86_PROTECTED_TO_REAL &&
			    oldctx.cs_arbytes.fields.default_ops_size) {
				TRACE((regs, regs->eip - eip, "addr16"));
				prefix &= ~ADDR32;
			} else {
				TRACE((regs, regs->eip - eip, "addr32"));
				prefix |= ADDR32;
			}
			continue;

		case 0x88: /* addr32 mov r8, r/m8 */
		case 0x8A: /* addr32 mov r/m8, r8 */
			if (mode == VM86_PROTECTED_TO_REAL || !(prefix & ADDR32))
				goto invalid;
			if (!movr(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x89: /* mov r16, r/m16 */
		case 0x8B: /* mov r/m16, r16 */
			if (mode != VM86_PROTECTED_TO_REAL && !(prefix & ADDR32))
				goto invalid;
			if (!movr(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x8E: /* mov r16, sreg */
			if (!mov_to_seg(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x8F: /* addr32 pop r/m16 */
			if (!(prefix & ADDR32))
				goto invalid;
			if (!pop(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0x90: /* nop */
			TRACE((regs, regs->eip - eip, "nop"));
			return OPC_EMULATED;

		case 0x9C: /* pushf */
			TRACE((regs, regs->eip - eip, "pushf"));
			if (prefix & DATA32)
				push32(regs, regs->eflags & ~EFLAGS_VM);
			else
				push16(regs, regs->eflags & ~EFLAGS_VM);
			return OPC_EMULATED;

		case 0x9D: /* popf */
			TRACE((regs, regs->eip - eip, "popf"));
			if (prefix & DATA32)
				regs->eflags = pop32(regs);
			else
				regs->eflags = (regs->eflags & 0xFFFF0000L) |
					pop16(regs);
			regs->eflags |= EFLAGS_VM;
			return OPC_EMULATED;

		case 0xA1: /* mov ax, r/m16 */
		{
			unsigned addr, data;
			int seg = segment(prefix, regs, regs->vds);
			int offset = prefix & ADDR32 ? fetch32(regs) : fetch16(regs);

			if (prefix & DATA32) {
				addr = address(regs, seg, offset);
				data = read32(addr);
				setreg32(regs, 0, data);
			} else {
				addr = address(regs, seg, offset);
				data = read16(addr);
				setreg16(regs, 0, data);
			}
			TRACE((regs, regs->eip - eip, "mov *0x%x, %%ax", addr));
			return OPC_EMULATED;
		}

		case 0xA4: /* movsb */
		case 0xA5: /* movsw */
			if ((prefix & ADDR32) == 0)
				goto invalid;
			if (!movs(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xAD: /* lodsw */
			if ((prefix & ADDR32) == 0)
				goto invalid;
			if (!lods(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xBB: /* mov bx, imm16 */
		{
			unsigned data;

			if (prefix & DATA32) {
				data = fetch32(regs);
				setreg32(regs, 3, data);
			} else {
				data = fetch16(regs);
				setreg16(regs, 3, data);
			}
			TRACE((regs, regs->eip - eip, "mov $0x%x, %%bx", data));
			return OPC_EMULATED;
		}

		case 0xC6: /* addr32 movb $imm, r/m8 */
			if (!(prefix & ADDR32))
				goto invalid;
			if (!movr(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xCB: /* retl */
			if (mode == VM86_REAL_TO_PROTECTED ||
			    mode == VM86_PROTECTED_TO_REAL) {
				retl(regs, prefix);
				return OPC_EMULATED;
			}
			goto invalid;

		case 0xCD: /* int $n */
			TRACE((regs, regs->eip - eip, "int"));
			interrupt(regs, fetch8(regs));
			return OPC_EMULATED;

		case 0xCF: /* iret */
			if (prefix & DATA32) {
				TRACE((regs, regs->eip - eip, "data32 iretd"));
				regs->eip = pop32(regs);
				regs->cs = pop32(regs);
				regs->eflags = pop32(regs);
			} else {
				TRACE((regs, regs->eip - eip, "iret"));
				regs->eip = pop16(regs);
				regs->cs = pop16(regs);
				regs->eflags = (regs->eflags & 0xFFFF0000L) |
					pop16(regs);
			}
			return OPC_EMULATED;

		case 0xE4: /* inb al, port */
			if (!inbyte(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xE6: /* outb port, al */
			if (!outbyte(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xEA: /* jmpl */
			if (mode == VM86_REAL_TO_PROTECTED ||
			    mode == VM86_PROTECTED_TO_REAL) {
				jmpl(regs, prefix);
				return OPC_EMULATED;
			}
			goto invalid;

		case 0xFF: /* jmpl (indirect) */
		{
			unsigned modrm = fetch8(regs);
			switch((modrm >> 3) & 7) {
			case 5: /* jmpl (indirect) */
				if (mode == VM86_REAL_TO_PROTECTED ||
				    mode == VM86_PROTECTED_TO_REAL) {
					jmpl_indirect(regs, prefix, modrm);
					return OPC_EMULATED;
				}
				goto invalid;

			case 6: /* push r/m16 */
				pushrm(regs, prefix, modrm);
				return OPC_EMULATED;

			default:
				goto invalid;
			}
		}

		case 0xEB: /* short jump */
			if (mode == VM86_REAL_TO_PROTECTED ||
			    mode == VM86_PROTECTED_TO_REAL) {
				disp = (char) fetch8(regs);
				TRACE((regs, 2, "jmp 0x%x", regs->eip + disp));
				regs->eip += disp;
			}
			return OPC_EMULATED;

		case 0xEC: /* inb al, (%dx) */
			if (!inbyte(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xEE: /* outb (%dx), al */
			if (!outbyte(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xF0: /* lock */
			TRACE((regs, regs->eip - eip, "lock"));
			continue;

		case 0xF4: /* hlt */
			TRACE((regs, regs->eip - eip, "hlt"));
			/* Do something power-saving here! */
			return OPC_EMULATED;

		case 0xF3: /* rep/repe/repz */
			TRACE((regs, regs->eip - eip, "rep"));
			prefix |= REP;
			continue;

		case 0xF6: /* addr32 testb $imm, r/m8 */
			if (!(prefix & ADDR32))
				goto invalid;
			if (!test(regs, prefix, opc))
				goto invalid;
			return OPC_EMULATED;

		case 0xFA: /* cli */
			TRACE((regs, regs->eip - eip, "cli"));
			regs->eflags &= ~EFLAGS_IF;
			return OPC_EMULATED;

		case 0xFB: /* sti */
			TRACE((regs, regs->eip - eip, "sti"));
			regs->eflags |= EFLAGS_IF;
			return OPC_EMULATED;

		default:
			goto invalid;
		}
	}

invalid:
	regs->eip = eip;
	TRACE((regs, regs->eip - eip, "opc 0x%x", opc));
	return OPC_INVALID;
}

void
emulate(struct regs *regs)
{
	unsigned flteip, ip;
	int nemul = 0;

	/* emulate as many instructions as possible */
	while (opcode(regs) != OPC_INVALID)
		nemul++;

	/* detect the case where we are not making progress */
	if (nemul == 0 && prev_eip == regs->eip) {
		flteip = address(regs, MASK16(regs->cs), regs->eip);

		printf("Undecoded sequence: \n");
		for (ip = flteip; ip < flteip + 16; ip++)
			printf("0x%02x ", read8(ip));
		printf("\n");

		panic("Unknown opcode at %04x:%04x=0x%x",
		      MASK16(regs->cs), regs->eip, flteip);
	}
	prev_eip = regs->eip;
}

void
trap(int trapno, int errno, struct regs *regs)
{
	/* emulate device interrupts */
	if (trapno >= NR_EXCEPTION_HANDLER) {
		int irq = trapno - NR_EXCEPTION_HANDLER;
		if (irq < 8)
			interrupt(regs, irq + 8);
		else
			interrupt(regs, 0x70 + (irq - 8));
		return;
	}

	switch (trapno) {
	case 1: /* debug */
		if (regs->eflags & EFLAGS_VM) {
			/* emulate any 8086 instructions */
			if (mode == VM86_REAL)
				return;
			if (mode != VM86_REAL_TO_PROTECTED)
				panic("not in real-to-protected mode");
			emulate(regs);
			return;
		}
		break;

	case 13: /* general protection fault */
		if (regs->eflags & EFLAGS_VM) {
			/* emulate any 8086 instructions */
			if (mode == VM86_PROTECTED)
				panic("unexpected protected mode");
			emulate(regs);
			return;
		}
		break;
	}

	printf("Trap (0x%x) while in %s mode\n",
	       trapno, regs->eflags & EFLAGS_VM ? "real" : "protected");
	if (trapno == 14)
		printf("Page fault address 0x%x\n", get_cr2());