2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Andy Gocke <agocke@gmail.com>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20 #include <palacios/vmx_assist.h>
21 #include <palacios/vmx_lowlevel.h>
22 #include <palacios/vm_guest_mem.h>
23 #include <palacios/vmx.h>
24 #include <palacios/vmm_ctrl_regs.h>
26 #ifndef CONFIG_DEBUG_VMX
28 #define PrintDebug(fmt, args...)
31 #define VMXASSIST_GDT 0x10000
32 #define VMXASSIST_TSS 0x40000
33 #define VMXASSIST_START 0xd0000
34 #define VMXASSIST_1to1_PT 0xde000 // We'll shove this at the end, and pray to god VMXASSIST doesn't mess with it
37 #define VMXASSIST_MAGIC 0x17101966
40 struct vmx_assist_header {
/* In-guest header that the VMXASSIST blob publishes at guest-physical
 * VMXASSIST_START.
 * NOTE(review): v3_vmxassist_ctx_switch() below also reads hdr->magic,
 * hdr->old_ctx_gpa and hdr->new_ctx_gpa, so this struct must carry those
 * members as well -- they are not visible in this view; confirm against
 * the full definition. */
41 uint64_t rsvd; // 8 bytes of nothing
45 } __attribute__((packed));
49 struct arbyte_fields {
/* Bit-field decomposition of a VMCS segment access-rights ("arbytes")
 * word. seg_type is the 4-bit descriptor type; the remaining bit-fields
 * (presumably S/DPL/P/AVL/L/DB/G etc.) are not visible in this view --
 * confirm against the full definition. */
50 unsigned int seg_type : 4,
61 } __attribute__((packed)) fields;
/* Closing brace below ends the enclosing union (raw integer value
 * overlaid with the bit-field view); the union's opening declaration is
 * outside this view. save_segment()/load_segment() access it via
 * .bytes. */
63 } __attribute__((packed));
65 struct vmx_assist_segment {
/* One segment register as stored in the VMXASSIST context block.
 * save_segment()/load_segment() below also access ->sel, ->limit and
 * ->base; those members are not visible in this view -- confirm against
 * the full definition. */
69 union vmcs_arbytes arbytes;
70 } __attribute__((packed));
76 struct vmx_assist_context {
/* Complete CPU world image exchanged with the in-guest VMXASSIST stub.
 * The struct is packed because VMXASSIST reads/writes the identical
 * layout from inside the guest; all register images are 32-bit, so the
 * 64-bit core state is truncated on save (see vmx_save_world_ctx). */
77 uint32_t eip; /* execution pointer */
78 uint32_t esp; /* stack pointer */
79 uint32_t eflags; /* flags register */
/* NOTE(review): cr0 and cr4 members are referenced by the save/restore
 * helpers below but are not visible in this view -- confirm. */
81 uint32_t cr3; /* page table directory */
84 uint32_t idtr_limit; /* idt */
/* NOTE(review): idtr_base / gdtr_base members are referenced below but
 * are not visible in this view -- confirm. */
87 uint32_t gdtr_limit; /* gdt */
/* Per-segment snapshots (selector, base, limit, access rights). */
90 struct vmx_assist_segment cs;
91 struct vmx_assist_segment ds;
92 struct vmx_assist_segment es;
93 struct vmx_assist_segment ss;
94 struct vmx_assist_segment fs;
95 struct vmx_assist_segment gs;
96 struct vmx_assist_segment tr;
97 struct vmx_assist_segment ldtr;
/* Presumably the real-mode IRQ base vectors (master/slave PIC) --
 * TODO confirm against the VMXASSIST stub's definition. */
100 unsigned char rm_irqbase[2];
101 } __attribute__((packed));
105 static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
106 static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
/*
 * v3_vmxassist_ctx_switch -- flip the guest between its own world state
 * and the VMXASSIST stub's state.
 *
 * Finds the VMXASSIST header at guest-physical VMXASSIST_START,
 * validates its magic value, translates the old/new context GPAs stored
 * in the header, then:
 *   - VMXASSIST_DISABLED: save the current world into old_ctx, load
 *     new_ctx (entering VMXASSIST), and mark the state ENABLED;
 *   - VMXASSIST_ENABLED:  reload old_ctx (leaving VMXASSIST) and mark
 *     the state DISABLED.
 *
 * NOTE(review): the error-path returns after each PrintError, the
 * function's closing brace, and its final return value are not visible
 * in this view -- confirm against the full file.
 */
108 int v3_vmxassist_ctx_switch(struct guest_info * info) {
109 struct vmx_assist_context * old_ctx = NULL;
110 struct vmx_assist_context * new_ctx = NULL;
111 struct vmx_assist_header * hdr = NULL;
112 struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
/* Map the in-guest header so we can read the context pointers. */
116 if (v3_gpa_to_hva(info, VMXASSIST_START, (addr_t *)&hdr) == -1) {
117 PrintError("Could not translate address for vmxassist header\n");
/* Sanity-check that the VMXASSIST blob is actually present. */
121 if (hdr->magic != VMXASSIST_MAGIC) {
122 PrintError("VMXASSIST_MAGIC field is invalid\n");
/* Translate both context blocks out of the guest's physical space. */
127 if (v3_gpa_to_hva(info, (addr_t)(hdr->old_ctx_gpa), (addr_t *)&(old_ctx)) == -1) {
128 PrintError("Could not translate address for VMXASSIST old context\n");
132 if (v3_gpa_to_hva(info, (addr_t)(hdr->new_ctx_gpa), (addr_t *)&(new_ctx)) == -1) {
133 PrintError("Could not translate address for VMXASSIST new context\n");
137 if (vmx_info->assist_state == VMXASSIST_DISABLED) {
139 /* Save the old Context */
140 vmx_save_world_ctx(info, old_ctx);
142 /* restore new context, vmxassist should launch the bios the first time */
143 vmx_restore_world_ctx(info, new_ctx);
145 vmx_info->assist_state = VMXASSIST_ENABLED;
147 } else if (vmx_info->assist_state == VMXASSIST_ENABLED) {
148 /* restore old context */
149 vmx_restore_world_ctx(info, old_ctx);
151 vmx_info->assist_state = VMXASSIST_DISABLED;
/*
 * save_segment -- pack one v3_segment into the VMXASSIST wire format.
 * Converts through the VMCS segment representation so the access-rights
 * bits land in the arbytes layout that VMXASSIST expects.
 * NOTE(review): the closing brace is not visible in this view.
 */
158 static void save_segment(struct v3_segment * seg, struct vmx_assist_segment * vmx_assist_seg) {
159 struct vmcs_segment tmp_seg;
161 memset(&tmp_seg, 0, sizeof(struct vmcs_segment));
/* v3_segment -> VMCS form, then copy the individual fields out. */
163 v3_seg_to_vmxseg(seg, &tmp_seg);
165 vmx_assist_seg->sel = tmp_seg.selector;
166 vmx_assist_seg->limit = tmp_seg.limit;
167 vmx_assist_seg->base = tmp_seg.base;
168 vmx_assist_seg->arbytes.bytes = tmp_seg.access.val;
/*
 * load_segment -- inverse of save_segment(): unpack a VMXASSIST segment
 * image back into a v3_segment, again converting through the VMCS
 * segment representation.
 * NOTE(review): the closing brace is not visible in this view.
 */
172 static void load_segment(struct vmx_assist_segment * vmx_assist_seg, struct v3_segment * seg) {
173 struct vmcs_segment tmp_seg;
175 memset(&tmp_seg, 0, sizeof(struct vmcs_segment));
177 tmp_seg.selector = vmx_assist_seg->sel;
178 tmp_seg.limit = vmx_assist_seg->limit;
179 tmp_seg.base = vmx_assist_seg->base;
180 tmp_seg.access.val = vmx_assist_seg->arbytes.bytes;
/* VMCS form -> v3_segment. */
182 v3_vmxseg_to_seg(&tmp_seg, seg);
/*
 * vmx_save_world_ctx -- snapshot the guest's current world into a
 * VMXASSIST context block (inverse of vmx_restore_world_ctx).
 * Copies RIP/RSP/RFLAGS, the guest-visible control registers (shadowed
 * CR0/CR3 plus the VMX-tracked CR4), every segment register, and the
 * IDTR/GDTR. The 64-bit core fields are truncated into the context's
 * 32-bit images.
 * NOTE(review): the closing brace is not visible in this view.
 */
185 static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
186 struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
188 PrintDebug("Writing from RIP: 0x%p\n", (void *)(addr_t)info->rip);
190 ctx->eip = info->rip;
191 ctx->esp = info->vm_regs.rsp;
192 ctx->eflags = info->ctrl_regs.rflags;
/* Guest-visible control registers come from the shadow paging state
 * (CR0/CR3) and the VMX per-core data (CR4), not the raw VMCS. */
194 ctx->cr0 = info->shdw_pg_state.guest_cr0;
195 ctx->cr3 = info->shdw_pg_state.guest_cr3;
196 ctx->cr4 = vmx_info->guest_cr4;
199 save_segment(&(info->segments.cs), &(ctx->cs));
200 save_segment(&(info->segments.ds), &(ctx->ds));
201 save_segment(&(info->segments.es), &(ctx->es));
202 save_segment(&(info->segments.ss), &(ctx->ss));
203 save_segment(&(info->segments.fs), &(ctx->fs));
204 save_segment(&(info->segments.gs), &(ctx->gs));
205 save_segment(&(info->segments.tr), &(ctx->tr));
206 save_segment(&(info->segments.ldtr), &(ctx->ldtr));
209 ctx->idtr_limit = info->segments.idtr.limit;
210 ctx->idtr_base = info->segments.idtr.base;
212 ctx->gdtr_limit = info->segments.gdtr.limit;
213 ctx->gdtr_base = info->segments.gdtr.base;
/*
 * vmx_restore_world_ctx -- load a previously saved (or VMXASSIST-built)
 * context block back into the guest core; inverse of
 * vmx_save_world_ctx. Restores RIP/RSP/RFLAGS, the shadowed CR0/CR3
 * and VMX-tracked CR4, all segment registers, and the IDTR/GDTR.
 * NOTE(review): the closing brace is not visible in this view.
 */
216 static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
217 struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
219 PrintDebug("ctx rip: %p\n", (void *)(addr_t)ctx->eip);
221 info->rip = ctx->eip;
222 info->vm_regs.rsp = ctx->esp;
223 info->ctrl_regs.rflags = ctx->eflags;
/* Control registers go back into the shadow paging / VMX state the
 * save path read them from. */
225 info->shdw_pg_state.guest_cr0 = ctx->cr0;
226 info->shdw_pg_state.guest_cr3 = ctx->cr3;
227 vmx_info->guest_cr4 = ctx->cr4;
229 load_segment(&(ctx->cs), &(info->segments.cs));
230 load_segment(&(ctx->ds), &(info->segments.ds));
231 load_segment(&(ctx->es), &(info->segments.es));
232 load_segment(&(ctx->ss), &(info->segments.ss));
233 load_segment(&(ctx->fs), &(info->segments.fs));
234 load_segment(&(ctx->gs), &(info->segments.gs));
235 load_segment(&(ctx->tr), &(info->segments.tr));
236 load_segment(&(ctx->ldtr), &(info->segments.ldtr));
239 info->segments.idtr.limit = ctx->idtr_limit;
240 info->segments.idtr.base = ctx->idtr_base;
242 info->segments.gdtr.limit = ctx->gdtr_limit;
243 info->segments.gdtr.base = ctx->gdtr_base;
/*
 * v3_vmxassist_init -- prepare a core to run the embedded VMXASSIST
 * stub: set up stack/flags/control registers, flat protected-mode
 * segments, an in-guest GDT and TSS, an identity page table when
 * nested paging is active, and copy the linked-in VMXASSIST binary
 * into guest memory. Leaves assist_state = VMXASSIST_DISABLED so the
 * first context switch enters VMXASSIST.
 *
 * NOTE(review): several lines (the `int i;` declaration, error-path
 * returns, the tail of the TSS-descriptor patch expression, the
 * pde[i].present assignment, the final return, and closing braces) are
 * not visible in this view -- confirm against the full file.
 */
248 int v3_vmxassist_init(struct guest_info * core, struct vmx_data * vmx_state) {
251 core->vm_regs.rsp = 0x80000;
/* Reserved bit 1 of RFLAGS must always read as 1. */
252 ((struct rflags *)&(core->ctrl_regs.rflags))->rsvd1 = 1;
/* CR0 = PG|WP|NE|ET|PE, CR4 = VMXE|PSE -- protected mode with paging
 * and 4MB pages enabled for the VMXASSIST environment. */
254 #define GUEST_CR0 0x80010031
255 #define GUEST_CR4 0x00002010
256 core->ctrl_regs.cr0 = GUEST_CR0;
257 core->ctrl_regs.cr4 = GUEST_CR4;
/* The guest-visible (shadow) CR0 only advertises PE and WP. */
259 ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->pe = 1;
260 ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->wp = 1;
263 // Setup segment registers
/* Treats the segment-register file as an array of 10 consecutive
 * v3_segment entries -- assumes that layout for struct v3_segments;
 * TODO confirm. */
265 struct v3_segment * seg_reg = (struct v3_segment *)&(core->segments);
/* Default every register to the DS descriptor (selector 0x18). */
269 for (i = 0; i < 10; i++) {
270 seg_reg[i].selector = 3 << 3;
271 seg_reg[i].limit = 0xffff;
272 seg_reg[i].base = 0x0;
/* CS uses the code descriptor (selector 0x10) instead. */
275 core->segments.cs.selector = 2 << 3;
277 /* Set only the segment registers */
/* First 6 entries: presumably CS/DS/ES/FS/GS/SS -- flat 4GB
 * (limit 0xfffff, 4K granularity), present, non-system. */
278 for (i = 0; i < 6; i++) {
279 seg_reg[i].limit = 0xfffff;
280 seg_reg[i].granularity = 1;
282 seg_reg[i].system = 1;
284 seg_reg[i].present = 1;
/* Code segment: execute/read, accessed (type 0xb). */
288 core->segments.cs.type = 0xb;
/* LDTR: system descriptor type 2 (LDT) at selector 0x20. */
290 core->segments.ldtr.selector = 0x20;
291 core->segments.ldtr.type = 2;
292 core->segments.ldtr.system = 0;
293 core->segments.ldtr.present = 1;
294 core->segments.ldtr.granularity = 0;
297 /************* Map in GDT and vmxassist *************/
299 uint64_t gdt[] __attribute__ ((aligned(32))) = {
300 0x0000000000000000ULL, /* 0x00: reserved */
301 0x0000830000000000ULL, /* 0x08: 32-bit TSS */
302 //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
303 0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
304 0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
305 0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
309 addr_t vmxassist_gdt = 0;
311 if (v3_gpa_to_hva(core, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
312 PrintError("Could not find VMXASSIST GDT destination\n");
/* Copy the 5-entry template GDT into guest memory.
 * NOTE(review): the TSS base is spliced into gdt[1] only AFTER this
 * memcpy, so the in-guest copy keeps a zero TSS base -- verify whether
 * that is intentional (the TR cache below is loaded directly, so the
 * descriptor may never be consulted). */
316 memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
318 core->segments.gdtr.base = VMXASSIST_GDT;
/* Scatter the 32-bit TSS base address into the descriptor's split
 * base byte fields (bits 63:56, 39:32, 31:16). The final term of this
 * expression is not visible in this view. */
321 uint64_t vmxassist_tss = VMXASSIST_TSS;
322 gdt[0x08 / sizeof(gdt[0])] |=
323 ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
324 ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
325 ((vmxassist_tss & 0x0000FFFF) << (16)) |
/* Load the TR cache directly with the TSS selector/base/type. */
328 core->segments.tr.selector = 0x08;
329 core->segments.tr.base = vmxassist_tss;
331 //core->segments.tr.type = 0x9;
332 core->segments.tr.type = 0x3;
333 core->segments.tr.system = 0;
334 core->segments.tr.present = 1;
335 core->segments.tr.granularity = 0;
338 if (core->shdw_pg_mode == NESTED_PAGING) {
339 // setup 1to1 page table internally.
341 pde32_4MB_t * pde = NULL;
/* NOTE(review): informational message routed through PrintError --
 * presumably should be PrintDebug; verify. */
343 PrintError("Setting up internal VMXASSIST page tables\n");
345 if (v3_gpa_to_hva(core, VMXASSIST_1to1_PT, (addr_t *)(&pde)) == -1) {
346 PrintError("Could not find VMXASSIST 1to1 PT destination\n");
350 memset(pde, 0, PAGE_SIZE);
/* Identity-map the full 4GB with 1024 user-accessible 4MB pages.
 * NOTE(review): the pde[i].present (and writable) assignments are not
 * visible in this view -- confirm. */
352 for (i = 0; i < 1024; i++) {
355 pde[i].user_page = 1;
356 pde[i].large_page = 1;
357 pde[i].page_base_addr = PAGE_BASE_ADDR_4MB(i * PAGE_SIZE_4MB);
359 // PrintError("PDE %d: %x\n", i, *(uint32_t *)&(pde[i]));
362 core->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
/* Copy the linked-in VMXASSIST binary image (bounded by the linker
 * symbols below) into guest memory at VMXASSIST_START. */
369 extern uint8_t v3_vmxassist_start[];
370 extern uint8_t v3_vmxassist_end[];
371 addr_t vmxassist_dst = 0;
373 if (v3_gpa_to_hva(core, VMXASSIST_START, &vmxassist_dst) == -1) {
374 PrintError("Could not find VMXASSIST destination\n");
378 memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
/* Start disabled: the first v3_vmxassist_ctx_switch() call will save
 * the world and enter VMXASSIST. */
381 vmx_state->assist_state = VMXASSIST_DISABLED;