/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx_assist.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmx.h>
#include <palacios/vmm_ctrl_regs.h>

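/* VMXASSIST is a small real-mode helper (originally from the Xen project) that
 * Palacios loads into guest memory so real-mode code such as the BIOS can run
 * on VT-x hardware without unrestricted-guest support. This file loads the
 * vmxassist image and switches the core between the guest's world and the
 * vmxassist world. */
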
#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


#define VMXASSIST_MAGIC 0x17101966

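/* The vmxassist image begins with this header (located at VMXASSIST_START in
 * guest physical memory). It carries a magic value plus the guest physical
 * addresses of the two context save areas used by the world switch below. */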
struct vmx_assist_header {
    uint64_t rsvd;          // 8 bytes of nothing
    uint32_t magic;
    uint32_t new_ctx_gpa;   // guest physical address of the vmxassist ("new") context
    uint32_t old_ctx_gpa;   // guest physical address of the guest ("old") context
} __attribute__((packed));

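/* Segment access-rights bits, laid out to match the VMCS "access rights"
 * encoding so they can be copied to and from the VMCS wholesale. Only
 * seg_type and the raw 'bytes' view are used directly in this file. */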
union vmcs_arbytes {
    struct arbyte_fields {
        unsigned int seg_type : 4, s : 1, dpl : 2, p : 1, reserved0 : 4,
            avl : 1, reserved1 : 1, default_ops_size : 1, g : 1, null_bit : 1, reserved2 : 15;
    } __attribute__((packed)) fields;
    unsigned int bytes;
} __attribute__((packed));

struct vmx_assist_segment {
    uint32_t sel;
    uint32_t limit;
    uint32_t base;
    union vmcs_arbytes arbytes;
} __attribute__((packed));

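/* Register state exchanged with vmxassist. The layout is shared with the
 * vmxassist blob itself, so everything is packed 32-bit fields. */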
struct vmx_assist_context {
    uint32_t eip;        /* execution pointer */
    uint32_t esp;        /* stack pointer */
    uint32_t eflags;     /* flags register */
    uint32_t cr0;
    uint32_t cr3;        /* page table directory */
    uint32_t cr4;
    uint32_t idtr_limit; /* idt */
    uint32_t idtr_base;
    uint32_t gdtr_limit; /* gdt */
    uint32_t gdtr_base;
    struct vmx_assist_segment cs;
    struct vmx_assist_segment ds;
    struct vmx_assist_segment es;
    struct vmx_assist_segment ss;
    struct vmx_assist_segment fs;
    struct vmx_assist_segment gs;
    struct vmx_assist_segment tr;
    struct vmx_assist_segment ldtr;

    unsigned char rm_irqbase[2];
} __attribute__((packed));

static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);

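/* Toggle between the guest's world and the vmxassist world. The header at
 * VMXASSIST_START tells us where the "old" (guest) and "new" (vmxassist)
 * context save areas live in guest physical memory. */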
int v3_vmxassist_ctx_switch(struct guest_info * info) {
    struct vmx_assist_context * old_ctx = NULL;
    struct vmx_assist_context * new_ctx = NULL;
    struct vmx_assist_header * hdr = NULL;
    struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;

    if (v3_gpa_to_hva(info, VMXASSIST_START, (addr_t *)&hdr) == -1) {
        PrintError("Could not translate address for vmxassist header\n");
        return -1;
    }

    if (hdr->magic != VMXASSIST_MAGIC) {
        PrintError("VMXASSIST_MAGIC field is invalid\n");
        return -1;
    }

    if (v3_gpa_to_hva(info, (addr_t)(hdr->old_ctx_gpa), (addr_t *)&(old_ctx)) == -1) {
        PrintError("Could not translate address for VMXASSIST old context\n");
        return -1;
    }

    if (v3_gpa_to_hva(info, (addr_t)(hdr->new_ctx_gpa), (addr_t *)&(new_ctx)) == -1) {
        PrintError("Could not translate address for VMXASSIST new context\n");
        return -1;
    }

    if (vmx_info->assist_state == VMXASSIST_DISABLED) {

        /* Save the old context */
        vmx_save_world_ctx(info, old_ctx);

        /* Restore the new context; vmxassist should launch the BIOS the first time */
        vmx_restore_world_ctx(info, new_ctx);

        vmx_info->assist_state = VMXASSIST_ENABLED;

    } else if (vmx_info->assist_state == VMXASSIST_ENABLED) {
        /* Restore the old context */
        vmx_restore_world_ctx(info, old_ctx);

        vmx_info->assist_state = VMXASSIST_DISABLED;
    }

    return 0;
}

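/* save_segment()/load_segment() translate between a Palacios v3_segment and
 * the VMCS-style segment encoding used in the vmxassist context. */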
static void save_segment(struct v3_segment * seg, struct vmx_assist_segment * vmx_assist_seg) {
    struct vmcs_segment tmp_seg;

    memset(&tmp_seg, 0, sizeof(struct vmcs_segment));

    v3_seg_to_vmxseg(seg, &tmp_seg);

    vmx_assist_seg->sel = tmp_seg.selector;
    vmx_assist_seg->limit = tmp_seg.limit;
    vmx_assist_seg->base = tmp_seg.base;
    vmx_assist_seg->arbytes.bytes = tmp_seg.access.val;
}

static void load_segment(struct vmx_assist_segment * vmx_assist_seg, struct v3_segment * seg) {
    struct vmcs_segment tmp_seg;

    memset(&tmp_seg, 0, sizeof(struct vmcs_segment));

    tmp_seg.selector = vmx_assist_seg->sel;
    tmp_seg.limit = vmx_assist_seg->limit;
    tmp_seg.base = vmx_assist_seg->base;
    tmp_seg.access.val = vmx_assist_seg->arbytes.bytes;

    v3_vmxseg_to_seg(&tmp_seg, seg);
}

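/* Capture the core's current register and segment state into a vmxassist
 * context structure in guest memory. */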
static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    PrintDebug("Writing from RIP: 0x%p\n", (void *)(addr_t)info->rip);

    ctx->eip = info->rip;
    ctx->esp = info->vm_regs.rsp;
    ctx->eflags = info->ctrl_regs.rflags;

    ctx->cr0 = info->shdw_pg_state.guest_cr0;
    ctx->cr3 = info->shdw_pg_state.guest_cr3;
    ctx->cr4 = vmx_info->guest_cr4;

    save_segment(&(info->segments.cs), &(ctx->cs));
    save_segment(&(info->segments.ds), &(ctx->ds));
    save_segment(&(info->segments.es), &(ctx->es));
    save_segment(&(info->segments.ss), &(ctx->ss));
    save_segment(&(info->segments.fs), &(ctx->fs));
    save_segment(&(info->segments.gs), &(ctx->gs));
    save_segment(&(info->segments.tr), &(ctx->tr));
    save_segment(&(info->segments.ldtr), &(ctx->ldtr));

    ctx->idtr_limit = info->segments.idtr.limit;
    ctx->idtr_base = info->segments.idtr.base;

    ctx->gdtr_limit = info->segments.gdtr.limit;
    ctx->gdtr_base = info->segments.gdtr.base;
}

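/* Load the core's register and segment state from a vmxassist context
 * structure in guest memory. */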
static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx) {
    struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);

    PrintDebug("ctx rip: %p\n", (void *)(addr_t)ctx->eip);

    info->rip = ctx->eip;
    info->vm_regs.rsp = ctx->esp;
    info->ctrl_regs.rflags = ctx->eflags;

    info->shdw_pg_state.guest_cr0 = ctx->cr0;
    info->shdw_pg_state.guest_cr3 = ctx->cr3;
    vmx_info->guest_cr4 = ctx->cr4;

    load_segment(&(ctx->cs), &(info->segments.cs));
    load_segment(&(ctx->ds), &(info->segments.ds));
    load_segment(&(ctx->es), &(info->segments.es));
    load_segment(&(ctx->ss), &(info->segments.ss));
    load_segment(&(ctx->fs), &(info->segments.fs));
    load_segment(&(ctx->gs), &(info->segments.gs));
    load_segment(&(ctx->tr), &(info->segments.tr));
    load_segment(&(ctx->ldtr), &(info->segments.ldtr));

    info->segments.idtr.limit = ctx->idtr_limit;
    info->segments.idtr.base = ctx->idtr_base;

    info->segments.gdtr.limit = ctx->gdtr_limit;
    info->segments.gdtr.base = ctx->gdtr_base;
}

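/* One-time setup: put the core into a flat 32-bit protected-mode state, build
 * the GDT/TSS (and, under nested paging, a 1:1 page table) that vmxassist
 * expects, and copy the vmxassist image into guest memory. */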
int v3_vmxassist_init(struct guest_info * core, struct vmx_data * vmx_state) {
    int i = 0;

    core->vm_regs.rsp = 0x80000;
    ((struct rflags *)&(core->ctrl_regs.rflags))->rsvd1 = 1;

#define GUEST_CR0 0x80010031
#define GUEST_CR4 0x00002010
    core->ctrl_regs.cr0 = GUEST_CR0;
    core->ctrl_regs.cr4 = GUEST_CR4;

    ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->pe = 1;
    ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->wp = 1;

    // Setup segment registers
    struct v3_segment * seg_reg = (struct v3_segment *)&(core->segments);

    for (i = 0; i < 10; i++) {
        seg_reg[i].selector = 3 << 3;
        seg_reg[i].limit = 0xffff;
        seg_reg[i].base = 0x0;
    }

    core->segments.cs.selector = 2 << 3;

    /* Set only the segment registers */
    for (i = 0; i < 6; i++) {
        seg_reg[i].limit = 0xfffff;
        seg_reg[i].granularity = 1;
        seg_reg[i].type = 3;   /* read/write data segment (assumed; line was elided) */
        seg_reg[i].system = 1;
        seg_reg[i].dpl = 0;    /* assumed; line was elided */
        seg_reg[i].present = 1;
    }

    core->segments.cs.type = 0xb;

    core->segments.ldtr.selector = 0x20;
    core->segments.ldtr.type = 2;
    core->segments.ldtr.system = 0;
    core->segments.ldtr.present = 1;
    core->segments.ldtr.granularity = 0;

    /************* Map in GDT and vmxassist *************/

    uint64_t gdt[] __attribute__ ((aligned(32))) = {
        0x0000000000000000ULL, /* 0x00: reserved */
        0x0000830000000000ULL, /* 0x08: 32-bit TSS */
        //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
        0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
        0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
        0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
    };

    addr_t vmxassist_gdt = 0;

    if (v3_gpa_to_hva(core, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
        PrintError("Could not find VMXASSIST GDT destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);

    core->segments.gdtr.base = VMXASSIST_GDT;

    uint64_t vmxassist_tss = VMXASSIST_TSS;
    gdt[0x08 / sizeof(gdt[0])] |=
        ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
        ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
        ((vmxassist_tss & 0x0000FFFF) << (16)) |
        (8192 << 32);   /* TSS limit; final term was elided, value assumed */

    core->segments.tr.selector = 0x08;
    core->segments.tr.base = vmxassist_tss;

    //core->segments.tr.type = 0x9;
    core->segments.tr.type = 0x3;
    core->segments.tr.system = 0;
    core->segments.tr.present = 1;
    core->segments.tr.granularity = 0;

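    /* Under nested paging there are no shadow page tables to back vmxassist's
     * paged protected mode, so build an identity-mapped 32-bit page directory
     * of 4MB large pages inside the guest and point CR3 at it. */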
    if (core->shdw_pg_mode == NESTED_PAGING) {
        // setup 1to1 page table internally.
        pde32_4MB_t * pde = NULL;

        PrintError("Setting up internal VMXASSIST page tables\n");

        if (v3_gpa_to_hva(core, VMXASSIST_1to1_PT, (addr_t *)(&pde)) == -1) {
            PrintError("Could not find VMXASSIST 1to1 PT destination\n");
            return -1;
        }

        memset(pde, 0, PAGE_SIZE);

        for (i = 0; i < 1024; i++) {
            pde[i].present = 1;    /* present/writable assumed (lines elided); needed for a usable 1:1 map */
            pde[i].writable = 1;
            pde[i].user_page = 1;
            pde[i].large_page = 1;
            pde[i].page_base_addr = PAGE_BASE_ADDR_4MB(i * PAGE_SIZE_4MB);

            // PrintError("PDE %d: %x\n", i, *(uint32_t *)&(pde[i]));
        }

        core->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
    }

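    /* Finally, copy the embedded vmxassist image (linked into Palacios between
     * v3_vmxassist_start and v3_vmxassist_end) into guest memory at
     * VMXASSIST_START. */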
    extern uint8_t v3_vmxassist_start[];
    extern uint8_t v3_vmxassist_end[];
    addr_t vmxassist_dst = 0;

    if (v3_gpa_to_hva(core, VMXASSIST_START, &vmxassist_dst) == -1) {
        PrintError("Could not find VMXASSIST destination\n");
        return -1;
    }

    memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);

    vmx_state->assist_state = VMXASSIST_DISABLED;

    return 0;
}