2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2012, NWU EECS 441 Transactional Memory Team
11 * Copyright (c) 2012, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Maciek Swiech <dotpyfe@u.northwestern.edu>
15 * Marcel Flores <marcel-flores@u.northwestern.edu>
16 * Zachary Bischof <zbischof@u.northwestern.edu>
17 * Kyle C. Hale <kh@u.northwestern.edu>
19 * This is free software. You are permitted to use,
20 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
23 #include <palacios/vmm_mem.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmcb.h>
26 #include <palacios/vmm_decoder.h>
27 #include <palacios/vm_guest_mem.h>
28 #include <palacios/vmm_ctrl_regs.h>
29 #include <palacios/vmm_direct_paging.h>
30 #include <palacios/svm.h>
31 #include <palacios/vmm_excp.h>
32 #include <palacios/vmm_list.h>
33 #include <palacios/vmm_hashtable.h>
35 #include <extensions/trans_mem.h>
36 #include <extensions/tm_util.h>
/* Assembly stubs (defined outside this file) that execute the SVM STGI/CLGI
 * instructions to set/clear the host's Global Interrupt Flag. */
38 extern void v3_stgi();
39 extern void v3_clgi();
/* When TM-function debugging is not configured in, compile PrintDebug calls
 * in this file down to nothing.
 * NOTE(review): the matching #endif is on a line elided from this listing. */
41 #if !V3_CONFIG_DEBUG_TM_FUNC
43 #define PrintDebug(fmt, args...)
46 /* TM Read/Write List data structure and API *********************************
/* Unlink (and presumably free) every mem_op node on the given read/write
 * operation list. The _safe iterator variant is used because nodes are
 * removed during traversal.
 * NOTE(review): lines 55-59 are elided from this listing -- the per-node
 * deallocation (V3_Free) and closing braces are presumably there; confirm
 * against the full source. */
49 static void free_mem_op_list (struct list_head * list) {
50 struct mem_op * curr = NULL;
51 struct mem_op * tmp = NULL;
53 list_for_each_entry_safe(curr, tmp, list, op_node) {
54 list_del(&(curr->op_node));
/* Reset a core's transactional-memory bookkeeping by freeing both the
 * write-set and read-set operation lists.
 * NOTE(review): closing brace elided from this listing. */
60 void v3_clear_tm_lists (struct v3_trans_mem * tm) {
61 free_mem_op_list(&(tm->trans_w_list));
62 free_mem_op_list(&(tm->trans_r_list));
/* Record a guest address in a read/write operation list.
 * First checks whether the address is already tracked (dedup via
 * list_contains_guest_addr); otherwise allocates a fresh mem_op node and
 * appends it to the tail of the list.
 * NOTE(review): the lines between the lookup and the V3_Malloc, and between
 * the field assignments, are elided -- the early-return on a duplicate hit
 * and the NULL check on the allocation are presumably there; confirm. */
66 int add_mem_op_to_list (struct list_head * list, addr_t guest_addr) {
69 new = list_contains_guest_addr(list, guest_addr);
76 new = (struct mem_op *)V3_Malloc(sizeof(struct mem_op));
81 new->guest_addr = guest_addr;
84 list_add_tail(&(new->op_node), list);
/* Linear search of an operation list for an entry matching guest_addr.
 * Returns the matching mem_op (return statements elided from this listing;
 * presumably returns NULL when no entry matches -- confirm). */
90 struct mem_op * list_contains_guest_addr (struct list_head * list, addr_t guest_addr) {
91 struct mem_op * curr = NULL;
92 struct mem_op * tmp = NULL;
94 list_for_each_entry_safe(curr, tmp, list, op_node) {
95 if (curr->guest_addr == guest_addr) {
/* Refresh stale entries in an operation list from the staging page.
 * For each entry not marked 'current', locate its slot on the staging page
 * (host physical), translate that HPA to a host virtual address, and copy
 * 8 bytes into curr->data. References within 8 bytes of the end of a 4KB
 * page are rejected as spanning a page boundary.
 * NOTE(review): the offset computation uses PAGE_SIZE while the boundary
 * check and stage_entry() use PAGE_SIZE_4KB -- if these macros ever differ
 * this is a latent bug; confirm they are identical in this build.
 * NOTE(review): error string says "UDATE" (sic, typo for UPDATE); left as-is
 * since it is a runtime string. Error-path returns are elided here. */
106 int update_list(struct v3_trans_mem * tm, struct list_head * list) {
107 struct mem_op * curr = NULL;
108 struct mem_op * tmp = NULL;
112 list_for_each_entry_safe(curr, tmp, list, op_node) {
113 if (!curr->current) {
114 /* we do not have the most current entry! grab it from the staging
117 sp_loc = (void *)((addr_t)(tm->staging_page) + (curr->guest_addr % PAGE_SIZE));
118 if ((curr->guest_addr % PAGE_SIZE_4KB) > (PAGE_SIZE_4KB - 8)) {
119 PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM UDATE LIST ++ data ref spans page boundary\n");
123 if (v3_hpa_to_hva((addr_t)(sp_loc), &v_sp_loc) == -1) {
124 PrintError(tm->ginfo->vm_info, tm->ginfo,"Could not convert address on staging page to virtual address\n");
128 memcpy((void*)(&(curr->data)), (void*)v_sp_loc, sizeof(uint64_t));
/* Write one tracked entry's saved data back onto the staging page.
 * Looks up guest_addr in the list, computes its slot on the staging page,
 * translates the slot's HPA to a host virtual address, and copies the
 * 8-byte curr->data into it -- the inverse direction of update_list().
 * References within 8 bytes of the end of a 4KB page are rejected.
 * NOTE(review): error string "++ TM UDATE LIST ++" looks copy-pasted from
 * update_list (and carries the same UPDATE typo); left as-is since it is a
 * runtime string. Error-path returns and the 'mark not current' assignment
 * after line 162 are elided from this listing. */
137 int stage_entry (struct v3_trans_mem * tm, struct list_head * list, addr_t guest_addr) {
140 struct mem_op * curr = list_contains_guest_addr(list, guest_addr);
143 PrintDebug(tm->ginfo->vm_info, tm->ginfo,"tried to stage entry from addr %p that doesn't exist in this list\n", (void*)guest_addr);
147 sp_loc = (void*)((addr_t)(tm->staging_page) + (guest_addr % PAGE_SIZE_4KB));
149 if ((curr->guest_addr % PAGE_SIZE_4KB) > (PAGE_SIZE_4KB - 8)) {
150 PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM UDATE LIST ++ data ref spans page boundary\n");
154 if (v3_hpa_to_hva((addr_t)(sp_loc), &v_sp_loc) == -1) {
155 PrintError(tm->ginfo->vm_info, tm->ginfo,"Could not convert address on staging page to virt addr\n");
159 /* write data back to the data page */
160 memcpy((void*)v_sp_loc,(void*)(&(curr->data)), sizeof(uint64_t));
/* Like add_mem_op_to_list(), but also records an initial 8-byte data value
 * with the tracked address (the new->data assignment is on a line elided
 * from this listing -- confirm). Checks for an existing entry first so the
 * same guest address is not tracked twice; the duplicate-hit path and the
 * allocation NULL check are also on elided lines. */
168 int copy_add_entry(struct list_head * list, addr_t guest_addr, uint64_t data){
171 // Don't repeatedly add
172 new = list_contains_guest_addr(list, guest_addr);
178 new = (struct mem_op*)V3_Malloc(sizeof(struct mem_op));
184 new->guest_addr = guest_addr;
187 list_add_tail(&(new->op_node), list);
/* Commit a transaction: walk the core's write-set and copy each entry's
 * buffered 8-byte value back into guest memory. Each guest virtual address
 * is translated to a host virtual address before the memcpy.
 * Caller must guarantee atomicity -- see the comment at line 194; no lock
 * is taken here. Error-path return after the failed translation is elided
 * from this listing. */
193 int commit_list(struct guest_info * core, struct v3_trans_mem * tm) {
194 // We should not be interruptable here, needs to happen atomically
195 PrintDebug(core->vm_info, core,"-- TM COMMIT -- commiting data\n");
198 struct mem_op * curr = NULL;
199 struct mem_op * tmp = NULL;
201 list_for_each_entry_safe(curr, tmp, &(tm->trans_w_list), op_node) {
204 if (v3_gva_to_hva(core, (addr_t)(curr->guest_addr), &v_ga_loc) == -1) {
205 PrintError(core->vm_info, core,"Could not translate gva to hva\n");
209 PrintDebug(core->vm_info, core,"\tValue being copied: %p\n", (void*)(curr->data));
210 memcpy((void*)v_ga_loc, (void*)(&(curr->data)) , sizeof(uint64_t));
/* Unimplemented stub: logs an error identifying itself and (presumably, on
 * an elided line) returns an error code. */
218 int v3_copy_lists(struct guest_info *core) {
219 PrintError(core->vm_info, core, "TM: unimplemented (%s)\n", __FUNCTION__);
224 /* TM State functions ********************************************************
226 * int v3_set_tm(struct guest_info *core)
227 * int v3_clr_tm(struct guest_info *core)
228 * int v3_clr_vtlb(struct guest_info *core)
229 * int v3_tm_set_abrt(struct guest_info *core)
/* Turn transactional-memory mode ON for this core.
 * Rejects the call if the core is already in TM mode. Under tms->lock it
 * bumps the VM-wide active-core count and samples the system-wide TM mode.
 * If this core is the first to enter TM (system mode was TM_OFF), it walks
 * every *other* core (skipping its own vcpu_id) -- the elided lines
 * presumably flush each remote core's shadow page tables so those cores
 * begin single-stepping; confirm against the full source -- then re-takes
 * the lock to flip the system mode to TM_ON.
 * NOTE(review): several lines (the TM_MODE assignment for this core, loop
 * body, returns) are elided from this listing. */
233 int v3_set_tm (struct v3_trans_mem * tm) {
234 struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(tm->ginfo->vm_info, "trans_mem");
235 if (tm->TM_MODE == TM_ON) {
236 PrintError(tm->ginfo->vm_info, tm->ginfo,"++ TM SET ++ tried to set tm but it was already on\n");
241 tm->TM_STATE = TM_NULL;
244 enum TM_MODE_E sys_tm;
246 flags = v3_lock_irqsave(tms->lock);
247 (tms->cores_active)++;
248 sys_tm = tms->TM_MODE;
249 v3_unlock_irqrestore(tms->lock, flags);
251 // need to flush everyone elses VTLB to get them to start single stepping IF THEY ARENT ALREADY
253 if (sys_tm == TM_OFF) {
255 for (core_num = 0; core_num < tm->ginfo->vm_info->num_cores; core_num++) {
256 if (core_num == tm->ginfo->vcpu_id) {
260 struct guest_info * r_core = &(tm->ginfo->vm_info->cores[core_num]);
262 // TODO: what if this happens at an inopportune time?
266 flags = v3_lock_irqsave(tms->lock);
267 tms->TM_MODE = TM_ON;
268 v3_unlock_irqrestore(tms->lock, flags);
/* Turn transactional-memory mode OFF for this core.
 * Resets the core's TM mode/state and cached instruction length, then
 * decrements the VM-wide active-core count under tms->lock. If this was the
 * last active core (condition on an elided line, presumably num_act == 0),
 * the system-wide mode is set back to TM_OFF.
 * NOTE(review): the final tms->TM_MODE store at line 291 appears to happen
 * *after* the lock was released at line 287 -- possible race with v3_set_tm
 * on another core; confirm against the full source. */
273 int v3_clr_tm (struct v3_trans_mem * tm) {
274 PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ CLR TM ++ clearing tm state\n");
276 struct v3_tm_state * tms = (struct v3_tm_state *)v3_get_extension_state(tm->ginfo->vm_info, "trans_mem");
277 tm->TM_MODE = TM_OFF;
278 tm->TM_STATE = TM_NULL;
279 tm->cur_instr_len = -1;
281 // last core to turn off?
285 flags = v3_lock_irqsave(tms->lock);
286 num_act = --(tms->cores_active);
287 v3_unlock_irqrestore(tms->lock, flags);
290 PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ CLR TM ++ we are the last tm->ginfo in TM, turn off system state\n");
291 tms->TM_MODE = TM_OFF;
/* Flush a core's virtual TLB by invalidating its shadow page tables.
 * (Return statement and closing brace elided from this listing.) */
296 int v3_clr_vtlb (struct guest_info * core) {
297 PrintDebug(core->vm_info, core,"++ TM VTLB ++ flushing core %d's VTLB\n", core->vcpu_id);
298 v3_invalidate_shadow_pts(core);
/* Mark this core's current transaction as aborted.
 * (Return statement and closing brace elided from this listing.) */
303 int v3_tm_set_abrt(struct v3_trans_mem * tm) {
304 tm->TM_STATE = TM_ABORT;
309 /* TM extra ******************************************************************
/* Release the core's single staging page back to the host allocator and
 * clear the pointer to prevent double-free / dangling use.
 * Logs (debug) and presumably returns early -- elided line -- when the
 * staging page was never allocated. Listing ends mid-function; the final
 * return is not visible here. */
312 int v3_free_staging_page(struct v3_trans_mem * tm) {
313 if (!(tm->staging_page)) {
314 PrintDebug(tm->ginfo->vm_info, tm->ginfo,"++ %d : TM FREE ++ tried to dealloc null staging page\n", tm->ginfo->vcpu_id);
317 V3_FreePages(tm->staging_page, 1);
318 tm->staging_page = NULL;