2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2011, Jack Lange <jacklange@cs.pitt.edu>
11 * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jacklange@cs.pitt.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
21 #include <palacios/vmm_barrier.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vm_guest.h>
25 int v3_init_barrier(struct v3_vm_info * vm_info) {
26 struct v3_barrier * barrier = &(vm_info->barrier);
28 memset(barrier, 0, sizeof(struct v3_barrier));
29 v3_bitmap_init(&(barrier->cpu_map), vm_info->num_cores);
30 v3_lock_init(&(barrier->lock));
35 int v3_deinit_barrier(struct v3_vm_info * vm_info) {
36 struct v3_barrier * barrier = &(vm_info->barrier);
38 v3_bitmap_deinit(&(barrier->cpu_map));
39 v3_lock_deinit(&(barrier->lock));
45 /* Barrier synchronization primitive
46 * -- This call will block until all the guest cores are waiting at a common synchronization point
47 * in a yield loop. The core will block at the sync point until the barrier is lowered.
50 * vm_info -- The VM for which the barrier is being activated
51 * local_core -- The core whose thread this function is being called from, or NULL
52 * if the calling thread is not associated with a VM's core context
55 int v3_raise_barrier(struct v3_vm_info * vm_info, struct guest_info * local_core) {
56 struct v3_barrier * barrier = &(vm_info->barrier);
66 flag = v3_lock_irqsave(barrier->lock);
68 if (barrier->active == 0) {
73 v3_unlock_irqrestore(barrier->lock, flag);
76 /* If we are in a core context and the barrier has already been acquired
77 we'll be safe and let the other barrier proceed. We will still report an error
78 though to allow possible cleanups to occur at the call site.
80 if (local_core != NULL) {
81 v3_wait_at_barrier(local_core);
87 // If we are raising the barrier from a core context
88 // we have to mark ourselves blocked first to avoid deadlock
89 if (local_core != NULL) {
90 local_vcpu = local_core->vcpu_id;
91 v3_bitmap_set(&(barrier->cpu_map), local_vcpu);
95 // send out interrupts to force exits on all cores
96 for (i = 0; i < vm_info->num_cores; i++) {
97 if (vm_info->cores[i].vcpu_id != local_vcpu) {
98 v3_interrupt_cpu(vm_info, vm_info->cores[i].pcpu_id, 0);
102 // wait for barrier catch on all cores
103 while (all_blocked == 0) {
106 for (i = 0; i < vm_info->num_cores; i++) {
107 if (v3_bitmap_check(&(barrier->cpu_map), i) == 0) {
108 // There is still a core that is not waiting at the barrier
113 if (all_blocked == 1) {
117 v3_yield(local_core);
126 /* Lowers a barrier that has already been raised
127 * guest cores will automatically resume execution
128 * once this has been called
130 * TODO: Need someway to check that the barrier is active
133 int v3_lower_barrier(struct v3_vm_info * vm_info) {
134 struct v3_barrier * barrier = &(vm_info->barrier);
136 // Clear the active flag, so cores won't wait
139 // Clear all the cpu flags, so cores will proceed
140 v3_bitmap_reset(&(barrier->cpu_map));
147 * Syncronization point for guest cores
148 * -- called as part of the main VMM event loop for each core
149 * -- if a barrier has been activated then the core will signal
150 * it has reached the barrier and sit in a yield loop until the
151 * barrier has been lowered
153 int v3_wait_at_barrier(struct guest_info * core) {
154 struct v3_barrier * barrier = &(core->vm_info->barrier);
156 if (barrier->active == 0) {
160 /* Barrier has been activated.
161 * Wait here until it's lowered
165 // set cpu bit in barrier bitmap
166 v3_bitmap_set(&(barrier->cpu_map), core->vcpu_id);
168 // wait for cpu bit to clear
169 while (v3_bitmap_check(&(barrier->cpu_map), core->vcpu_id) == 1) {