Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
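For instance, to list the remote branches and then track one of the release branches (the branch name here is illustrative; check the output of the first command for the actual names):

  git branch -r
  git checkout --track -b release-1.3 origin/release-1.3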


revert to earlier rdtsc intercepts
[palacios.git] palacios/src/palacios/svm.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/svm.h>
#include <palacios/vmm.h>

#include <palacios/vmcb.h>
#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/svm_handler.h>

#include <palacios/vmm_debug.h>
#include <palacios/vm_guest_mem.h>

#include <palacios/vmm_decoder.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/svm_msr.h>

#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_barrier.h>



#ifdef V3_CONFIG_CHECKPOINT
#include <palacios/vmm_checkpoint.h>
#endif

#include <palacios/vmm_direct_paging.h>

#include <palacios/vmm_ctrl_regs.h>
#include <palacios/svm_io.h>

#include <palacios/vmm_sprintf.h>


#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

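// Most recent SVM exit code, kept for error reporting (see v3_svm_enter())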
uint32_t v3_last_exit;

// Per-CPU physical addresses of the host save areas ("host VMCBs")
static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};


extern void v3_stgi();
extern void v3_clgi();
//extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);


static vmcb_t * Allocate_VMCB() {
    vmcb_t * vmcb_page = NULL;
    addr_t vmcb_pa = (addr_t)V3_AllocPages(1);

    if ((void *)vmcb_pa == NULL) {
        PrintError("Error allocating VMCB\n");
        return NULL;
    }

    vmcb_page = (vmcb_t *)V3_VAddr((void *)vmcb_pa);

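    // The VMCB must be a 4 KB, page-aligned region (hence exactly one page),
    // and should be cleared before use.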
    memset(vmcb_page, 0, 4096);

    return vmcb_page;
}


static int v3_svm_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data)
{
    int status;

    // Call arch-independent handler
    if ((status = v3_handle_efer_write(core, msr, src, priv_data)) != 0) {
        return status;
    }

    // SVM-specific code
    {
        // Ensure that hardware visible EFER.SVME bit is set (SVM Enable)
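        // (VMRUN's consistency checks require EFER.SVME to be set in the guest
        // state; the guest's own view of EFER is maintained by the generic
        // handler above, so forcing the bit here is safe.)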
        struct efer_64 * hw_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
        hw_efer->svme = 1;
    }

    return 0;
}


static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
    uint_t i;


    // Intercept SVM-related and other sensitive instructions
    ctrl_area->svm_instrs.VMRUN = 1;
    ctrl_area->svm_instrs.VMMCALL = 1;
    ctrl_area->svm_instrs.VMLOAD = 1;
    ctrl_area->svm_instrs.VMSAVE = 1;
    ctrl_area->svm_instrs.STGI = 1;
    ctrl_area->svm_instrs.CLGI = 1;
    ctrl_area->svm_instrs.SKINIT = 1;
    ctrl_area->svm_instrs.ICEBP = 1;
    ctrl_area->svm_instrs.WBINVD = 1;
    ctrl_area->svm_instrs.MONITOR = 1;
    ctrl_area->svm_instrs.MWAIT_always = 1;
    ctrl_area->svm_instrs.MWAIT_if_armed = 1;
    ctrl_area->instrs.INVLPGA = 1;
    ctrl_area->instrs.CPUID = 1;

    ctrl_area->instrs.HLT = 1;

    /* Set at VMM launch as needed */
    ctrl_area->instrs.RDTSC = 0;
    ctrl_area->svm_instrs.RDTSCP = 0;
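    // (v3_svm_config_tsc_virtualization(), below, turns the RDTSC/RDTSCP
    // intercepts on or off based on the VM_TIME_* flags at entry time.)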

    // guest_state->cr0 = 0x00000001;    // PE

    /*
      ctrl_area->exceptions.de = 1;
      ctrl_area->exceptions.df = 1;

      ctrl_area->exceptions.ts = 1;
      ctrl_area->exceptions.ss = 1;
      ctrl_area->exceptions.ac = 1;
      ctrl_area->exceptions.mc = 1;
      ctrl_area->exceptions.gp = 1;
      ctrl_area->exceptions.ud = 1;
      ctrl_area->exceptions.np = 1;
      ctrl_area->exceptions.of = 1;

      ctrl_area->exceptions.nmi = 1;
    */


    ctrl_area->instrs.NMI = 1;
    ctrl_area->instrs.SMI = 0; // allow SMIs to run in guest
    ctrl_area->instrs.INIT = 1;
    //    ctrl_area->instrs.PAUSE = 1;
    ctrl_area->instrs.shutdown_evts = 1;


    /* DEBUG FOR RETURN CODE */
    ctrl_area->exit_code = 1;


    /* Setup Guest Machine state */

    core->vm_regs.rsp = 0x00;
    core->rip = 0xfff0;

    core->vm_regs.rdx = 0x00000f00;


    core->cpl = 0;

    core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
    core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
    core->ctrl_regs.efer |= EFER_MSR_svm_enable;


    core->segments.cs.selector = 0xf000;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = 0x0000000f0000LL;

    // (raw attributes = 0xf3)
    core->segments.cs.type = 0x3;
    core->segments.cs.system = 0x1;
    core->segments.cs.dpl = 0x3;
    core->segments.cs.present = 1;


    struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
                                      &(core->segments.es), &(core->segments.fs),
                                      &(core->segments.gs), NULL};

    for (i = 0; segregs[i] != NULL; i++) {
        struct v3_segment * seg = segregs[i];

        seg->selector = 0x0000;
        //    seg->base = seg->selector << 4;
        seg->base = 0x00000000;
        seg->limit = ~0u;

        // (raw attributes = 0xf3)
        seg->type = 0x3;
        seg->system = 0x1;
        seg->dpl = 0x3;
        seg->present = 1;
    }

    core->segments.gdtr.limit = 0x0000ffff;
    core->segments.gdtr.base = 0x0000000000000000LL;
    core->segments.idtr.limit = 0x0000ffff;
    core->segments.idtr.base = 0x0000000000000000LL;

    core->segments.ldtr.selector = 0x0000;
    core->segments.ldtr.limit = 0x0000ffff;
    core->segments.ldtr.base = 0x0000000000000000LL;
    core->segments.tr.selector = 0x0000;
    core->segments.tr.limit = 0x0000ffff;
    core->segments.tr.base = 0x0000000000000000LL;


    core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
    core->dbg_regs.dr7 = 0x0000000000000400LL;
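    // (DR6 = 0xffff0ff0 and DR7 = 0x400 are the architectural x86 reset values.)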


    ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
    ctrl_area->instrs.IOIO_PROT = 1;

    ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
    ctrl_area->instrs.MSR_PROT = 1;


    PrintDebug("Exiting on interrupts\n");
    ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
    ctrl_area->instrs.INTR = 1;


    v3_hook_msr(core->vm_info, EFER_MSR,
                &v3_handle_efer_read,
                &v3_svm_handle_efer_write,
                core);

    if (core->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        /* JRL: This is a performance killer, and a simplistic solution */
        /* We need to fix this */
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;


        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return;
        }


        core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
        PrintDebug("Created\n");

        core->ctrl_regs.cr0 |= 0x80000000;
        core->ctrl_regs.cr3 = core->direct_map_pt;

        ctrl_area->cr_reads.cr0 = 1;
        ctrl_area->cr_writes.cr0 = 1;
        //ctrl_area->cr_reads.cr4 = 1;
        ctrl_area->cr_writes.cr4 = 1;
        ctrl_area->cr_reads.cr3 = 1;
        ctrl_area->cr_writes.cr3 = 1;


        ctrl_area->instrs.INVLPG = 1;

        ctrl_area->exceptions.pf = 1;

        guest_state->g_pat = 0x7040600070406ULL;


    } else if (core->shdw_pg_mode == NESTED_PAGING) {
        // Flush the TLB on entries/exits
        ctrl_area->TLB_CONTROL = 1;
        ctrl_area->guest_ASID = 1;

        // Enable Nested Paging
        ctrl_area->NP_ENABLE = 1;

        PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));

        // Set the Nested Page Table pointer
        if (v3_init_passthrough_pts(core) == -1) {
            PrintError("Could not initialize Nested page tables\n");
            return;
        }

        ctrl_area->N_CR3 = core->direct_map_pt;

        guest_state->g_pat = 0x7040600070406ULL;
    }

    /* tell the guest that we don't support SVM */
    v3_hook_msr(core->vm_info, SVM_VM_CR_MSR,
        &v3_handle_vm_cr_read,
        &v3_handle_vm_cr_write,
        core);


    {
#define INT_PENDING_AMD_MSR             0xc0010055

        v3_hook_msr(core->vm_info, IA32_STAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_LSTAR_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_FMASK_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_KERN_GS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, IA32_CSTAR_MSR, NULL, NULL, NULL);

        v3_hook_msr(core->vm_info, SYSENTER_CS_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_ESP_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, SYSENTER_EIP_MSR, NULL, NULL, NULL);


        v3_hook_msr(core->vm_info, FS_BASE_MSR, NULL, NULL, NULL);
        v3_hook_msr(core->vm_info, GS_BASE_MSR, NULL, NULL, NULL);

        // Passthrough read operations are ok.
        v3_hook_msr(core->vm_info, INT_PENDING_AMD_MSR, NULL, v3_msr_unhandled_write, NULL);
    }
}


int v3_init_svm_vmcb(struct guest_info * core, v3_vm_class_t vm_class) {

    PrintDebug("Allocating VMCB\n");
    core->vmm_data = (void *)Allocate_VMCB();

    if (core->vmm_data == NULL) {
        PrintError("Could not allocate VMCB, Exiting...\n");
        return -1;
    }

    if (vm_class == V3_PC_VM) {
        PrintDebug("Initializing VMCB (addr=%p)\n", (void *)core->vmm_data);
        Init_VMCB_BIOS((vmcb_t*)(core->vmm_data), core);
    } else {
        PrintError("Invalid VM class\n");
        return -1;
    }

    core->core_run_state = CORE_STOPPED;

    return 0;
}


int v3_deinit_svm_vmcb(struct guest_info * core) {
    V3_FreePages(V3_PAddr(core->vmm_data), 1);
    return 0;
}


#ifdef V3_CONFIG_CHECKPOINT
int v3_svm_save_core(struct guest_info * core, void * ctx){

    if (v3_chkpt_save_8(ctx, "cpl", &(core->cpl)) == -1) {
        PrintError("Could not save SVM cpl\n");
        return -1;
    }

    if (v3_chkpt_save(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
        PrintError("Could not save SVM vmcb\n");
        return -1;
    }

    return 0;
}

int v3_svm_load_core(struct guest_info * core, void * ctx){

    if (v3_chkpt_load_8(ctx, "cpl", &(core->cpl)) == -1) {
        PrintError("Could not load SVM cpl\n");
        return -1;
    }

    if (v3_chkpt_load(ctx, "vmcb_data", PAGE_SIZE, core->vmm_data) == -1) {
        PrintError("Could not load SVM vmcb\n");
        return -1;
    }

    return 0;
}
#endif

static int update_irq_exit_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    // Fix for QEMU bug using EVENTINJ as an internal cache
    guest_ctrl->EVENTINJ.valid = 0;

    if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif

        info->intr_core_state.irq_started = 1;
        info->intr_core_state.irq_pending = 0;

        v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
    }

    if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif

        // Interrupt was taken fully vectored
        info->intr_core_state.irq_started = 0;

    } else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
    }

    return 0;
}


static int update_irq_entry_state(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));


    if (info->intr_core_state.irq_pending == 0) {
        guest_ctrl->guest_ctrl.V_IRQ = 0;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = 0;
    }

    if (v3_excp_pending(info)) {
        uint_t excp = v3_get_excp_number(info);

        guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;

        if (info->excp_state.excp_error_code_valid) {
            guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
            guest_ctrl->EVENTINJ.ev = 1;
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
            PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
        }

        guest_ctrl->EVENTINJ.vector = excp;

        guest_ctrl->EVENTINJ.valid = 1;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
                   (int)info->num_exits,
                   guest_ctrl->EVENTINJ.vector,
                   (void *)(addr_t)info->ctrl_regs.cr2,
                   (void *)(addr_t)info->rip);
#endif

        v3_injecting_excp(info, excp);
    } else if (info->intr_core_state.irq_started == 1) {
#ifdef V3_CONFIG_DEBUG_INTERRUPTS
        PrintDebug("IRQ pending from previous injection\n");
#endif
        guest_ctrl->guest_ctrl.V_IRQ = 1;
        guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
        guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
        guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;

    } else {
        switch (v3_intr_pending(info)) {
            case V3_EXTERNAL_IRQ: {
                uint32_t irq = v3_get_intr(info);

                guest_ctrl->guest_ctrl.V_IRQ = 1;
                guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
                guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
                guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
                           guest_ctrl->guest_ctrl.V_INTR_VECTOR,
                           (void *)(addr_t)info->rip);
#endif

                info->intr_core_state.irq_pending = 1;
                info->intr_core_state.irq_vector = irq;

                break;
            }
            case V3_NMI:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
                guest_ctrl->EVENTINJ.ev = 0;
                guest_ctrl->EVENTINJ.valid = 1; // without the valid bit the injection is never delivered
                break;
            case V3_SOFTWARE_INTR:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;

#ifdef V3_CONFIG_DEBUG_INTERRUPTS
                PrintDebug("Injecting software interrupt -- type: %d, vector: %d\n",
                           SVM_INJECTION_SOFT_INTR, info->intr_core_state.swintr_vector);
#endif
                guest_ctrl->EVENTINJ.vector = info->intr_core_state.swintr_vector;
                guest_ctrl->EVENTINJ.valid = 1;

                /* reset swintr state */
                info->intr_core_state.swintr_posted = 0;
                info->intr_core_state.swintr_vector = 0;

                break;
            case V3_VIRTUAL_IRQ:
                guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
                break;

            case V3_INVALID_INTR:
            default:
                break;
        }

    }

    return 0;
}

int
v3_svm_config_tsc_virtualization(struct guest_info * info) {
    vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

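    // When RDTSC is not intercepted, the hardware adds TSC_OFFSET to the value
    // the guest reads, so a zero offset means full passthrough of the host TSC.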
    if (info->time_state.flags & VM_TIME_TRAP_RDTSC) {
        ctrl_area->instrs.RDTSC = 1;
        ctrl_area->svm_instrs.RDTSCP = 1;
    } else {
        ctrl_area->instrs.RDTSC = 0;
        ctrl_area->svm_instrs.RDTSCP = 0;

        if (info->time_state.flags & VM_TIME_TSC_PASSTHROUGH) {
            ctrl_area->TSC_OFFSET = 0;
        } else {
            ctrl_area->TSC_OFFSET = v3_tsc_host_offset(&info->time_state);
        }
    }
    return 0;
}

/*
 * CAUTION and DANGER!!!
 *
 * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function.
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies
 * on its contents will cause things to break. The contents at the time of the exit WILL
 * change before the exit handler is executed.
 */
int v3_svm_enter(struct guest_info * info) {
    vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
    uint64_t guest_cycles = 0;

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info, -1);

    // Update timer devices after being in the VM before doing
    // IRQ updates, so that any interrupts they raise get seen
    // immediately.
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    // disable global interrupts for vm state transition
    v3_clgi();

    // Synchronize the guest state to the VMCB
    guest_state->cr0 = info->ctrl_regs.cr0;
    guest_state->cr2 = info->ctrl_regs.cr2;
    guest_state->cr3 = info->ctrl_regs.cr3;
    guest_state->cr4 = info->ctrl_regs.cr4;
    guest_state->dr6 = info->dbg_regs.dr6;
    guest_state->dr7 = info->dbg_regs.dr7;
    guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
    guest_state->rflags = info->ctrl_regs.rflags;
    guest_state->efer = info->ctrl_regs.efer;

    /* Synchronize MSRs */
    guest_state->star = info->msrs.star;
    guest_state->lstar = info->msrs.lstar;
    guest_state->sfmask = info->msrs.sfmask;
    guest_state->KernelGsBase = info->msrs.kern_gs_base;

    guest_state->cpl = info->cpl;

    v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));

    guest_state->rax = info->vm_regs.rax;
    guest_state->rip = info->rip;
    guest_state->rsp = info->vm_regs.rsp;

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_entry_state(info);
    }
#else
    update_irq_entry_state(info);
#endif


    /* ** */

    /*
      PrintDebug("SVM Entry to CS=%p  rip=%p...\n",
      (void *)(addr_t)info->segments.cs.base,
      (void *)(addr_t)info->rip);
    */

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 1) {
        if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
            V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
        }
    }
#endif

    v3_svm_config_tsc_virtualization(info);

    //V3_Print("Calling v3_svm_launch\n");
    {
        uint64_t entry_tsc = 0;
        uint64_t exit_tsc = 0;

        rdtscll(entry_tsc);

        v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[V3_Get_CPU()]);

        rdtscll(exit_tsc);

        guest_cycles = exit_tsc - entry_tsc;
    }


    //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip);

    v3_last_exit = (uint32_t)(guest_ctrl->exit_code);

    v3_advance_time(info, &guest_cycles);

    info->num_exits++;

    // Save Guest state from VMCB
    info->rip = guest_state->rip;
    info->vm_regs.rsp = guest_state->rsp;
    info->vm_regs.rax = guest_state->rax;

    info->cpl = guest_state->cpl;

    info->ctrl_regs.cr0 = guest_state->cr0;
    info->ctrl_regs.cr2 = guest_state->cr2;
    info->ctrl_regs.cr3 = guest_state->cr3;
    info->ctrl_regs.cr4 = guest_state->cr4;
    info->dbg_regs.dr6 = guest_state->dr6;
    info->dbg_regs.dr7 = guest_state->dr7;
    info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
    info->ctrl_regs.rflags = guest_state->rflags;
    info->ctrl_regs.efer = guest_state->efer;

    /* Synchronize MSRs */
    info->msrs.star = guest_state->star;
    info->msrs.lstar = guest_state->lstar;
    info->msrs.sfmask = guest_state->sfmask;
    info->msrs.kern_gs_base = guest_state->KernelGsBase;

    v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
    info->cpu_mode = v3_get_vm_cpu_mode(info);
    info->mem_mode = v3_get_vm_mem_mode(info);
    /* ** */

    // save exit info here
    exit_code = guest_ctrl->exit_code;
    exit_info1 = guest_ctrl->exit_info1;
    exit_info2 = guest_ctrl->exit_info2;

#ifdef V3_CONFIG_SYMCALL
    if (info->sym_core_state.symcall_state.sym_call_active == 0) {
        update_irq_exit_state(info);
    }
#else
    update_irq_exit_state(info);
#endif

    // reenable global interrupts after vm exit
    v3_stgi();

    // Conditionally yield the CPU if the timeslice has expired
    v3_yield_cond(info, -1);

    // This update timers is for time-dependent handlers
    // if we're slaved to host time
    v3_advance_time(info, NULL);
    v3_update_timers(info);

    {
        int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

        if (ret != 0) {
            PrintError("Error in SVM exit handler (ret=%d)\n", ret);
            PrintError("  last Exit was %d (exit code=0x%llx)\n", v3_last_exit, (uint64_t)exit_code);
            return -1;
        }
    }

    if (info->timeouts.timeout_active) {
        /* Check to see if any timeouts have expired */
        v3_handle_timeouts(info, guest_cycles);
    }


    return 0;
}


int v3_start_svm_guest(struct guest_info * info) {
    //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
    //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));

    PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);

    if (info->vcpu_id == 0) {
        info->core_run_state = CORE_RUNNING;
    } else {
        PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);

        while (info->core_run_state == CORE_STOPPED) {

            if (info->vm_info->run_state == VM_STOPPED) {
                // The VM was stopped before this core was initialized.
                return 0;
            }

            v3_yield(info, -1);
            //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
        }

        PrintDebug("SVM core %u (on %u) initialized\n", info->vcpu_id, info->pcpu_id);

        // We'll be paranoid about race conditions here
        v3_wait_at_barrier(info);
    }

    PrintDebug("SVM core %u (on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
               info->vcpu_id, info->pcpu_id,
               info->segments.cs.selector, (void *)(info->segments.cs.base),
               info->segments.cs.limit, (void *)(info->rip));



    PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
               info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
    //PrintDebugVMCB((vmcb_t*)(info->vmm_data));

    v3_start_time(info);

    while (1) {

        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }

        if (v3_svm_enter(info) == -1) {
            vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
            addr_t host_addr;
            addr_t linear_addr = 0;

            info->vm_info->run_state = VM_ERROR;

            V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id);

            v3_print_guest_state(info);

            V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);

            V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
            V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));

            V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
            V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));

            linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));

            if (info->mem_mode == PHYSICAL_MEM) {
                v3_gpa_to_hva(info, linear_addr, &host_addr);
            } else if (info->mem_mode == VIRTUAL_MEM) {
                v3_gva_to_hva(info, linear_addr, &host_addr);
            }

            V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);

            V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
            v3_dump_mem((uint8_t *)host_addr, 15);

            v3_print_stack(info);

            break;
        }

        v3_wait_at_barrier(info);


        if (info->vm_info->run_state == VM_STOPPED) {
            info->core_run_state = CORE_STOPPED;
            break;
        }


/*
        if ((info->num_exits % 50000) == 0) {
            V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
            v3_print_guest_state(info);
        }
*/

    }

    // Need to take down the other cores on error...

    return 0;
}




int v3_reset_svm_vm_core(struct guest_info * core, addr_t rip) {
    // init vmcb_bios

    // Write the RIP, CS, and descriptor
    // assume the rest is already good to go
    //
    // vector VV -> rip at 0
    //              CS = VV00
    //  This means we start executing at linear address VV000
    //
    // So the selector needs to be VV00
    // and the base needs to be VV000
    //
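    // For example (hypothetical vector): rip = 0x99 gives CS selector 0x9900 and
    // CS base 0x99000, so the core begins fetching at linear address 0x99000.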
    core->rip = 0;
    core->segments.cs.selector = rip << 8;
    core->segments.cs.limit = 0xffff;
    core->segments.cs.base = rip << 12;

    return 0;
}




/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int v3_is_svm_capable() {
    uint_t vm_cr_low = 0, vm_cr_high = 0;
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);

    if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
        V3_Print("SVM Not Available\n");
        return 0;
    } else {
        v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

        PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);

        // Test the mask, not equality with 1: SVMDIS is not necessarily bit 0
        if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) != 0) {
            V3_Print("SVM is available but is disabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

            if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
                V3_Print("SVM BIOS Disabled, not unlockable\n");
            } else {
                V3_Print("SVM is locked with a key\n");
            }
            return 0;

        } else {
            V3_Print("SVM is available and enabled.\n");

            v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
            PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);

            return 1;
        }
    }
}

static int has_svm_nested_paging() {
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);

    //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);

    if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
        V3_Print("SVM Nested Paging not supported\n");
        return 0;
    } else {
        V3_Print("SVM Nested Paging supported\n");
        return 1;
    }
}



void v3_init_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // Enable SVM on the CPU
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low |= EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    V3_Print("SVM Enabled\n");

    // Setup the host state save area
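    // (The hardware itself uses only a single 4 KB save area per core; the
    // extra pages allocated here appear to be slack.)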
    host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);

    if (!host_vmcbs[cpu_id]) {
        PrintError("Failed to allocate VMCB\n");
        return;
    }

    /* 64-BIT-ISSUE */
    //  msr.e_reg.high = 0;
    //msr.e_reg.low = (uint_t)host_vmcb;
    msr.r_reg = host_vmcbs[cpu_id];

    PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);


    if (has_svm_nested_paging() == 1) {
        v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
    } else {
        v3_cpu_types[cpu_id] = V3_SVM_CPU;
    }
}



void v3_deinit_svm_cpu(int cpu_id) {
    reg_ex_t msr;
    extern v3_cpu_arch_t v3_cpu_types[];

    // reset SVM_VM_HSAVE_PA_MSR
    // Does setting it to NULL disable??
    msr.r_reg = 0;
    v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);

    // Disable SVM?
    v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    msr.e_reg.low &= ~EFER_MSR_svm_enable;
    v3_set_msr(EFER_MSR, 0, msr.e_reg.low);

    v3_cpu_types[cpu_id] = V3_INVALID_CPU;

    V3_FreePages((void *)host_vmcbs[cpu_id], 4);

    V3_Print("Host CPU %d host area freed, and SVM disabled\n", cpu_id);
    return;
}



#if 0
/*
 * Test VMSAVE/VMLOAD Latency
 */
#define vmsave ".byte 0x0F,0x01,0xDB ; "
#define vmload ".byte 0x0F,0x01,0xDA ; "
{
    uint32_t start_lo, start_hi;
    uint32_t end_lo, end_hi;
    uint64_t start, end;

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmsave
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));

    __asm__ __volatile__ (
                          "rdtsc ; "
                          "movl %%eax, %%esi ; "
                          "movl %%edx, %%edi ; "
                          "movq %%rcx, %%rax ; "
                          vmload
                          "rdtsc ; "
                          : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo), "=d"(end_hi)
                          : "c"(host_vmcbs[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
                          );

    start = start_hi;
    start <<= 32;
    start += start_lo;

    end = end_hi;
    end <<= 32;
    end += end_lo;

    PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
}
/* End Latency Test */

#endif




#if 0
void Init_VMCB_pe(vmcb_t * vmcb, struct guest_info vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.vm_regs.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 1<<3;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.attrib.fields.G = 1;
  guest_state->cs.limit = 0xfffff;
  guest_state->cs.base = 0;

  struct vmcb_selector * segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for (i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 2<<3;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->attrib.fields.G = 1;
    seg->limit = 0xfffff;
    seg->base = 0;
  }


  {
    /* JRL THIS HAS TO GO */

    //    guest_state->tr.selector = GetTR_Selector();
    guest_state->tr.attrib.fields.type = 0x9;
    guest_state->tr.attrib.fields.P = 1;
    // guest_state->tr.limit = GetTR_Limit();
    //guest_state->tr.base = GetTR_Base();// - 0x2000;
    /* ** */
  }


  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
  ctrl_area->svm_instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 0;



  // Setup exits

  ctrl_area->cr_writes.cr4 = 1;

  ctrl_area->exceptions.de = 1;
  ctrl_area->exceptions.df = 1;
  ctrl_area->exceptions.pf = 1;
  ctrl_area->exceptions.ts = 1;
  ctrl_area->exceptions.ss = 1;
  ctrl_area->exceptions.ac = 1;
  ctrl_area->exceptions.mc = 1;
  ctrl_area->exceptions.gp = 1;
  ctrl_area->exceptions.ud = 1;
  ctrl_area->exceptions.np = 1;
  ctrl_area->exceptions.of = 1;
  ctrl_area->exceptions.nmi = 1;



  ctrl_area->instrs.IOIO_PROT = 1;
  ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);

  if (!ctrl_area->IOPM_BASE_PA) {
      PrintError("Cannot allocate IO bitmap\n");
      return;
  }

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    // memset only uses the low byte of its fill value, so pass 0xff directly
    memset((void *)(tmp_reg.e_reg.low), 0xff, PAGE_SIZE * 2);
  }

  ctrl_area->instrs.INTR = 1;


  {
    char gdt_buf[6];
    char idt_buf[6];

    memset(gdt_buf, 0, 6);
    memset(idt_buf, 0, 6);


    uint_t gdt_base, idt_base;
    ushort_t gdt_limit, idt_limit;

    GetGDTR(gdt_buf);
    gdt_base = *(ulong_t *)((uchar_t *)gdt_buf + 2) & 0xffffffff;
    gdt_limit = *(ushort_t *)(gdt_buf) & 0xffff;
    PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);

    GetIDTR(idt_buf);
    idt_base = *(ulong_t *)(idt_buf + 2) & 0xffffffff;
    idt_limit = *(ushort_t *)(idt_buf) & 0xffff;
    PrintDebug("IDT: base: %x, limit: %x\n", idt_base, idt_limit);


    // gdt_base -= 0x2000;
    //idt_base -= 0x2000;

    guest_state->gdtr.base = gdt_base;
    guest_state->gdtr.limit = gdt_limit;
    guest_state->idtr.base = idt_base;
    guest_state->idtr.limit = idt_limit;

  }


  // also determine if CPU supports nested paging
  /*
  if (vm_info.page_tables) {
    //   if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }
  */

}




#endif