if (v3_dev->iface == IOMMU) {
struct v3_guest_mem_region region;
int flags = 0;
+ uintptr_t gpa = 0;
    host_dev->hw_dev.iommu_domain = iommu_domain_alloc();
+
+    if (host_dev->hw_dev.iommu_domain == NULL) {
+        printk("ERROR: Could not allocate IOMMU domain\n");
+        return -1;
+    }
-    if (V3_get_guest_mem_region(v3_ctx, &region) == -1) {
- printk("Error getting VM memory region for IOMMU support\n");
- return -1;
- }
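+    /* Walk all guest memory regions: V3_get_guest_mem_region() returns
+     * nonzero while a region containing gpa exists, and gpa is advanced
+     * past each mapped region below, so the walk ends at the top of
+     * guest memory. */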
+    while (V3_get_guest_mem_region(v3_ctx, &region, gpa)) {
- printk("Memory region: start=%p, end=%p\n", (void *)region.start, (void *)region.end);
+ printk("Memory region: start=%p, end=%p\n", (void *)region.start, (void *)region.end);
- flags = IOMMU_READ | IOMMU_WRITE; // Need to see what IOMMU_CACHE means
+ flags = IOMMU_READ | IOMMU_WRITE; // Need to see what IOMMU_CACHE means
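+        /* For reference: IOMMU_CACHE requests cache-coherent (snooped) DMA
+         * on hardware that supports it; read/write permissions alone should
+         * be sufficient for this passthrough mapping. */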
- /* This version could be wrong */
+ /* This version could be wrong */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
- // Guest VAs start at zero and go to end of memory
- iommu_map_range(host_dev->hw_dev.iommu_domain, 0, region.start, (region.end - region.start), flags);
+        // Map this region at its guest physical address, then advance gpa
+        // so the outer walk moves on to the next region.
+        iommu_map_range(host_dev->hw_dev.iommu_domain, gpa, region.start, (region.end - region.start), flags);
+        gpa += (region.end - region.start);
#else
- /* Linux actually made the interface worse... Now you can only map memory in powers of 2 (meant to only be pages...) */
- {
- u64 size = region.end - region.start;
- u32 page_size = 512 * 4096; // assume large 64bit pages (2MB)
- u64 dpa = 0; // same as gpa
- u64 hpa = region.start;
-
- do {
- if (size < page_size) {
- page_size = 4096; // less than a 2MB granularity, so we switch to small pages (4KB)
- }
-
- printk("Mapping IOMMU region dpa=%p hpa=%p (size=%d)\n", (void *)dpa, (void *)hpa, page_size);
-
- if (iommu_map(host_dev->hw_dev.iommu_domain, dpa, hpa,
- get_order(page_size), flags)) {
- printk("ERROR: Could not map sub region (DPA=%p) (HPA=%p) (order=%d)\n",
- (void *)dpa, (void *)hpa, get_order(page_size));
- break;
- }
-
- hpa += page_size;
- dpa += page_size;
-
- size -= page_size;
- } while (size);
- }
+ /* Linux actually made the interface worse... Now you can only map memory in powers of 2 (meant to only be pages...) */
+ {
+ u64 size = region.end - region.start;
+            u32 page_size = 512 * 4096; // assume 2MB large pages to start
+ u64 hpa = region.start;
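+            /* Assumption: gpa, hpa, and the region size are 2MB-aligned.
+             * IOMMU drivers generally reject a map whose iova or paddr is
+             * not aligned to the mapping size, so unaligned regions would
+             * have to fall back to 4KB pages from the start. */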
+
+ do {
+ if (size < page_size) {
+ page_size = 4096; // less than a 2MB granularity, so we switch to small pages (4KB)
+ }
+
+                printk("Mapping IOMMU region gpa=%p hpa=%p (size=%u)\n", (void *)gpa, (void *)hpa, page_size);
+
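+                /* Note: iommu_map() on kernels >= 2.6.38 takes the mapping
+                 * size as a page order: get_order(2MB) == 9 and
+                 * get_order(4KB) == 0. */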
+ if (iommu_map(host_dev->hw_dev.iommu_domain, gpa, hpa,
+ get_order(page_size), flags)) {
+ printk("ERROR: Could not map sub region (GPA=%p) (HPA=%p) (order=%d)\n",
+ (void *)gpa, (void *)hpa, get_order(page_size));
+                    return -1; /* a plain break would let the outer walk refetch the same region and spin */
+ }
+
+ hpa += page_size;
+ gpa += page_size;
+
+ size -= page_size;
+ } while (size > 0);
+ }
#endif
+ }
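+
+    /* With the mappings in place, attach the device: its DMA now goes
+     * through this domain's translations, so the guest can program it
+     * with guest physical addresses directly. */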
if (iommu_attach_device(host_dev->hw_dev.iommu_domain, &(dev->dev))) {
printk("ERROR attaching host PCI device to IOMMU domain\n");
/* This is ugly and should be abstracted out to a function in the memory manager */
-int V3_get_guest_mem_region(struct v3_vm_info * vm, struct v3_guest_mem_region * region) {
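+/* Look up the guest memory region containing gpa. On success, *region is
+ * filled with the region's host address range and 1 is returned; 0 is
+ * returned when no region contains gpa (or vm is NULL), which ends the
+ * caller's walk. */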
+int V3_get_guest_mem_region(struct v3_vm_info * vm, struct v3_guest_mem_region * region, uint64_t gpa) {
+ struct v3_mem_region * v3_reg = NULL;
+
+ memset(region, 0, sizeof(struct v3_guest_mem_region));
    if (!vm) {
-        PrintError(vm, VCORE_NONE, "Tried to get a menregion from a NULL vm pointer\n");
+        PrintError(vm, VCORE_NONE, "Tried to get a memory region from a NULL vm pointer\n");
+        return 0;
    }
- region->start = vm->mem_map.base_region.host_addr;
- region->end = vm->mem_map.base_region.host_addr + (vm->mem_map.base_region.guest_end - vm->mem_map.base_region.guest_start);
+ v3_reg = v3_get_base_region(vm, gpa);
+
+ if (v3_reg == NULL) {
+ return 0;
+ }
+
+ region->start = v3_reg->host_addr;
+ region->end = v3_reg->host_addr + (v3_reg->guest_end - v3_reg->guest_start);
- return 0;
+ return 1;
}