8 #include <interfaces/vmm_numa.h>
unsigned int num_nodes;       /* number of NUMA nodes described by this topology */
unsigned int num_cpus;        /* number of entries in cpu_to_node_map */
unsigned int num_mem_regions; /* number of entries in mem_to_node_map */
/* CPU -> node lookup table; presumably indexed by CPU id — TODO confirm against callers */
u32 * cpu_to_node_map;
// For now an array, but we might want to move this to an rbtree
struct mem_region * mem_to_node_map;
34 int create_numa_topology_from_user(void __user * argp) {
35 struct v3_numa_topo user_topo;
37 if (copy_from_user(&user_topo, argp, sizeof(struct v3_numa_topo))) {
38 ERROR("Could not read in NUMA topology from user\n");
42 argp += sizeof(struct v3_numa_topo);
44 topology.num_nodes = user_topo.num_nodes;
45 topology.num_cpus = user_topo.num_cpus;
46 topology.num_mem_regions = user_topo.num_mem_regions;
49 /* Read in the CPU to Node mapping */
51 topology.cpu_to_node_map = kmalloc(GFP_KERNEL,
55 if (IS_ERR(topology.cpu_to_node_map)) {
56 ERROR("Could not allocate cpu to node map\n");
61 if (copy_from_user(topology.cpu_to_node_map, argp,
62 sizeof(u32) * topology.num_cpus)) {
63 ERROR("Could not copy cpu to node map from user space\n");
64 kfree(topology.cpu_to_node_map);
68 argp += sizeof(u32) * topology.num_cpus;
71 /* Read in the memory to Node Mapping */
75 topology.mem_to_node_map = kmalloc(GFP_KERNEL,
76 sizeof(struct mem_region) *
77 topology.num_mem_regions);
79 if (IS_ERR(topology.mem_to_node_map)) {
80 ERROR("Could not allocate mem to node map\n");
81 kfree(topology.cpu_to_node_map);
85 if (copy_from_user(topology.mem_to_node_map, argp,
86 sizeof(struct mem_region) * topology.num_mem_regions)) {
87 ERROR("Coudl not copy mem to node map from user space\n");
88 kfree(topology.cpu_to_node_map);
89 kfree(topology.mem_to_node_map);
93 /* The memory range comes in as the base_addr and the number of pages
94 We need to fix it up to be the base addr and end addr instead
95 We just perform the transformation inline
97 for (i = 0; i < topology.num_mem_regions; i++) {
98 struct mem_region * region = &(topology.mem_to_node_map[i]);
100 region->end_addr = region->base_addr + (region->end_addr * 4096);
106 /* Read in the distance table */
108 topology.distance_table = kmalloc(GFP_KERNEL,
110 (topology.num_nodes * topology.num_nodes));
112 if (IS_ERR(topology.distance_table)) {
113 ERROR("Could not allocate distance table\n");
114 kfree(topology.cpu_to_node_map);
115 kfree(topology.mem_to_node_map);
120 if (copy_from_user(topology.distance_table, argp,
121 sizeof(u32) * (topology.num_nodes * topology.num_nodes))) {
122 ERROR("Could not copy distance table from user space\n");
123 kfree(topology.cpu_to_node_map);
124 kfree(topology.mem_to_node_map);
125 kfree(topology.distance_table);
131 /* Report what we found */
136 printk("Created NUMA topology from user space\n");
137 printk("Number of Nodes: %d, CPUs: %d, MEM regions: %d\n",
138 topology.num_nodes, topology.num_cpus, topology.num_mem_regions);
140 printk("CPU mapping\n");
141 for (i = 0; i < topology.num_cpus; i++) {
142 printk("\tCPU %d -> Node %d\n", i, topology.cpu_to_node_map[i]);
145 printk("Memory mapping\n");
147 for (i = 0; i < topology.num_mem_regions; i++) {
148 struct mem_region * region = &(topology.mem_to_node_map[i]);
149 printk("\tMEM %p - %p -> Node %d\n",
156 printk("Distance Table\n");
157 for (i = 0; i < topology.num_nodes; i++) {
162 for (i = 0; i < topology.num_nodes; i++) {
165 for (j = 0; j < topology.num_nodes; j++) {
166 printk("\t%d", topology.distance_table[j + (i * topology.num_nodes)]);
/* Number of NUMA nodes the host kernel currently reports online. */
int numa_num_nodes(void) {
    int node_count = num_online_nodes();
    return node_count;
}
190 int numa_addr_to_node(uintptr_t phys_addr) {
191 return page_to_nid(pfn_to_page(phys_addr >> PAGE_SHIFT));
/* Map a CPU id to its home NUMA node via the host kernel. */
int numa_cpu_to_node(int cpu_id) {
    int node = cpu_to_node(cpu_id);
    return node;
}
/* Inter-node distance as reported by the host kernel's SLIT/ACPI data. */
int numa_get_distance(int node1, int node2) {
    int dist = node_distance(node1, node2);
    return dist;
}
204 /* Ugly fix for interface type differences... */
205 static int phys_ptr_to_node(void * phys_ptr) {
206 return numa_addr_to_node((uintptr_t)phys_ptr);
209 struct v3_numa_hooks numa_hooks = {
210 .cpu_to_node = numa_cpu_to_node,
211 .phys_addr_to_node = phys_ptr_to_node,
212 .get_distance = numa_get_distance,
216 int palacios_init_numa( void ) {
218 V3_Init_NUMA(&numa_hooks);