X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=kitten%2Finclude%2Farch-x86_64%2Fsmp.h;fp=kitten%2Finclude%2Farch-x86_64%2Fsmp.h;h=c127ed6ea852912989b50f57d8cbc1a318d7fff6;hb=66a1a4c7a9edcd7d8bc207aca093d694a6e6b5b2;hp=0000000000000000000000000000000000000000;hpb=f7cf9c19ecb0a589dd45ae0d2c91814bd3c2acc2;p=palacios.releases.git

diff --git a/kitten/include/arch-x86_64/smp.h b/kitten/include/arch-x86_64/smp.h
new file mode 100644
index 0000000..c127ed6
--- /dev/null
+++ b/kitten/include/arch-x86_64/smp.h
@@ -0,0 +1,127 @@
+#ifndef _ASM_SMP_H
+#define _ASM_SMP_H
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifndef __ASSEMBLY__
+/* #include s */
+#include 
+#include 
+extern int disable_apic;
+#endif
+
+#ifndef __ASSEMBLY__
+#include 
+//#include 
+//#include 
+#include 
+//#include 
+#include 
+
+struct pt_regs;
+
+extern cpumask_t cpu_present_mask;
+extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_online_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+
+/*
+ * Private routines/data
+ */
+
+extern void smp_alloc_memory(void);
+extern volatile unsigned long smp_invalidate_needed;
+extern int pic_mode;
+extern void lock_ipi_call_lock(void);
+extern void unlock_ipi_call_lock(void);
+extern int smp_num_siblings;
+extern void smp_send_reschedule(int cpu);
+void smp_stop_cpu(void);
+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
+                                    void *info, int retry, int wait);
+
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
+extern uint16_t phys_proc_id[NR_CPUS];
+extern uint16_t cpu_core_id[NR_CPUS];
+extern uint16_t cpu_llc_id[NR_CPUS];
+
+#define SMP_TRAMPOLINE_BASE 0x6000
+
+/*
+ * On x86 all CPUs are mapped 1:1 to the APIC space.
+ * This simplifies scheduling and IPI sending and
+ * compresses data structures.
+ */
+
+static inline int num_booting_cpus(void)
+{
+        return cpus_weight(cpu_callout_map);
+}
+
+#define raw_smp_processor_id() read_pda(cpunumber)
+
+static inline int hard_smp_processor_id(void)
+{
+        /* we don't want to mark this access volatile - bad code generation */
+        return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
+
+extern int safe_smp_processor_id(void);
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+extern void prefill_possible_map(void);
+extern unsigned num_processors;
+extern unsigned disabled_cpus;
+
+#endif /* !ASSEMBLY */
+
+#define NO_PROC_ID 0xFF /* No processor magic marker */
+
+
+#ifndef ASSEMBLY
+/*
+ * Some lowlevel functions might want to know about
+ * the real APIC ID <-> CPU # mapping.
+ */
+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
+extern u8 bios_cpu_apicid[];
+
+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+{
+        return cpus_addr(cpumask)[0];
+}
+
+static inline int cpu_present_to_apicid(int mps_cpu)
+{
+        if (mps_cpu < NR_CPUS)
+                return (int)bios_cpu_apicid[mps_cpu];
+        else
+                return BAD_APICID;
+}
+
+#endif /* !ASSEMBLY */
+
+#include 
+#define stack_smp_processor_id() \
+({ \
+        struct task_struct *task; \
+        __asm__("andq %%rsp,%0; ":"=r" (task) : "0" (CURRENT_MASK)); \
+        task->arch.cpu; \
+})
+
+#ifndef __ASSEMBLY__
+static __inline int logical_smp_processor_id(void)
+{
+        /* we don't want to mark this access volatile - bad code generation */
+        return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+
+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+
+#endif
+
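The least obvious piece of the header above is stack_smp_processor_id(): it ANDs the current stack pointer with CURRENT_MASK to find the task structure placed at the base of the aligned kernel stack, then reads that task's cpu field. Below is a minimal user-space sketch of the same masking trick; the STACK_SIZE value, the fake_task type, and the use of aligned_alloc() are illustrative assumptions for the sketch and are not taken from the patch.

/*
 * Sketch: recover a per-stack structure by masking a pointer into the stack.
 * Assumes an 8 KB, 8 KB-aligned stack with the structure at its base, which
 * is the convention the CURRENT_MASK trick in the header relies on.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE   (8 * 1024)               /* hypothetical stack size      */
#define CURRENT_MASK (~(STACK_SIZE - 1UL))    /* clears the in-stack offset   */

struct fake_task {                            /* stands in for task_struct    */
        int cpu;                              /* stands in for task->arch.cpu */
};

int main(void)
{
        /* Allocate an aligned "kernel stack" with the task at its base. */
        void *stack = aligned_alloc(STACK_SIZE, STACK_SIZE);
        if (!stack)
                return 1;
        struct fake_task *task = stack;
        task->cpu = 3;

        /* A "stack pointer" somewhere inside that stack. */
        uintptr_t rsp = (uintptr_t)stack + STACK_SIZE - 128;

        /* The header's trick: mask off the low bits to locate the task. */
        struct fake_task *found = (struct fake_task *)(rsp & CURRENT_MASK);
        printf("cpu = %d\n", found->cpu);     /* prints 3 */

        free(stack);
        return 0;
}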