about summary refs log tree commit diff stats
path: root/src/fw/smp.c
diff options
context:
space:
mode:
authorKevin O'Connor <kevin@koconnor.net>2014-05-24 10:49:50 -0400
committerKevin O'Connor <kevin@koconnor.net>2014-06-04 11:06:58 -0400
commit0673b7870063a3affbad9046fb6d385a4e734c19 (patch)
tree4aaa57d88e3f8e35930600bbe5e64762fd1ca971 /src/fw/smp.c
parent31bcda20eff45de7fdd2aad30d42f28165ccd69a (diff)
downloadseabios-0673b7870063a3affbad9046fb6d385a4e734c19.tar.gz
smp: Replace QEMU SMP init assembler code with C; run only in 32bit mode.
Change the multi-processor init code to trampoline into 32bit mode on each of the additional processors. Implement an atomic lock so that each processor performs its initialization serially. Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Diffstat (limited to 'src/fw/smp.c')
-rw-r--r--src/fw/smp.c110
1 file changed, 56 insertions, 54 deletions
diff --git a/src/fw/smp.c b/src/fw/smp.c
index 38fe383a..a2df9dee 100644
--- a/src/fw/smp.c
+++ b/src/fw/smp.c
@@ -1,4 +1,4 @@
-// CPU count detection
+// QEMU multi-CPU initialization code
//
// Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
// Copyright (C) 2006 Fabrice Bellard
@@ -20,8 +20,8 @@
#define APIC_ENABLED 0x0100
-struct { u32 ecx, eax, edx; } smp_mtrr[32] VARFSEG;
-u32 smp_mtrr_count VARFSEG;
+static struct { u32 index; u64 val; } smp_mtrr[32];
+static u32 smp_mtrr_count;
void
wrmsr_smp(u32 index, u64 val)
@@ -31,58 +31,48 @@ wrmsr_smp(u32 index, u64 val)
warn_noalloc();
return;
}
- smp_mtrr[smp_mtrr_count].ecx = index;
- smp_mtrr[smp_mtrr_count].eax = val;
- smp_mtrr[smp_mtrr_count].edx = val >> 32;
+ smp_mtrr[smp_mtrr_count].index = index;
+ smp_mtrr[smp_mtrr_count].val = val;
smp_mtrr_count++;
}
-u32 CountCPUs VARFSEG;
u32 MaxCountCPUs;
+static u32 CountCPUs;
// 256 bits for the found APIC IDs
-u32 FoundAPICIDs[256/32] VARFSEG;
-extern void smp_ap_boot_code(void);
-ASM16(
- " .global smp_ap_boot_code\n"
- "smp_ap_boot_code:\n"
-
- // Setup data segment
- " movw $" __stringify(SEG_BIOS) ", %ax\n"
- " movw %ax, %ds\n"
-
- // MTRR setup
- " movl $smp_mtrr, %esi\n"
- " movl smp_mtrr_count, %ebx\n"
- "1:testl %ebx, %ebx\n"
- " jz 2f\n"
- " movl 0(%esi), %ecx\n"
- " movl 4(%esi), %eax\n"
- " movl 8(%esi), %edx\n"
- " wrmsr\n"
- " addl $12, %esi\n"
- " decl %ebx\n"
- " jmp 1b\n"
- "2:\n"
-
- // get apic ID on EBX, set bit on FoundAPICIDs
- " movl $1, %eax\n"
- " cpuid\n"
- " shrl $24, %ebx\n"
- " lock btsl %ebx, FoundAPICIDs\n"
-
- // Increment the cpu counter
- " lock incl CountCPUs\n"
-
- // Halt the processor.
- "1:hlt\n"
- " jmp 1b\n"
- );
+static u32 FoundAPICIDs[256/32];
int apic_id_is_present(u8 apic_id)
{
return !!(FoundAPICIDs[apic_id/32] & (1ul << (apic_id % 32)));
}
+void VISIBLE32FLAT
+handle_smp(void)
+{
+ // Enable CPU caching
+ setcr0(getcr0() & ~(CR0_CD|CR0_NW));
+
+ // Detect apic_id
+ u32 eax, ebx, ecx, cpuid_features;
+ cpuid(1, &eax, &ebx, &ecx, &cpuid_features);
+ u8 apic_id = ebx>>24;
+ dprintf(DEBUG_HDL_smp, "handle_smp: apic_id=%d\n", apic_id);
+
+ // MTRR setup
+ int i;
+ for (i=0; i<smp_mtrr_count; i++)
+ wrmsr(smp_mtrr[i].index, smp_mtrr[i].val);
+
+ // Set bit on FoundAPICIDs
+ FoundAPICIDs[apic_id/32] |= (1 << (apic_id % 32));
+
+ CountCPUs++;
+}
+
+// Atomic lock for shared stack across processors.
+u32 SMPLock __VISIBLE;
+u32 SMPStack __VISIBLE;
+
// find and initialize the CPUs by launching a SIPI to them
void
smp_setup(void)
@@ -104,15 +94,14 @@ smp_setup(void)
// mark the BSP initial APIC ID as found, too:
u8 apic_id = ebx>>24;
FoundAPICIDs[apic_id/32] |= (1 << (apic_id % 32));
-
- // Init the counter.
- writel(&CountCPUs, 1);
+ CountCPUs = 1;
// Setup jump trampoline to counter code.
u64 old = *(u64*)BUILD_AP_BOOT_ADDR;
- // ljmpw $SEG_BIOS, $(smp_ap_boot_code - BUILD_BIOS_ADDR)
+ // ljmpw $SEG_BIOS, $(entry_smp - BUILD_BIOS_ADDR)
+ extern void entry_smp(void);
u64 new = (0xea | ((u64)SEG_BIOS<<24)
- | (((u32)smp_ap_boot_code - BUILD_BIOS_ADDR) << 8));
+ | (((u32)entry_smp - BUILD_BIOS_ADDR) << 8));
*(u64*)BUILD_AP_BOOT_ADDR = new;
// enable local APIC
@@ -125,6 +114,9 @@ smp_setup(void)
/* Set LINT1 as NMI, level triggered */
writel(APIC_LINT1, 0x8400);
+ // Init the lock.
+ writel(&SMPLock, 1);
+
// broadcast SIPI
barrier();
writel(APIC_ICR_LOW, 0x000C4500);
@@ -132,9 +124,19 @@ smp_setup(void)
writel(APIC_ICR_LOW, 0x000C4600 | sipi_vector);
// Wait for other CPUs to process the SIPI.
- u8 cmos_smp_count = rtc_read(CMOS_BIOS_SMP_COUNT);
- while (cmos_smp_count + 1 != readl(&CountCPUs))
- yield();
+ u8 cmos_smp_count = rtc_read(CMOS_BIOS_SMP_COUNT) + 1;
+ while (cmos_smp_count != CountCPUs)
+ asm volatile(
+ // Release lock and allow other processors to use the stack.
+ " movl %%esp, %1\n"
+ " movl $0, %0\n"
+ // Reacquire lock and take back ownership of stack.
+ "1:rep ; nop\n"
+ " lock btsl $0, %0\n"
+ " jc 1b\n"
+ : "+m" (SMPLock), "+m" (SMPStack)
+ : : "cc", "memory");
+ yield();
// Restore memory.
*(u64*)BUILD_AP_BOOT_ADDR = old;
@@ -143,6 +145,6 @@ smp_setup(void)
if (!MaxCountCPUs || MaxCountCPUs < CountCPUs)
MaxCountCPUs = CountCPUs;
- dprintf(1, "Found %d cpu(s) max supported %d cpu(s)\n", readl(&CountCPUs),
- MaxCountCPUs);
+ dprintf(1, "Found %d cpu(s) max supported %d cpu(s)\n", CountCPUs,
+ MaxCountCPUs);
}