Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c                      1001
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm             153
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c                 23
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c            29
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c          22
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h           6
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c                    10
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c               10
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h               39
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf              7
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c      114
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c                  128
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h           43
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c                    19
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm              189
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c                 199
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm           6
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c             30
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c          171
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h           61
20 files changed, 456 insertions, 1804 deletions
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
index d67fb49890..caad70ac84 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
@@ -1,74 +1,15 @@
/** @file
Code for Processor S3 restoration
-Copyright (c) 2006 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2006 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include "PiSmmCpuDxeSmm.h"
#include <PiPei.h>
-#include <Ppi/MpServices2.h>
-#pragma pack(1)
-typedef struct {
- UINTN Lock;
- VOID *StackStart;
- UINTN StackSize;
- VOID *ApFunction;
- IA32_DESCRIPTOR GdtrProfile;
- IA32_DESCRIPTOR IdtrProfile;
- UINT32 BufferStart;
- UINT32 Cr3;
- UINTN InitializeFloatingPointUnitsAddress;
-} MP_CPU_EXCHANGE_INFO;
-#pragma pack()
-
-typedef struct {
- UINT8 *RendezvousFunnelAddress;
- UINTN PModeEntryOffset;
- UINTN FlatJumpOffset;
- UINTN Size;
- UINTN LModeEntryOffset;
- UINTN LongJumpOffset;
-} MP_ASSEMBLY_ADDRESS_MAP;
-
-//
-// Flags used when program the register.
-//
-typedef struct {
- volatile UINTN MemoryMappedLock; // Spinlock used to program mmio
- volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program
- // core level semaphore.
- volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program
- // package level semaphore.
-} PROGRAM_CPU_REGISTER_FLAGS;
-
-//
-// Signal that SMM BASE relocation is complete.
-//
-volatile BOOLEAN mInitApsAfterSmmBaseReloc;
-
-/**
- Get starting address and size of the rendezvous entry for APs.
- Information for fixing a jump instruction in the code is also returned.
-
- @param AddressMap Output buffer for address map information.
-**/
-VOID *
-EFIAPI
-AsmGetAddressMap (
- MP_ASSEMBLY_ADDRESS_MAP *AddressMap
- );
-
-#define LEGACY_REGION_SIZE (2 * 0x1000)
-#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
-
-PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;
-ACPI_CPU_DATA mAcpiCpuData;
-volatile UINT32 mNumberToFinish;
-MP_CPU_EXCHANGE_INFO *mExchangeInfo;
-BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
//
// S3 boot flag
@@ -82,651 +23,6 @@ SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
BOOLEAN mAcpiS3Enable = TRUE;
-UINT8 *mApHltLoopCode = NULL;
-UINT8 mApHltLoopCodeTemplate[] = {
- 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
- 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
- 0xFA, // cli
- 0xF4, // hlt
- 0xEB, 0xFC // jmp $-2
-};
-
-/**
- Sync up the MTRR values for all processors.
-
- @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
-**/
-VOID
-EFIAPI
-LoadMtrrData (
- EFI_PHYSICAL_ADDRESS MtrrTable
- )
-
-/*++
-
-Routine Description:
-
- Sync up the MTRR values for all processors.
-
-Arguments:
-
-Returns:
- None
-
---*/
-{
- MTRR_SETTINGS *MtrrSettings;
-
- MtrrSettings = (MTRR_SETTINGS *)(UINTN)MtrrTable;
- MtrrSetAllMtrrs (MtrrSettings);
-}
-
-/**
- Increment semaphore by 1.
-
- @param Sem IN: 32-bit unsigned integer
-
-**/
-VOID
-S3ReleaseSemaphore (
- IN OUT volatile UINT32 *Sem
- )
-{
- InterlockedIncrement (Sem);
-}
-
-/**
- Decrement the semaphore by 1 if it is not zero.
-
- Performs an atomic decrement operation for semaphore.
- The compare exchange operation must be performed using
- MP safe mechanisms.
-
- @param Sem IN: 32-bit unsigned integer
-
-**/
-VOID
-S3WaitForSemaphore (
- IN OUT volatile UINT32 *Sem
- )
-{
- UINT32 Value;
-
- do {
- Value = *Sem;
- } while (Value == 0 ||
- InterlockedCompareExchange32 (
- Sem,
- Value,
- Value - 1
- ) != Value);
-}
-
-/**
- Read / write CR value.
-
- @param[in] CrIndex The CR index which need to read/write.
- @param[in] Read Read or write. TRUE is read.
- @param[in,out] CrValue CR value.
-
- @retval EFI_SUCCESS means read/write success, else return EFI_UNSUPPORTED.
-**/
-UINTN
-ReadWriteCr (
- IN UINT32 CrIndex,
- IN BOOLEAN Read,
- IN OUT UINTN *CrValue
- )
-{
- switch (CrIndex) {
- case 0:
- if (Read) {
- *CrValue = AsmReadCr0 ();
- } else {
- AsmWriteCr0 (*CrValue);
- }
-
- break;
- case 2:
- if (Read) {
- *CrValue = AsmReadCr2 ();
- } else {
- AsmWriteCr2 (*CrValue);
- }
-
- break;
- case 3:
- if (Read) {
- *CrValue = AsmReadCr3 ();
- } else {
- AsmWriteCr3 (*CrValue);
- }
-
- break;
- case 4:
- if (Read) {
- *CrValue = AsmReadCr4 ();
- } else {
- AsmWriteCr4 (*CrValue);
- }
-
- break;
- default:
- return EFI_UNSUPPORTED;
- }
-
- return EFI_SUCCESS;
-}
-
-/**
- Initialize the CPU registers from a register table.
-
- @param[in] RegisterTable The register table for this AP.
- @param[in] ApLocation AP location info for this ap.
- @param[in] CpuStatus CPU status info for this CPU.
- @param[in] CpuFlags Flags data structure used when program the register.
-
- @note This service could be called by BSP/APs.
-**/
-VOID
-ProgramProcessorRegister (
- IN CPU_REGISTER_TABLE *RegisterTable,
- IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,
- IN CPU_STATUS_INFORMATION *CpuStatus,
- IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags
- )
-{
- CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
- UINTN Index;
- UINTN Value;
- CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;
- volatile UINT32 *SemaphorePtr;
- UINT32 FirstThread;
- UINT32 CurrentThread;
- UINT32 CurrentCore;
- UINTN ProcessorIndex;
- UINT32 *ThreadCountPerPackage;
- UINT8 *ThreadCountPerCore;
- EFI_STATUS Status;
- UINT64 CurrentValue;
-
- //
- // Traverse Register Table of this logical processor
- //
- RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *)(UINTN)RegisterTable->RegisterTableEntry;
-
- for (Index = 0; Index < RegisterTable->TableLength; Index++) {
- RegisterTableEntry = &RegisterTableEntryHead[Index];
-
- //
- // Check the type of specified register
- //
- switch (RegisterTableEntry->RegisterType) {
- //
- // The specified register is Control Register
- //
- case ControlRegister:
- Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
- if (EFI_ERROR (Status)) {
- break;
- }
-
- if (RegisterTableEntry->TestThenWrite) {
- CurrentValue = BitFieldRead64 (
- Value,
- RegisterTableEntry->ValidBitStart,
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
- );
- if (CurrentValue == RegisterTableEntry->Value) {
- break;
- }
- }
-
- Value = (UINTN)BitFieldWrite64 (
- Value,
- RegisterTableEntry->ValidBitStart,
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
- RegisterTableEntry->Value
- );
- ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
- break;
- //
- // The specified register is Model Specific Register
- //
- case Msr:
- if (RegisterTableEntry->TestThenWrite) {
- Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
- if (RegisterTableEntry->ValidBitLength >= 64) {
- if (Value == RegisterTableEntry->Value) {
- break;
- }
- } else {
- CurrentValue = BitFieldRead64 (
- Value,
- RegisterTableEntry->ValidBitStart,
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
- );
- if (CurrentValue == RegisterTableEntry->Value) {
- break;
- }
- }
- }
-
- //
- // If this function is called to restore register setting after INIT signal,
- // there is no need to restore MSRs in register table.
- //
- if (RegisterTableEntry->ValidBitLength >= 64) {
- //
- // If length is not less than 64 bits, then directly write without reading
- //
- AsmWriteMsr64 (
- RegisterTableEntry->Index,
- RegisterTableEntry->Value
- );
- } else {
- //
- // Set the bit section according to bit start and length
- //
- AsmMsrBitFieldWrite64 (
- RegisterTableEntry->Index,
- RegisterTableEntry->ValidBitStart,
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
- RegisterTableEntry->Value
- );
- }
-
- break;
- //
- // MemoryMapped operations
- //
- case MemoryMapped:
- AcquireSpinLock (&CpuFlags->MemoryMappedLock);
- MmioBitFieldWrite32 (
- (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
- RegisterTableEntry->ValidBitStart,
- RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
- (UINT32)RegisterTableEntry->Value
- );
- ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
- break;
- //
- // Enable or disable cache
- //
- case CacheControl:
- //
- // If value of the entry is 0, then disable cache. Otherwise, enable cache.
- //
- if (RegisterTableEntry->Value == 0) {
- AsmDisableCache ();
- } else {
- AsmEnableCache ();
- }
-
- break;
-
- case Semaphore:
- // Semaphore works logic like below:
- //
- // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
- // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
- //
- // All threads (T0...Tn) waits in P() line and continues running
- // together.
- //
- //
- // T0 T1 ... Tn
- //
- // V(0...n) V(0...n) ... V(0...n)
- // n * P(0) n * P(1) ... n * P(n)
- //
- ASSERT (
- (ApLocation != NULL) &&
- (CpuStatus->ThreadCountPerPackage != 0) &&
- (CpuStatus->ThreadCountPerCore != 0) &&
- (CpuFlags->CoreSemaphoreCount != NULL) &&
- (CpuFlags->PackageSemaphoreCount != NULL)
- );
- switch (RegisterTableEntry->Value) {
- case CoreDepType:
- SemaphorePtr = CpuFlags->CoreSemaphoreCount;
- ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;
-
- CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
- //
- // Get Offset info for the first thread in the core which current thread belongs to.
- //
- FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
- CurrentThread = FirstThread + ApLocation->Thread;
-
- //
- // Different cores may have different valid threads in them. If driver maintail clearly
- // thread index in different cores, the logic will be much complicated.
- // Here driver just simply records the max thread number in all cores and use it as expect
- // thread number for all cores.
- // In below two steps logic, first current thread will Release semaphore for each thread
- // in current core. Maybe some threads are not valid in this core, but driver don't
- // care. Second, driver will let current thread wait semaphore for all valid threads in
- // current core. Because only the valid threads will do release semaphore for this
- // thread, driver here only need to wait the valid thread count.
- //
-
- //
- // First Notify ALL THREADs in current Core that this thread is ready.
- //
- for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex++) {
- S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
- }
-
- //
- // Second, check whether all VALID THREADs (not all threads) in current core are ready.
- //
- for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex++) {
- S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
- }
-
- break;
-
- case PackageDepType:
- SemaphorePtr = CpuFlags->PackageSemaphoreCount;
- ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
- //
- // Get Offset info for the first thread in the package which current thread belongs to.
- //
- FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
- //
- // Get the possible threads count for current package.
- //
- CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
-
- //
- // Different packages may have different valid threads in them. If driver maintail clearly
- // thread index in different packages, the logic will be much complicated.
- // Here driver just simply records the max thread number in all packages and use it as expect
- // thread number for all packages.
- // In below two steps logic, first current thread will Release semaphore for each thread
- // in current package. Maybe some threads are not valid in this package, but driver don't
- // care. Second, driver will let current thread wait semaphore for all valid threads in
- // current package. Because only the valid threads will do release semaphore for this
- // thread, driver here only need to wait the valid thread count.
- //
-
- //
- // First Notify ALL THREADS in current package that this thread is ready.
- //
- for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex++) {
- S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
- }
-
- //
- // Second, check whether VALID THREADS (not all threads) in current package are ready.
- //
- for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex++) {
- S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
- }
-
- break;
-
- default:
- break;
- }
-
- break;
-
- default:
- break;
- }
- }
-}
-
-/**
-
- Set Processor register for one AP.
-
- @param PreSmmRegisterTable Use pre Smm register table or register table.
-
-**/
-VOID
-SetRegister (
- IN BOOLEAN PreSmmRegisterTable
- )
-{
- CPU_FEATURE_INIT_DATA *FeatureInitData;
- CPU_REGISTER_TABLE *RegisterTable;
- CPU_REGISTER_TABLE *RegisterTables;
- UINT32 InitApicId;
- UINTN ProcIndex;
- UINTN Index;
-
- FeatureInitData = &mAcpiCpuData.CpuFeatureInitData;
-
- if (PreSmmRegisterTable) {
- RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->PreSmmInitRegisterTable;
- } else {
- RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)FeatureInitData->RegisterTable;
- }
-
- if (RegisterTables == NULL) {
- return;
- }
-
- InitApicId = GetInitialApicId ();
- RegisterTable = NULL;
- ProcIndex = (UINTN)-1;
- for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
- if (RegisterTables[Index].InitialApicId == InitApicId) {
- RegisterTable = &RegisterTables[Index];
- ProcIndex = Index;
- break;
- }
- }
-
- ASSERT (RegisterTable != NULL);
-
- if (FeatureInitData->ApLocation != 0) {
- ProgramProcessorRegister (
- RegisterTable,
- (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)FeatureInitData->ApLocation + ProcIndex,
- &FeatureInitData->CpuStatus,
- &mCpuFlags
- );
- } else {
- ProgramProcessorRegister (
- RegisterTable,
- NULL,
- &FeatureInitData->CpuStatus,
- &mCpuFlags
- );
- }
-}
-
-/**
- The function is invoked before SMBASE relocation in S3 path to restores CPU status.
-
- The function is invoked before SMBASE relocation in S3 path. It does first time microcode load
- and restores MTRRs for both BSP and APs.
-
- @param IsBsp The CPU this function executes on is BSP or not.
-
-**/
-VOID
-InitializeCpuBeforeRebase (
- IN BOOLEAN IsBsp
- )
-{
- LoadMtrrData (mAcpiCpuData.MtrrTable);
-
- SetRegister (TRUE);
-
- ProgramVirtualWireMode ();
- if (!IsBsp) {
- DisableLvtInterrupts ();
- }
-
- //
- // Count down the number with lock mechanism.
- //
- InterlockedDecrement (&mNumberToFinish);
-
- if (IsBsp) {
- //
- // Bsp wait here till all AP finish the initialization before rebase
- //
- while (mNumberToFinish > 0) {
- CpuPause ();
- }
- }
-}
-
-/**
- The function is invoked after SMBASE relocation in S3 path to restores CPU status.
-
- The function is invoked after SMBASE relocation in S3 path. It restores configuration according to
- data saved by normal boot path for both BSP and APs.
-
- @param IsBsp The CPU this function executes on is BSP or not.
-
-**/
-VOID
-InitializeCpuAfterRebase (
- IN BOOLEAN IsBsp
- )
-{
- UINTN TopOfStack;
- UINT8 Stack[128];
-
- SetRegister (FALSE);
-
- if (mSmmS3ResumeState->MpService2Ppi == 0) {
- if (IsBsp) {
- while (mNumberToFinish > 0) {
- CpuPause ();
- }
- } else {
- //
- // Place AP into the safe code, count down the number with lock mechanism in the safe code.
- //
- TopOfStack = (UINTN)Stack + sizeof (Stack);
- TopOfStack &= ~(UINTN)(CPU_STACK_ALIGNMENT - 1);
- CopyMem ((VOID *)(UINTN)mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
- TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
- }
- }
-}
-
-/**
- Cpu initialization procedure.
-
- @param[in,out] Buffer The pointer to private data buffer.
-
-**/
-VOID
-EFIAPI
-InitializeCpuProcedure (
- IN OUT VOID *Buffer
- )
-{
- BOOLEAN IsBsp;
-
- IsBsp = (BOOLEAN)(mBspApicId == GetApicId ());
-
- //
- // Skip initialization if mAcpiCpuData is not valid
- //
- if (mAcpiCpuData.NumberOfCpus > 0) {
- //
- // First time microcode load and restore MTRRs
- //
- InitializeCpuBeforeRebase (IsBsp);
- }
-
- if (IsBsp) {
- //
- // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
- //
- ExecuteFirstSmiInit ();
- }
-
- //
- // Skip initialization if mAcpiCpuData is not valid
- //
- if (mAcpiCpuData.NumberOfCpus > 0) {
- if (IsBsp) {
- //
- // mNumberToFinish should be set before AP executes InitializeCpuAfterRebase()
- //
- mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
- //
- // Signal that SMM base relocation is complete and to continue initialization for all APs.
- //
- mInitApsAfterSmmBaseReloc = TRUE;
- } else {
- //
- // AP Wait for BSP to signal SMM Base relocation done.
- //
- while (!mInitApsAfterSmmBaseReloc) {
- CpuPause ();
- }
- }
-
- //
- // Restore MSRs for BSP and all APs
- //
- InitializeCpuAfterRebase (IsBsp);
- }
-}
-
-/**
- Prepares startup vector for APs.
-
- This function prepares startup vector for APs.
-
- @param WorkingBuffer The address of the work buffer.
-**/
-VOID
-PrepareApStartupVector (
- EFI_PHYSICAL_ADDRESS WorkingBuffer
- )
-{
- EFI_PHYSICAL_ADDRESS StartupVector;
- MP_ASSEMBLY_ADDRESS_MAP AddressMap;
-
- //
- // Get the address map of startup code for AP,
- // including code size, and offset of long jump instructions to redirect.
- //
- ZeroMem (&AddressMap, sizeof (AddressMap));
- AsmGetAddressMap (&AddressMap);
-
- StartupVector = WorkingBuffer;
-
- //
- // Copy AP startup code to startup vector, and then redirect the long jump
- // instructions for mode switching.
- //
- CopyMem ((VOID *)(UINTN)StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
- *(UINT32 *)(UINTN)(StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32)(StartupVector + AddressMap.PModeEntryOffset);
- if (AddressMap.LongJumpOffset != 0) {
- *(UINT32 *)(UINTN)(StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32)(StartupVector + AddressMap.LModeEntryOffset);
- }
-
- //
- // Get the start address of exchange data between BSP and AP.
- //
- mExchangeInfo = (MP_CPU_EXCHANGE_INFO *)(UINTN)(StartupVector + AddressMap.Size);
- ZeroMem ((VOID *)mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));
-
- CopyMem ((VOID *)(UINTN)&mExchangeInfo->GdtrProfile, (VOID *)(UINTN)mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
- CopyMem ((VOID *)(UINTN)&mExchangeInfo->IdtrProfile, (VOID *)(UINTN)mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));
-
- mExchangeInfo->StackStart = (VOID *)(UINTN)mAcpiCpuData.StackAddress;
- mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
- mExchangeInfo->BufferStart = (UINT32)StartupVector;
- mExchangeInfo->Cr3 = (UINT32)(AsmReadCr3 ());
- mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
- mExchangeInfo->ApFunction = (VOID *)(UINTN)InitializeCpuProcedure;
-}
-
/**
Restore SMM Configuration in S3 boot path.
@@ -775,12 +71,11 @@ SmmRestoreCpu (
VOID
)
{
- SMM_S3_RESUME_STATE *SmmS3ResumeState;
- IA32_DESCRIPTOR Ia32Idtr;
- IA32_DESCRIPTOR X64Idtr;
- IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
- EFI_STATUS Status;
- EDKII_PEI_MP_SERVICES2_PPI *Mp2ServicePpi;
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ IA32_DESCRIPTOR Ia32Idtr;
+ IA32_DESCRIPTOR X64Idtr;
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
+ EFI_STATUS Status;
DEBUG ((DEBUG_INFO, "SmmRestoreCpu()\n"));
@@ -829,38 +124,10 @@ SmmRestoreCpu (
}
}
- mBspApicId = GetApicId ();
//
- // Skip AP initialization if mAcpiCpuData is not valid
+ // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
//
- if (mAcpiCpuData.NumberOfCpus > 0) {
- if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
- ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
- } else {
- ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
- }
-
- mNumberToFinish = (UINT32)mNumberOfCpus;
-
- //
- // Execute code for before SmmBaseReloc. Note: This flag is maintained across S3 boots.
- //
- mInitApsAfterSmmBaseReloc = FALSE;
-
- if (mSmmS3ResumeState->MpService2Ppi != 0) {
- Mp2ServicePpi = (EDKII_PEI_MP_SERVICES2_PPI *)(UINTN)mSmmS3ResumeState->MpService2Ppi;
- Mp2ServicePpi->StartupAllCPUs (Mp2ServicePpi, InitializeCpuProcedure, 0, NULL);
- } else {
- PrepareApStartupVector (mAcpiCpuData.StartupVector);
- //
- // Send INIT IPI - SIPI to all APs
- //
- SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
- InitializeCpuProcedure (NULL);
- }
- } else {
- InitializeCpuProcedure (NULL);
- }
+ ExecuteFirstSmiInit ();
//
// Set a flag to restore SMM configuration in S3 path.
@@ -920,19 +187,15 @@ SmmRestoreCpu (
/**
Initialize SMM S3 resume state structure used during S3 Resume.
- @param[in] Cr3 The base address of the page tables to use in SMM.
-
**/
VOID
InitSmmS3ResumeState (
- IN UINT32 Cr3
+ VOID
)
{
VOID *GuidHob;
EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
SMM_S3_RESUME_STATE *SmmS3ResumeState;
- EFI_PHYSICAL_ADDRESS Address;
- EFI_STATUS Status;
if (!mAcpiS3Enable) {
return;
@@ -968,7 +231,6 @@ InitSmmS3ResumeState (
}
SmmS3ResumeState->SmmS3Cr0 = (UINT32)AsmReadCr0 ();
- SmmS3ResumeState->SmmS3Cr3 = Cr3;
SmmS3ResumeState->SmmS3Cr4 = (UINT32)AsmReadCr4 ();
if (sizeof (UINTN) == sizeof (UINT64)) {
@@ -981,248 +243,9 @@ InitSmmS3ResumeState (
//
// Patch SmmS3ResumeState->SmmS3Cr3
+ // The SmmS3Cr3 is only used by S3Resume PEIM to switch CPU from 32bit to 64bit
//
- InitSmmS3Cr3 ();
- }
-
- //
- // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
- // protected mode on S3 path
- //
- Address = BASE_4GB - 1;
- Status = gBS->AllocatePages (
- AllocateMaxAddress,
- EfiACPIMemoryNVS,
- EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
- &Address
- );
- ASSERT_EFI_ERROR (Status);
- mApHltLoopCode = (UINT8 *)(UINTN)Address;
-}
-
-/**
- Copy register table from non-SMRAM into SMRAM.
-
- @param[in] DestinationRegisterTableList Points to destination register table.
- @param[in] SourceRegisterTableList Points to source register table.
- @param[in] NumberOfCpus Number of CPUs.
-
-**/
-VOID
-CopyRegisterTable (
- IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
- IN CPU_REGISTER_TABLE *SourceRegisterTableList,
- IN UINT32 NumberOfCpus
- )
-{
- UINTN Index;
- CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
-
- CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
- for (Index = 0; Index < NumberOfCpus; Index++) {
- if (DestinationRegisterTableList[Index].TableLength != 0) {
- DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
- RegisterTableEntry = AllocateCopyPool (
- DestinationRegisterTableList[Index].AllocatedSize,
- (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
- );
- ASSERT (RegisterTableEntry != NULL);
- DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
- }
- }
-}
-
-/**
- Check whether the register table is empty or not.
-
- @param[in] RegisterTable Point to the register table.
- @param[in] NumberOfCpus Number of CPUs.
-
- @retval TRUE The register table is empty.
- @retval FALSE The register table is not empty.
-**/
-BOOLEAN
-IsRegisterTableEmpty (
- IN CPU_REGISTER_TABLE *RegisterTable,
- IN UINT32 NumberOfCpus
- )
-{
- UINTN Index;
-
- if (RegisterTable != NULL) {
- for (Index = 0; Index < NumberOfCpus; Index++) {
- if (RegisterTable[Index].TableLength != 0) {
- return FALSE;
- }
- }
- }
-
- return TRUE;
-}
-
-/**
- Copy the data used to initialize processor register into SMRAM.
-
- @param[in,out] CpuFeatureInitDataDst Pointer to the destination CPU_FEATURE_INIT_DATA structure.
- @param[in] CpuFeatureInitDataSrc Pointer to the source CPU_FEATURE_INIT_DATA structure.
-
-**/
-VOID
-CopyCpuFeatureInitDatatoSmram (
- IN OUT CPU_FEATURE_INIT_DATA *CpuFeatureInitDataDst,
- IN CPU_FEATURE_INIT_DATA *CpuFeatureInitDataSrc
- )
-{
- CPU_STATUS_INFORMATION *CpuStatus;
-
- if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
- CpuFeatureInitDataDst->PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
- ASSERT (CpuFeatureInitDataDst->PreSmmInitRegisterTable != 0);
-
- CopyRegisterTable (
- (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->PreSmmInitRegisterTable,
- (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->PreSmmInitRegisterTable,
- mAcpiCpuData.NumberOfCpus
- );
- }
-
- if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
- CpuFeatureInitDataDst->RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
- ASSERT (CpuFeatureInitDataDst->RegisterTable != 0);
-
- CopyRegisterTable (
- (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataDst->RegisterTable,
- (CPU_REGISTER_TABLE *)(UINTN)CpuFeatureInitDataSrc->RegisterTable,
- mAcpiCpuData.NumberOfCpus
- );
- }
-
- CpuStatus = &CpuFeatureInitDataDst->CpuStatus;
- CopyMem (CpuStatus, &CpuFeatureInitDataSrc->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
-
- if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage != 0) {
- CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
- sizeof (UINT32) * CpuStatus->PackageCount,
- (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerPackage
- );
- ASSERT (CpuStatus->ThreadCountPerPackage != 0);
- }
-
- if (CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore != 0) {
- CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
- sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
- (UINT32 *)(UINTN)CpuFeatureInitDataSrc->CpuStatus.ThreadCountPerCore
- );
- ASSERT (CpuStatus->ThreadCountPerCore != 0);
- }
-
- if (CpuFeatureInitDataSrc->ApLocation != 0) {
- CpuFeatureInitDataDst->ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
- mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
- (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)CpuFeatureInitDataSrc->ApLocation
- );
- ASSERT (CpuFeatureInitDataDst->ApLocation != 0);
- }
-}
-
-/**
- Get ACPI CPU data.
-
-**/
-VOID
-GetAcpiCpuData (
- VOID
- )
-{
- ACPI_CPU_DATA *AcpiCpuData;
- IA32_DESCRIPTOR *Gdtr;
- IA32_DESCRIPTOR *Idtr;
- VOID *GdtForAp;
- VOID *IdtForAp;
- VOID *MachineCheckHandlerForAp;
- CPU_STATUS_INFORMATION *CpuStatus;
-
- if (!mAcpiS3Enable) {
- return;
- }
-
- //
- // Prevent use of mAcpiCpuData by initialize NumberOfCpus to 0
- //
- mAcpiCpuData.NumberOfCpus = 0;
-
- //
- // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
- //
- AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
- if (AcpiCpuData == 0) {
- return;
- }
-
- //
- // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
- //
- CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
-
- mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
- ASSERT (mAcpiCpuData.MtrrTable != 0);
-
- CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
-
- mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
- ASSERT (mAcpiCpuData.GdtrProfile != 0);
-
- CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
-
- mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
- ASSERT (mAcpiCpuData.IdtrProfile != 0);
-
- CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
-
- //
- // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
- //
- Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
- Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
-
- GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
- ASSERT (GdtForAp != NULL);
- IdtForAp = (VOID *)((UINTN)GdtForAp + (Gdtr->Limit + 1));
- MachineCheckHandlerForAp = (VOID *)((UINTN)IdtForAp + (Idtr->Limit + 1));
-
- CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
- CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
- CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
-
- Gdtr->Base = (UINTN)GdtForAp;
- Idtr->Base = (UINTN)IdtForAp;
- mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
-
- ZeroMem (&mAcpiCpuData.CpuFeatureInitData, sizeof (CPU_FEATURE_INIT_DATA));
-
- if (!PcdGetBool (PcdCpuFeaturesInitOnS3Resume)) {
- //
- // If the CPU features will not be initialized by CpuFeaturesPei module during
- // next ACPI S3 resume, copy the CPU features initialization data into SMRAM,
- // which will be consumed in SmmRestoreCpu during next S3 resume.
- //
- CopyCpuFeatureInitDatatoSmram (&mAcpiCpuData.CpuFeatureInitData, &AcpiCpuData->CpuFeatureInitData);
-
- CpuStatus = &mAcpiCpuData.CpuFeatureInitData.CpuStatus;
-
- mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
- sizeof (UINT32) * CpuStatus->PackageCount *
- CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
- );
- ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
-
- mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
- sizeof (UINT32) * CpuStatus->PackageCount *
- CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
- );
- ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
-
- InitializeSpinLock ((SPIN_LOCK *)&mCpuFlags.MemoryMappedLock);
+ InitSmmS3Cr3 ((UINTN *)&SmmS3ResumeState->SmmS3Cr3);
}
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
deleted file mode 100644
index dbd1418c0d..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
+++ /dev/null
@@ -1,153 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; MpFuncs.nasm
-;
-; Abstract:
-;
-; This is the assembly code for Multi-processor S3 support
-;
-;-------------------------------------------------------------------------------
-
-SECTION .text
-
-extern ASM_PFX(InitializeFloatingPointUnits)
-
-%define VacantFlag 0x0
-%define NotVacantFlag 0xff
-
-%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
-%define StackStart LockLocation + 0x4
-%define StackSize LockLocation + 0x8
-%define RendezvousProc LockLocation + 0xC
-%define GdtrProfile LockLocation + 0x10
-%define IdtrProfile LockLocation + 0x16
-%define BufferStart LockLocation + 0x1C
-
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
-;procedure serializes all the AP processors through an Init sequence. It must be
-;noted that APs arrive here very raw...ie: real mode, no stack.
-;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
-;IS IN MACHINE CODE.
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
-
-BITS 16
-global ASM_PFX(RendezvousFunnelProc)
-ASM_PFX(RendezvousFunnelProc):
-RendezvousFunnelProcStart:
-
-; At this point CS = 0x(vv00) and ip= 0x0.
-
- mov ax, cs
- mov ds, ax
- mov es, ax
- mov ss, ax
- xor ax, ax
- mov fs, ax
- mov gs, ax
-
-flat32Start:
-
- mov si, BufferStart
- mov edx,dword [si] ; EDX is keeping the start address of wakeup buffer
-
- mov si, GdtrProfile
-o32 lgdt [cs:si]
-
- mov si, IdtrProfile
-o32 lidt [cs:si]
-
- xor ax, ax
- mov ds, ax
-
- mov eax, cr0 ; Get control register 0
- or eax, 0x000000001 ; Set PE bit (bit #0)
- mov cr0, eax
-
-FLAT32_JUMP:
-
-a32 jmp dword 0x20:0x0
-
-BITS 32
-PMODE_ENTRY: ; protected mode entry point
-
- mov ax, 0x8
-o16 mov ds, ax
-o16 mov es, ax
-o16 mov fs, ax
-o16 mov gs, ax
-o16 mov ss, ax ; Flat mode setup.
-
- mov esi, edx
-
- mov edi, esi
- add edi, LockLocation
- mov al, NotVacantFlag
-TestLock:
- xchg byte [edi], al
- cmp al, NotVacantFlag
- jz TestLock
-
-ProgramStack:
-
- mov edi, esi
- add edi, StackSize
- mov eax, dword [edi]
- mov edi, esi
- add edi, StackStart
- add eax, dword [edi]
- mov esp, eax
- mov dword [edi], eax
-
-Releaselock:
-
- mov al, VacantFlag
- mov edi, esi
- add edi, LockLocation
- xchg byte [edi], al
-
- ;
- ; Call assembly function to initialize FPU.
- ;
- mov ebx, ASM_PFX(InitializeFloatingPointUnits)
- call ebx
- ;
- ; Call C Function
- ;
- mov edi, esi
- add edi, RendezvousProc
- mov eax, dword [edi]
-
- test eax, eax
- jz GoToSleep
- call eax ; Call C function
-
-GoToSleep:
- cli
- hlt
- jmp $-2
-
-RendezvousFunnelProcEnd:
-;-------------------------------------------------------------------------------------
-; AsmGetAddressMap (&AddressMap);
-;-------------------------------------------------------------------------------------
-global ASM_PFX(AsmGetAddressMap)
-ASM_PFX(AsmGetAddressMap):
-
- pushad
- mov ebp,esp
-
- mov ebx, dword [ebp+0x24]
- mov dword [ebx], RendezvousFunnelProcStart
- mov dword [ebx+0x4], PMODE_ENTRY - RendezvousFunnelProcStart
- mov dword [ebx+0x8], FLAT32_JUMP - RendezvousFunnelProcStart
- mov dword [ebx+0xc], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
-
- popad
- ret
-
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
index b11264ce4a..dfc9668dbc 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -1,7 +1,7 @@
/** @file
Page table manipulation functions for IA-32 processors
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -67,15 +67,19 @@ SmmInitPageTable (
}
/**
- Page Fault handler for SMM use.
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
**/
-VOID
-SmiDefaultPFHandler (
+UINT64
+AllocPage (
VOID
)
{
CpuDeadLoop ();
+
+ return 0;
}
/**
@@ -179,13 +183,7 @@ SmiPFHandler (
}
if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
- DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%x)!\n", PFAddress));
- DEBUG_CODE (
- DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
- );
- CpuDeadLoop ();
- goto Exit;
}
}
@@ -196,7 +194,10 @@ SmiPFHandler (
);
} else {
DumpCpuContext (InterruptType, SystemContext);
- SmiDefaultPFHandler ();
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+ CpuDeadLoop ();
}
Exit:
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
index 636dc8d92f..0c1cc51ada 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for Ia32 arch specific.
-Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -142,33 +142,6 @@ InitGdt (
}
/**
- Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
-
- @param[in] ApHltLoopCode The address of the safe hlt-loop function.
- @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
- @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
-
-**/
-VOID
-TransferApToSafeState (
- IN UINTN ApHltLoopCode,
- IN UINTN TopOfStack,
- IN UINTN NumberToFinishAddress
- )
-{
- SwitchStack (
- (SWITCH_STACK_ENTRY_POINT)ApHltLoopCode,
- (VOID *)NumberToFinishAddress,
- NULL,
- (VOID *)TopOfStack
- );
- //
- // It should never reach here
- //
- ASSERT (FALSE);
-}
-
-/**
Initialize the shadow stack related data structure.
@param CpuIndex The index of CPU.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
index 650090e534..b279c8a09c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
IA-32 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -12,13 +12,17 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
)
{
- mSmmS3ResumeState->SmmS3Cr3 = GenSmmPageTable (PagingPae, mPhysicalAddressBits);
+ ASSERT (Cr3 != NULL);
+
+ *Cr3 = GenSmmPageTable (PagingPae, mPhysicalAddressBits);
return;
}
@@ -72,3 +76,15 @@ ClearTrapFlag (
{
SystemContext.SystemContextIa32->Eflags &= (UINTN) ~BIT8;
}
+
+/**
+ Create new entry in page table for page fault address in SmmProfilePFHandler.
+
+**/
+VOID
+SmmProfileMapPFAddress (
+ VOID
+ )
+{
+ CpuDeadLoop ();
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
index 6c95f2bb19..de4a3a3a25 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
@@ -1,7 +1,7 @@
/** @file
IA-32 processor specific header file to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -73,10 +73,12 @@ RestorePageTableAbove4G (
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
);
/**
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 10baf3ceb9..570e99177f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1,7 +1,7 @@
/** @file
SMM MP service implementation
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -268,7 +268,7 @@ SmmWaitForApArrival (
// Sync with APs 1st timeout
//
for (Timer = StartSyncTimer ();
- !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
+ !IsSyncTimerTimeout (Timer, mTimeoutTicker) && !(LmceEn && LmceSignal);
)
{
mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
@@ -309,7 +309,7 @@ SmmWaitForApArrival (
// Sync with APs 2nd timeout.
//
for (Timer = StartSyncTimer ();
- !IsSyncTimerTimeout (Timer);
+ !IsSyncTimerTimeout (Timer, mTimeoutTicker2);
)
{
mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
@@ -736,7 +736,7 @@ APHandler (
// Timeout BSP
//
for (Timer = StartSyncTimer ();
- !IsSyncTimerTimeout (Timer) &&
+ !IsSyncTimerTimeout (Timer, mTimeoutTicker) &&
!(*mSmmMpSyncData->InsideSmm);
)
{
@@ -764,7 +764,7 @@ APHandler (
// Now clock BSP for the 2nd time
//
for (Timer = StartSyncTimer ();
- !IsSyncTimerTimeout (Timer) &&
+ !IsSyncTimerTimeout (Timer, mTimeoutTicker2) &&
!(*mSmmMpSyncData->InsideSmm);
)
{
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index e400bee8d5..e7149ff7fd 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -1,7 +1,7 @@
/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.<BR>
@@ -435,8 +435,8 @@ ExecuteFirstSmiInit (
/**
SMM Ready To Lock event notification handler.
- The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
- perform additional lock actions that must be performed from SMM on the next SMI.
+ mSmmReadyToLock is set to perform additional lock actions that must be
+ performed from SMM on the next SMI.
@param[in] Protocol Points to the protocol's unique identifier.
@param[in] Interface Points to the interface instance.
@@ -452,8 +452,6 @@ SmmReadyToLockEventNotify (
IN EFI_HANDLE Handle
)
{
- GetAcpiCpuData ();
-
//
// Cache a copy of UEFI memory map before we start profiling feature.
//
@@ -1361,7 +1359,7 @@ PiCpuSmmEntry (
InitSmmProfile (Cr3);
GetAcpiS3EnableFlag ();
- InitSmmS3ResumeState (Cr3);
+ InitSmmS3ResumeState ();
DEBUG ((DEBUG_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index f42910ddf1..abbdd79f05 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -1,7 +1,7 @@
/** @file
Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.<BR>
@@ -471,6 +471,9 @@ extern BOOLEAN mSmmDebugAgentSupport;
//
extern UINT64 mAddressEncMask;
+extern UINT64 mTimeoutTicker;
+extern UINT64 mTimeoutTicker2;
+
/**
Create 4G PageTable in SMRAM.
@@ -533,15 +536,17 @@ StartSyncTimer (
);
/**
- Check if the SMM AP Sync timer is timeout.
+ Check if the SMM AP Sync Timer is timeout specified by Timeout.
- @param Timer The start timer from the begin.
+ @param Timer The start timer from the begin.
+ @param Timeout The timeout ticker to wait.
**/
BOOLEAN
EFIAPI
IsSyncTimerTimeout (
- IN UINT64 Timer
+ IN UINT64 Timer,
+ IN UINT64 Timeout
);
/**
@@ -1040,20 +1045,9 @@ extern BOOLEAN mSmmS3Flag;
/**
Initialize SMM S3 resume state structure used during S3 Resume.
- @param[in] Cr3 The base address of the page tables to use in SMM.
-
**/
VOID
InitSmmS3ResumeState (
- IN UINT32 Cr3
- );
-
-/**
- Get ACPI CPU data.
-
-**/
-VOID
-GetAcpiCpuData (
VOID
);
@@ -1076,21 +1070,6 @@ GetAcpiS3EnableFlag (
);
/**
- Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
-
- @param[in] ApHltLoopCode The address of the safe hlt-loop function.
- @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
- @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
-
-**/
-VOID
-TransferApToSafeState (
- IN UINTN ApHltLoopCode,
- IN UINTN TopOfStack,
- IN UINTN NumberToFinishAddress
- );
-
-/**
Set ShadowStack memory.
@param[in] Cr3 The page table base address.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index 3354f94a64..2412f4caeb 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -4,7 +4,7 @@
# This SMM driver performs SMM initialization, deploy SMM Entry Vector,
# provides CPU specific services in SMM.
#
-# Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
# Copyright (C) 2023 - 2024 Advanced Micro Devices, Inc. All rights reserved.<BR>
#
@@ -53,7 +53,6 @@
Ia32/SmmProfileArch.h
Ia32/SmiEntry.nasm
Ia32/SmiException.nasm
- Ia32/MpFuncs.nasm
Ia32/Cet.nasm
[Sources.X64]
@@ -63,7 +62,6 @@
X64/SmmProfileArch.h
X64/SmiEntry.nasm
X64/SmiException.nasm
- X64/MpFuncs.nasm
X64/Cet.nasm
[Packages]
@@ -132,16 +130,15 @@
gUefiCpuPkgTokenSpaceGuid.PcdSmmApPerfLogEnable ## CONSUMES
[Pcd]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout2 ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout ## CONSUMES
- gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress ## SOMETIMES_CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmShadowStackSize ## SOMETIMES_CONSUMES
- gUefiCpuPkgTokenSpaceGuid.PcdCpuFeaturesInitOnS3Resume ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask ## CONSUMES
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
index b8c356bfe8..6e0c251397 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -1647,115 +1647,49 @@ EdkiiSmmClearMemoryAttributes (
}
/**
- Create page table based on input PagingMode, LinearAddress and Length.
+ Create page table based on input PagingMode and PhysicalAddressBits in smm.
+
+ @param[in] PagingMode The paging mode.
+ @param[in] PhysicalAddressBits The bits of physical address to map.
- @param[in, out] PageTable The pointer to the page table.
- @param[in] PagingMode The paging mode.
- @param[in] LinearAddress The start of the linear address range.
- @param[in] Length The length of the linear address range.
+ @retval PageTable Address
**/
-VOID
-GenPageTable (
- IN OUT UINTN *PageTable,
- IN PAGING_MODE PagingMode,
- IN UINT64 LinearAddress,
- IN UINT64 Length
+UINTN
+GenSmmPageTable (
+ IN PAGING_MODE PagingMode,
+ IN UINT8 PhysicalAddressBits
)
{
- RETURN_STATUS Status;
UINTN PageTableBufferSize;
+ UINTN PageTable;
VOID *PageTableBuffer;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
+ RETURN_STATUS Status;
+ UINTN GuardPage;
+ UINTN Index;
+ UINT64 Length;
+ Length = LShiftU64 (1, PhysicalAddressBits);
+ PageTable = 0;
+ PageTableBufferSize = 0;
MapMask.Uint64 = MAX_UINT64;
- MapAttribute.Uint64 = mAddressEncMask|LinearAddress;
+ MapAttribute.Uint64 = mAddressEncMask;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapAttribute.Bits.UserSupervisor = 1;
MapAttribute.Bits.Accessed = 1;
MapAttribute.Bits.Dirty = 1;
- PageTableBufferSize = 0;
-
- Status = PageTableMap (
- PageTable,
- PagingMode,
- NULL,
- &PageTableBufferSize,
- LinearAddress,
- Length,
- &MapAttribute,
- &MapMask,
- NULL
- );
- if (Status == RETURN_BUFFER_TOO_SMALL) {
- DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
- PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
- ASSERT (PageTableBuffer != NULL);
- Status = PageTableMap (
- PageTable,
- PagingMode,
- PageTableBuffer,
- &PageTableBufferSize,
- LinearAddress,
- Length,
- &MapAttribute,
- &MapMask,
- NULL
- );
- }
+ Status = PageTableMap (&PageTable, PagingMode, NULL, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
+ ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
+ DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
+ PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
+ ASSERT (PageTableBuffer != NULL);
+ Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
ASSERT (Status == RETURN_SUCCESS);
ASSERT (PageTableBufferSize == 0);
-}
-
-/**
- Create page table based on input PagingMode and PhysicalAddressBits in smm.
-
- @param[in] PagingMode The paging mode.
- @param[in] PhysicalAddressBits The bits of physical address to map.
-
- @retval PageTable Address
-
-**/
-UINTN
-GenSmmPageTable (
- IN PAGING_MODE PagingMode,
- IN UINT8 PhysicalAddressBits
- )
-{
- UINTN PageTable;
- RETURN_STATUS Status;
- UINTN GuardPage;
- UINTN Index;
- UINT64 Length;
- PAGING_MODE SmramPagingMode;
-
- PageTable = 0;
- Length = LShiftU64 (1, PhysicalAddressBits);
- ASSERT (Length > mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize);
-
- if (sizeof (UINTN) == sizeof (UINT64)) {
- SmramPagingMode = m5LevelPagingNeeded ? Paging5Level4KB : Paging4Level4KB;
- } else {
- SmramPagingMode = PagingPae4KB;
- }
-
- ASSERT (mCpuHotPlugData.SmrrBase % SIZE_4KB == 0);
- ASSERT (mCpuHotPlugData.SmrrSize % SIZE_4KB == 0);
- GenPageTable (&PageTable, PagingMode, 0, mCpuHotPlugData.SmrrBase);
-
- //
- // Map smram range in 4K page granularity to avoid subsequent page split when smm ready to lock.
- // If BSP are splitting the 1G/2M paging entries to 512 2M/4K paging entries, and all APs are
- // still running in SMI at the same time, which might access the affected linear-address range
- // between the time of modification and the time of invalidation access. That will be a potential
- // problem leading exception happen.
- //
- GenPageTable (&PageTable, SmramPagingMode, mCpuHotPlugData.SmrrBase, mCpuHotPlugData.SmrrSize);
-
- GenPageTable (&PageTable, PagingMode, mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize, Length - mCpuHotPlugData.SmrrBase - mCpuHotPlugData.SmrrSize);
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
//
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
index 8142d3ceac..115d477fd0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -1,7 +1,7 @@
/** @file
Enable SMM profile.
-Copyright (c) 2012 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -298,41 +298,35 @@ IsInSmmRanges (
}
/**
- Check if the memory address will be mapped by 4KB-page.
+ Check if the SMM profile page fault address above 4GB is in protected range or not.
- @param Address The address of Memory.
- @param Nx The flag indicates if the memory is execute-disable.
+ @param[in] Address The address of Memory.
+ @param[out] Nx The flag indicates if the memory is execute-disable.
+
+ @retval TRUE The input address is in protected range.
+ @retval FALSE The input address is not in protected range.
**/
BOOLEAN
-IsAddressValid (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN BOOLEAN *Nx
+IsSmmProfilePFAddressAbove4GValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ OUT BOOLEAN *Nx
)
{
UINTN Index;
- if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
- //
- // Check configuration
- //
- for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
- if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
- *Nx = mProtectionMemRange[Index].Nx;
- return mProtectionMemRange[Index].Present;
- }
- }
-
- *Nx = TRUE;
- return FALSE;
- } else {
- *Nx = TRUE;
- if (IsInSmmRanges (Address)) {
- *Nx = FALSE;
+ //
+ // Check configuration
+ //
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
+ *Nx = mProtectionMemRange[Index].Nx;
+ return mProtectionMemRange[Index].Present;
}
-
- return TRUE;
}
+
+ *Nx = TRUE;
+ return FALSE;
}
/**
@@ -438,8 +432,23 @@ InitProtectedMemRange (
&MemorySpaceMap
);
for (Index = 0; Index < NumberOfDescriptors; Index++) {
- if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
- NumberOfAddedDescriptors++;
+ if ((MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo)) {
+ if (ADDRESS_IS_ALIGNED (MemorySpaceMap[Index].BaseAddress, SIZE_4KB) &&
+ (MemorySpaceMap[Index].Length % SIZE_4KB == 0))
+ {
+ NumberOfAddedDescriptors++;
+ } else {
+ //
+ // Skip the MMIO range that BaseAddress and Length are not 4k aligned since
+ // the minimum granularity of the page table is 4k
+ //
+ DEBUG ((
+ DEBUG_WARN,
+ "MMIO range [0x%lx, 0x%lx] is skipped since it is not 4k aligned.\n",
+ MemorySpaceMap[Index].BaseAddress,
+ MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length
+ ));
+ }
}
}
@@ -486,15 +495,16 @@ InitProtectedMemRange (
// Create MMIO ranges which are set to present and execution-disable.
//
for (Index = 0; Index < NumberOfDescriptors; Index++) {
- if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
- continue;
+ if ((MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) &&
+ ADDRESS_IS_ALIGNED (MemorySpaceMap[Index].BaseAddress, SIZE_4KB) &&
+ (MemorySpaceMap[Index].Length % SIZE_4KB == 0))
+ {
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
+ NumberOfProtectRange++;
}
-
- mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
- mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
- mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
- mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
- NumberOfProtectRange++;
}
//
@@ -600,11 +610,7 @@ InitPaging (
PERF_FUNCTION_BEGIN ();
PageTable = AsmReadCr3 ();
- if (sizeof (UINTN) == sizeof (UINT32)) {
- Limit = BASE_4GB;
- } else {
- Limit = (IsRestrictedMemoryAccess ()) ? LShiftU64 (1, mPhysicalAddressBits) : BASE_4GB;
- }
+ Limit = LShiftU64 (1, mPhysicalAddressBits);
WRITE_UNPROTECT_RO_PAGES (WriteProtect, CetEnabled);
@@ -723,6 +729,11 @@ SmmProfileStart (
// The flag indicates SMM profile starts to work.
//
mSmmProfileStart = TRUE;
+
+ //
+ // Tell #PF handler to prepare a #DB subsequently.
+ //
+ mSetupDebugTrap = TRUE;
}
/**
@@ -1110,11 +1121,6 @@ InitSmmProfile (
// Initialize profile IDT.
//
InitIdtr ();
-
- //
- // Tell #PF handler to prepare a #DB subsequently.
- //
- mSetupDebugTrap = TRUE;
}
/**
@@ -1166,6 +1172,21 @@ RestorePageTableBelow4G (
// PDPTE
//
PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
+
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // For 32-bit case, because a full map page table for 0-4G is created by default,
+ // and since the PDPTE must be one non-leaf entry, the PDPTE must always be present.
+ // So, ASSERT it must be the 64-bit case running here.
+ //
+ ASSERT (sizeof (UINT64) == sizeof (UINTN));
+
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+
ASSERT (PageTable[PTIndex] != 0);
PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
@@ -1173,9 +1194,9 @@ RestorePageTableBelow4G (
// PD
//
PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
- if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
//
- // Large page
+ // A 2M page size will be used directly when the 2M entry is marked as non-present.
//
//
@@ -1202,7 +1223,8 @@ RestorePageTableBelow4G (
}
} else {
//
- // Small page
+ // If the 2M entry is marked as present, a 4K page size will be utilized.
+ // In this scenario, the 2M entry must be a non-leaf entry.
//
ASSERT (PageTable[PTIndex] != 0);
PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
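The indexes used in this function come straight from the 4-level paging layout; the sketch below only restates the bit fields, with hypothetical variable names:

    UINT64  PFAddress;
    UINTN   PdptIndex;
    UINTN   PdIndex;
    UINTN   PtIndex;

    PFAddress = AsmReadCr2 ();
    PdptIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);   // PDPT index (1GB granularity)
    PdIndex   = (UINTN)BitFieldRead64 (PFAddress, 21, 29);   // PD index   (2MB granularity)
    PtIndex   = (UINTN)BitFieldRead64 (PFAddress, 12, 20);   // PT index   (4KB granularity)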
@@ -1305,14 +1327,6 @@ SmmProfilePFHandler (
UINT8 SoftSmiValue;
EFI_SMM_SAVE_STATE_IO_INFO IoInfo;
- if (!mSmmProfileStart) {
- //
- // If SMM profile does not start, call original page fault handler.
- //
- SmiDefaultPFHandler ();
- return;
- }
-
if (mBtsSupported) {
DisableBTS ();
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
index 964dd52817..42a6effe52 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
@@ -1,7 +1,7 @@
/** @file
SMM profile internal header file.
-Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -96,12 +96,11 @@ typedef struct {
UINT64 SmiCmd;
} SMM_PROFILE_ENTRY;
-extern SMM_S3_RESUME_STATE *mSmmS3ResumeState;
-extern UINTN gSmiExceptionHandlers[];
-extern BOOLEAN mXdSupported;
-X86_ASSEMBLY_PATCH_LABEL gPatchXdSupported;
-X86_ASSEMBLY_PATCH_LABEL gPatchMsrIa32MiscEnableSupported;
-extern UINTN *mPFEntryCount;
+extern UINTN gSmiExceptionHandlers[];
+extern BOOLEAN mXdSupported;
+X86_ASSEMBLY_PATCH_LABEL gPatchXdSupported;
+X86_ASSEMBLY_PATCH_LABEL gPatchMsrIa32MiscEnableSupported;
+extern UINTN *mPFEntryCount;
extern UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
extern UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
@@ -130,24 +129,38 @@ IsAddressSplit (
);
/**
- Check if the memory address will be mapped by 4KB-page.
+ Check if the SMM profile page fault address above 4GB is in protected range or not.
- @param Address The address of Memory.
- @param Nx The flag indicates if the memory is execute-disable.
+ @param[in] Address The address of Memory.
+ @param[out] Nx The flag indicates if the memory is execute-disable.
+
+ @retval TRUE The input address is in protected range.
+ @retval FALSE The input address is not in protected range.
**/
BOOLEAN
-IsAddressValid (
- IN EFI_PHYSICAL_ADDRESS Address,
- IN BOOLEAN *Nx
+IsSmmProfilePFAddressAbove4GValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ OUT BOOLEAN *Nx
+ );
+
+/**
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
);
/**
- Page Fault handler for SMM use.
+ Create new entry in page table for page fault address in SmmProfilePFHandler.
**/
VOID
-SmiDefaultPFHandler (
+SmmProfileMapPFAddress (
VOID
);
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
index 0c070c5736..8d29ba7326 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
@@ -1,7 +1,7 @@
/** @file
SMM Timer feature support
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -9,6 +9,9 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
#include "PiSmmCpuDxeSmm.h"
UINT64 mTimeoutTicker = 0;
+
+UINT64 mTimeoutTicker2 = 0;
+
//
// Number of counts in a roll-over cycle of the performance counter.
//
@@ -36,6 +39,10 @@ InitializeSmmTimer (
MultU64x64 (TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout)),
1000 * 1000
);
+ mTimeoutTicker2 = DivU64x32 (
+ MultU64x64 (TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout2)),
+ 1000 * 1000
+ );
if (End < Start) {
mCountDown = TRUE;
mCycle = Start - End;
@@ -59,15 +66,17 @@ StartSyncTimer (
}
/**
- Check if the SMM AP Sync timer is timeout.
+  Check whether the SMM AP Sync Timer has reached the timeout specified by Timeout.
- @param Timer The start timer from the begin.
+ @param Timer The start timer from the begin.
+ @param Timeout The timeout ticker to wait.
**/
BOOLEAN
EFIAPI
IsSyncTimerTimeout (
- IN UINT64 Timer
+ IN UINT64 Timer,
+ IN UINT64 Timeout
)
{
UINT64 CurrentTimer;
@@ -105,5 +114,5 @@ IsSyncTimerTimeout (
}
}
- return (BOOLEAN)(Delta >= mTimeoutTicker);
+ return (BOOLEAN)(Delta >= Timeout);
}
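With the extra parameter, the ticker to compare against is now chosen by the caller. A minimal caller-side sketch, assuming a hypothetical AllApsArrived predicate; mTimeoutTicker and mTimeoutTicker2 are the globals initialized above:

    UINT64  Timer;

    Timer = StartSyncTimer ();
    while (!AllApsArrived ()) {
      if (IsSyncTimerTimeout (Timer, mTimeoutTicker)) {
        //
        // First-stage timeout expired; the caller decides whether to keep
        // waiting against mTimeoutTicker2 or to give up.
        //
        break;
      }
    }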
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
deleted file mode 100644
index a12538f72b..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
+++ /dev/null
@@ -1,189 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; MpFuncs.nasm
-;
-; Abstract:
-;
-; This is the assembly code for Multi-processor S3 support
-;
-;-------------------------------------------------------------------------------
-
-%define VacantFlag 0x0
-%define NotVacantFlag 0xff
-
-%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
-%define StackStartAddressLocation LockLocation + 0x8
-%define StackSizeLocation LockLocation + 0x10
-%define CProcedureLocation LockLocation + 0x18
-%define GdtrLocation LockLocation + 0x20
-%define IdtrLocation LockLocation + 0x2A
-%define BufferStartLocation LockLocation + 0x34
-%define Cr3OffsetLocation LockLocation + 0x38
-%define InitializeFloatingPointUnitsAddress LockLocation + 0x3C
-
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
-;procedure serializes all the AP processors through an Init sequence. It must be
-;noted that APs arrive here very raw...ie: real mode, no stack.
-;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
-;IS IN MACHINE CODE.
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
-
-;text SEGMENT
-DEFAULT REL
-SECTION .text
-
-BITS 16
-global ASM_PFX(RendezvousFunnelProc)
-ASM_PFX(RendezvousFunnelProc):
-RendezvousFunnelProcStart:
-
-; At this point CS = 0x(vv00) and ip= 0x0.
-
- mov ax, cs
- mov ds, ax
- mov es, ax
- mov ss, ax
- xor ax, ax
- mov fs, ax
- mov gs, ax
-
-flat32Start:
-
- mov si, BufferStartLocation
- mov edx,dword [si] ; EDX is keeping the start address of wakeup buffer
-
- mov si, Cr3OffsetLocation
- mov ecx,dword [si] ; ECX is keeping the value of CR3
-
- mov si, GdtrLocation
-o32 lgdt [cs:si]
-
- mov si, IdtrLocation
-o32 lidt [cs:si]
-
- xor ax, ax
- mov ds, ax
-
- mov eax, cr0 ; Get control register 0
- or eax, 0x000000001 ; Set PE bit (bit #0)
- mov cr0, eax
-
-FLAT32_JUMP:
-
-a32 jmp dword 0x20:0x0
-
-BITS 32
-PMODE_ENTRY: ; protected mode entry point
-
- mov ax, 0x18
-o16 mov ds, ax
-o16 mov es, ax
-o16 mov fs, ax
-o16 mov gs, ax
-o16 mov ss, ax ; Flat mode setup.
-
- mov eax, cr4
- bts eax, 5
- mov cr4, eax
-
- mov cr3, ecx
-
- mov esi, edx ; Save wakeup buffer address
-
- mov ecx, 0xc0000080 ; EFER MSR number.
- rdmsr ; Read EFER.
- bts eax, 8 ; Set LME=1.
- wrmsr ; Write EFER.
-
- mov eax, cr0 ; Read CR0.
- bts eax, 31 ; Set PG=1.
- mov cr0, eax ; Write CR0.
-
-LONG_JUMP:
-
-a16 jmp dword 0x38:0x0
-
-BITS 64
-LongModeStart:
-
- mov ax, 0x30
-o16 mov ds, ax
-o16 mov es, ax
-o16 mov ss, ax
-
- mov edi, esi
- add edi, LockLocation
- mov al, NotVacantFlag
-TestLock:
- xchg byte [edi], al
- cmp al, NotVacantFlag
- jz TestLock
-
-ProgramStack:
-
- mov edi, esi
- add edi, StackSizeLocation
- mov rax, qword [edi]
- mov edi, esi
- add edi, StackStartAddressLocation
- add rax, qword [edi]
- mov rsp, rax
- mov qword [edi], rax
-
-Releaselock:
-
- mov al, VacantFlag
- mov edi, esi
- add edi, LockLocation
- xchg byte [edi], al
-
- ;
- ; Call assembly function to initialize FPU.
- ;
- mov rax, qword [esi + InitializeFloatingPointUnitsAddress]
- sub rsp, 0x20
- call rax
- add rsp, 0x20
-
- ;
- ; Call C Function
- ;
- mov edi, esi
- add edi, CProcedureLocation
- mov rax, qword [edi]
-
- test rax, rax
- jz GoToSleep
-
- sub rsp, 0x20
- call rax
- add rsp, 0x20
-
-GoToSleep:
- cli
- hlt
- jmp $-2
-
-RendezvousFunnelProcEnd:
-
-;-------------------------------------------------------------------------------------
-; AsmGetAddressMap (&AddressMap);
-;-------------------------------------------------------------------------------------
-; comments here for definition of address map
-global ASM_PFX(AsmGetAddressMap)
-ASM_PFX(AsmGetAddressMap):
- lea rax, [RendezvousFunnelProcStart]
- mov qword [rcx], rax
- mov qword [rcx+0x8], PMODE_ENTRY - RendezvousFunnelProcStart
- mov qword [rcx+0x10], FLAT32_JUMP - RendezvousFunnelProcStart
- mov qword [rcx+0x18], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
- mov qword [rcx+0x20], LongModeStart - RendezvousFunnelProcStart
- mov qword [rcx+0x28], LONG_JUMP - RendezvousFunnelProcStart
- ret
-
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 5964884762..abaa3349f4 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -1,7 +1,7 @@
/** @file
Page Fault (#PF) handler for X64 processors
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -201,7 +201,6 @@ SmmInitPageTable (
UINT64 *PdptEntry;
UINT64 *Pml4Entry;
UINT64 *Pml5Entry;
- UINT8 PhysicalAddressBits;
//
// Initialize spin lock
@@ -226,31 +225,29 @@ SmmInitPageTable (
//
// Generate initial SMM page table.
- // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
//
- PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
- PageTable = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
+ PageTable = GenSmmPageTable (mPagingMode, mPhysicalAddressBits);
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ if (m5LevelPagingNeeded) {
+ Pml5Entry = (UINT64 *)PageTable;
+ //
+ // Set Pml5Entry sub-entries number for smm PF handler usage.
+ //
+ SetSubEntriesNum (Pml5Entry, 1);
+ Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
+ } else {
+ Pml4Entry = (UINT64 *)PageTable;
+ }
- if (m5LevelPagingNeeded) {
- Pml5Entry = (UINT64 *)PageTable;
//
- // Set Pml5Entry sub-entries number for smm PF handler usage.
+ // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
//
- SetSubEntriesNum (Pml5Entry, 1);
- Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
- } else {
- Pml4Entry = (UINT64 *)PageTable;
- }
-
- //
- // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
- //
- PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
- for (Index = 0; Index < 4; Index++) {
- PdptEntry[Index] |= IA32_PG_PMNT;
- }
+ PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
+ for (Index = 0; Index < 4; Index++) {
+ PdptEntry[Index] |= IA32_PG_PMNT;
+ }
- if (!mCpuSmmRestrictedMemoryAccess) {
//
// Set Pml4Entry sub-entries number for smm PF handler usage.
//
@@ -704,152 +701,6 @@ AllocPage (
}
/**
- Page Fault handler for SMM use.
-
-**/
-VOID
-SmiDefaultPFHandler (
- VOID
- )
-{
- UINT64 *PageTable;
- UINT64 *PageTableTop;
- UINT64 PFAddress;
- UINTN StartBit;
- UINTN EndBit;
- UINT64 PTIndex;
- UINTN Index;
- SMM_PAGE_SIZE_TYPE PageSize;
- UINTN NumOfPages;
- UINTN PageAttribute;
- EFI_STATUS Status;
- UINT64 *UpperEntry;
- BOOLEAN Enable5LevelPaging;
- IA32_CR4 Cr4;
-
- //
- // Set default SMM page attribute
- //
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
- PageAttribute = 0;
-
- EndBit = 0;
- PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
- PFAddress = AsmReadCr2 ();
-
- Cr4.UintN = AsmReadCr4 ();
- Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
-
- Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
- //
- // If platform not support page table attribute, set default SMM page attribute
- //
- if (Status != EFI_SUCCESS) {
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
- PageAttribute = 0;
- }
-
- if (PageSize >= MaxSmmPageSizeType) {
- PageSize = SmmPageSize2M;
- }
-
- if (NumOfPages > 512) {
- NumOfPages = 512;
- }
-
- switch (PageSize) {
- case SmmPageSize4K:
- //
- // BIT12 to BIT20 is Page Table index
- //
- EndBit = 12;
- break;
- case SmmPageSize2M:
- //
- // BIT21 to BIT29 is Page Directory index
- //
- EndBit = 21;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- case SmmPageSize1G:
- if (!m1GPageTableSupport) {
- DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
- ASSERT (FALSE);
- }
-
- //
- // BIT30 to BIT38 is Page Directory Pointer Table index
- //
- EndBit = 30;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- default:
- ASSERT (FALSE);
- }
-
- //
- // If execute-disable is enabled, set NX bit
- //
- if (mXdEnabled) {
- PageAttribute |= IA32_PG_NX;
- }
-
- for (Index = 0; Index < NumOfPages; Index++) {
- PageTable = PageTableTop;
- UpperEntry = NULL;
- for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
- PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
- if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
- //
- // If the entry is not present, allocate one page from page pool for it
- //
- PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- } else {
- //
- // Save the upper entry address
- //
- UpperEntry = PageTable + PTIndex;
- }
-
- //
- // BIT9 to BIT11 of entry is used to save access record,
- // initialize value is 7
- //
- PageTable[PTIndex] |= (UINT64)IA32_PG_A;
- SetAccNum (PageTable + PTIndex, 7);
- PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
- }
-
- PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
- if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
- //
- // Check if the entry has already existed, this issue may occur when the different
- // size page entries created under the same entry
- //
- DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
- DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
- ASSERT (FALSE);
- }
-
- //
- // Fill the new entry
- //
- PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
- PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
- if (UpperEntry != NULL) {
- SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
- }
-
- //
- // Get the next page address if we need to create more page tables
- //
- PFAddress += (1ull << EndBit);
- }
-}
-
-/**
   The Page Fault handler wrapper for SMM use.
@param InterruptType Defines the type of interrupt or exception that
@@ -965,13 +816,7 @@ SmiPFHandler (
}
if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
- DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
- DEBUG_CODE (
- DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
- );
- CpuDeadLoop ();
- goto Exit;
}
}
@@ -981,7 +826,11 @@ SmiPFHandler (
SystemContext.SystemContextX64->ExceptionData
);
} else {
- SmiDefaultPFHandler ();
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ CpuDeadLoop ();
}
Exit:
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
index f329a988f8..cddc55fca5 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
@@ -13,6 +13,7 @@
;-------------------------------------------------------------------------------
extern ASM_PFX(SmiPFHandler)
+extern ASM_PFX(mSetupDebugTrap)
global ASM_PFX(gcSmiIdtr)
global ASM_PFX(gcSmiGdtr)
@@ -369,9 +370,14 @@ ASM_PFX(PageFaultIdtHandlerSmmProfile):
mov rsp, rbp
+; Check if mSetupDebugTrap is TRUE (non-zero)
+ cmp byte [dword ASM_PFX(mSetupDebugTrap)], 0
+ jz SkipSettingTF
+
; Enable TF bit after page fault handler runs
bts dword [rsp + 40], 8 ;RFLAGS
+SkipSettingTF:
pop rbp
add rsp, 16 ; skip INT# & ErrCode
iretq
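In C terms the assembly change amounts to the following; SavedRflags stands in for the RFLAGS image at [rsp + 40] and is purely illustrative:

    //
    // Request a single-step (#DB) after the faulting instruction is retried,
    // but only when the #PF handler asked for it via mSetupDebugTrap.
    //
    if (mSetupDebugTrap) {
      SavedRflags |= BIT8;   // BIT8 is RFLAGS.TF
    }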
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
index c4f21e2155..ca706ee32c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for x64 arch specific.
-Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -133,34 +133,6 @@ GetProtectedModeCS (
}
/**
- Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
-
- @param[in] ApHltLoopCode The address of the safe hlt-loop function.
- @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
- @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
-
-**/
-VOID
-TransferApToSafeState (
- IN UINTN ApHltLoopCode,
- IN UINTN TopOfStack,
- IN UINTN NumberToFinishAddress
- )
-{
- AsmDisablePaging64 (
- GetProtectedModeCS (),
- (UINT32)ApHltLoopCode,
- (UINT32)NumberToFinishAddress,
- 0,
- (UINT32)TopOfStack
- );
- //
- // It should never reach here
- //
- ASSERT (FALSE);
-}
-
-/**
Initialize the shadow stack related data structure.
@param CpuIndex The index of CPU.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index 01432d466c..a95653ddbf 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
X64 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -29,20 +29,21 @@ UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
)
{
+ ASSERT (Cr3 != NULL);
+
//
// Generate level4 page table for the first 4GB memory space
// Return the address of PML4 (to set CR3)
//
- //
- // The SmmS3Cr3 is only used by S3Resume PEIM to switch CPU from 32bit to 64bit
- //
- mSmmS3ResumeState->SmmS3Cr3 = (UINT32)GenSmmPageTable (Paging4Level, 32);
+ *Cr3 = GenSmmPageTable (Paging4Level, 32);
return;
}
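Since the assignment to mSmmS3ResumeState->SmmS3Cr3 was removed from InitSmmS3Cr3, it now lives with the caller (outside this diff). A hypothetical caller-side sketch mirroring the removed code:

    UINTN  Cr3;

    InitSmmS3Cr3 (&Cr3);
    //
    // SmmS3Cr3 is consumed by the S3Resume PEIM when switching the CPU from
    // 32-bit to 64-bit mode, so only the low 32 bits are kept.
    //
    mSmmS3ResumeState->SmmS3Cr3 = (UINT32)Cr3;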
@@ -109,6 +110,156 @@ AcquirePage (
}
/**
+ Create new entry in page table for page fault address in SmmProfilePFHandler.
+
+**/
+VOID
+SmmProfileMapPFAddress (
+ VOID
+ )
+{
+ UINT64 *PageTable;
+ UINT64 *PageTableTop;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+ BOOLEAN Enable5LevelPaging;
+ IA32_CR4 Cr4;
+
+ //
+ // Set default SMM page attribute
+ //
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+
+ EndBit = 0;
+ PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
+
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
+ //
+ // If platform not support page table attribute, set default SMM page attribute
+ //
+ if (Status != EFI_SUCCESS) {
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+ }
+
+ if (PageSize >= MaxSmmPageSizeType) {
+ PageSize = SmmPageSize2M;
+ }
+
+ if (NumOfPages > 512) {
+ NumOfPages = 512;
+ }
+
+ switch (PageSize) {
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+        DEBUG ((DEBUG_ERROR, "1-GByte pages are not supported!"));
+ ASSERT (FALSE);
+ }
+
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+
+ //
+ // If execute-disable is enabled, set NX bit
+ //
+ if (mXdEnabled) {
+ PageAttribute |= IA32_PG_NX;
+ }
+
+ for (Index = 0; Index < NumOfPages; Index++) {
+ PageTable = PageTableTop;
+ UpperEntry = NULL;
+ for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > 12; StartBit -= 9) {
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+
+ //
+      // Walk down the page table, descending one more level (and allocating that level
+      // if it is not present yet) when either of the following cases is met:
+      // 1) StartBit > EndBit: the page size covered by the current level is bigger than
+      //    the platform-specified PageSize granularity.
+      // 2) IA32_PG_P bit is not 0 & IA32_PG_PS bit is 0: the current entry is present
+      //    and it is a non-leaf entry.
+ //
+ if ((StartBit > EndBit) || ((((PageTable[PTIndex] & IA32_PG_P) != 0) && ((PageTable[PTIndex] & IA32_PG_PS) == 0)))) {
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ } else {
+ //
+ // Save the upper entry address
+ //
+ UpperEntry = PageTable + PTIndex;
+ }
+
+ //
+ // BIT9 to BIT11 of entry is used to save access record,
+ // initialize value is 7
+ //
+ PageTable[PTIndex] |= (UINT64)IA32_PG_A;
+ SetAccNum (PageTable + PTIndex, 7);
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
+ } else {
+ //
+ // Found the appropriate entry.
+ //
+ break;
+ }
+ }
+
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+
+ //
+ // Fill the new entry
+ //
+ PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << StartBit) - 1)) |
+ PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
+ if (UpperEntry != NULL) {
+ SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
+ }
+
+ //
+ // Get the next page address if we need to create more page tables
+ //
+ PFAddress += (1ull << StartBit);
+ }
+}
+
+/**
Update page table to map the memory correctly in order to make the instruction
which caused page fault execute successfully. And it also save the original page
table to be restored in single-step exception.
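Once the walk in SmmProfileMapPFAddress stops, the new leaf entry keeps only the address bits at or above StartBit. A worked example of that masking, assuming a 2MB page (StartBit == 21) and no address encryption mask:

    UINT64  PFAddress;
    UINT64  LeafBase;

    PFAddress = 0x12345678ULL;
    LeafBase  = PFAddress & ~((1ULL << 21) - 1);   // 0x12200000, the 2MB-aligned base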
@@ -207,7 +358,7 @@ RestorePageTableAbove4G (
// If page entry does not existed in page table at all, create a new entry.
//
if (!Existed) {
- if (IsAddressValid (PFAddress, &Nx)) {
+ if (IsSmmProfilePFAddressAbove4GValid (PFAddress, &Nx)) {
//
// If page fault address above 4GB is in protected range but it causes a page fault exception,
// Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
@@ -219,7 +370,7 @@ RestorePageTableAbove4G (
//
// Create one entry in page table for page fault address.
//
- SmiDefaultPFHandler ();
+ SmmProfileMapPFAddress ();
//
// Find the page table entry created just now.
//
@@ -250,7 +401,7 @@ RestorePageTableAbove4G (
PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
for (Index = 0; Index < 512; Index++) {
PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- if (!IsAddressValid (Address, &Nx)) {
+ if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
}
@@ -268,7 +419,7 @@ RestorePageTableAbove4G (
//
// Update 2MB page entry.
//
- if (!IsAddressValid (Address, &Nx)) {
+ if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
//
// Patch to remove present flag and rw flag.
//
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
index 80205c9b3e..5249360a1a 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
@@ -1,7 +1,7 @@
/** @file
X64 processor specific header file to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -55,6 +55,8 @@ typedef struct _PEBS_RECORD {
#pragma pack ()
+extern BOOLEAN m1GPageTableSupport;
+
#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
/**
@@ -81,10 +83,12 @@ RestorePageTableAbove4G (
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
);
/**
@@ -96,4 +100,57 @@ InitPagesForPFHandler (
VOID
);
+/**
+ Set sub-entries number in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] SubEntryNum Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+                               0x1ff means there are 512 sub-entries under this entry
+
+**/
+VOID
+SetSubEntriesNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
+ );
+
+/**
+ Return sub-entries number in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+          0x1ff means there are 512 sub-entries under this entry
+**/
+UINT64
+GetSubEntriesNum (
+ IN UINT64 *Entry
+ );
+
+/**
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
+ );
+
+/**
+ Set access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] Acc Access record value
+
+**/
+VOID
+SetAccNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
+ );
+
#endif // _SMM_PROFILE_ARCH_H_
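The helpers declared above are defined elsewhere in this driver. A plausible sketch of the bit-field accessors, using BaseLib's BitFieldWrite64/BitFieldRead64; the access record living in bits 9..11 is stated in the diff, while placing the 9-bit sub-entry count in the software-available bits 52..60 is an assumption:

    VOID
    SetSubEntriesNum (
      IN OUT UINT64  *Entry,
      IN     UINT64  SubEntryNum
      )
    {
      //
      // Sub-entry count (0..0x1FF) kept in ignored bits of the entry.
      //
      *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
    }

    UINT64
    GetSubEntriesNum (
      IN UINT64  *Entry
      )
    {
      return BitFieldRead64 (*Entry, 52, 60);
    }

    VOID
    SetAccNum (
      IN OUT UINT64  *Entry,
      IN     UINT64  Acc
      )
    {
      //
      // Access record kept in BIT9..BIT11, as noted in SmmProfileMapPFAddress.
      //
      *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
    }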