Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/X64')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm      | 189
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c         | 199
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm |   6
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c    |  30
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c  | 171
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h  |  61
6 files changed, 251 insertions(+), 405 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
deleted file mode 100644
index a12538f72b..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
+++ /dev/null
@@ -1,189 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; MpFuncs.nasm
-;
-; Abstract:
-;
-; This is the assembly code for Multi-processor S3 support
-;
-;-------------------------------------------------------------------------------
-
-%define VacantFlag 0x0
-%define NotVacantFlag 0xff
-
-%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
-%define StackStartAddressLocation LockLocation + 0x8
-%define StackSizeLocation LockLocation + 0x10
-%define CProcedureLocation LockLocation + 0x18
-%define GdtrLocation LockLocation + 0x20
-%define IdtrLocation LockLocation + 0x2A
-%define BufferStartLocation LockLocation + 0x34
-%define Cr3OffsetLocation LockLocation + 0x38
-%define InitializeFloatingPointUnitsAddress LockLocation + 0x3C
-
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
-;procedure serializes all the AP processors through an Init sequence. It must be
-;noted that APs arrive here very raw...ie: real mode, no stack.
-;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
-;IS IN MACHINE CODE.
-;-------------------------------------------------------------------------------------
-;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
-
-;text SEGMENT
-DEFAULT REL
-SECTION .text
-
-BITS 16
-global ASM_PFX(RendezvousFunnelProc)
-ASM_PFX(RendezvousFunnelProc):
-RendezvousFunnelProcStart:
-
-; At this point CS = 0x(vv00) and ip= 0x0.
-
- mov ax, cs
- mov ds, ax
- mov es, ax
- mov ss, ax
- xor ax, ax
- mov fs, ax
- mov gs, ax
-
-flat32Start:
-
- mov si, BufferStartLocation
- mov edx,dword [si] ; EDX is keeping the start address of wakeup buffer
-
- mov si, Cr3OffsetLocation
- mov ecx,dword [si] ; ECX is keeping the value of CR3
-
- mov si, GdtrLocation
-o32 lgdt [cs:si]
-
- mov si, IdtrLocation
-o32 lidt [cs:si]
-
- xor ax, ax
- mov ds, ax
-
- mov eax, cr0 ; Get control register 0
- or eax, 0x000000001 ; Set PE bit (bit #0)
- mov cr0, eax
-
-FLAT32_JUMP:
-
-a32 jmp dword 0x20:0x0
-
-BITS 32
-PMODE_ENTRY: ; protected mode entry point
-
- mov ax, 0x18
-o16 mov ds, ax
-o16 mov es, ax
-o16 mov fs, ax
-o16 mov gs, ax
-o16 mov ss, ax ; Flat mode setup.
-
- mov eax, cr4
- bts eax, 5
- mov cr4, eax
-
- mov cr3, ecx
-
- mov esi, edx ; Save wakeup buffer address
-
- mov ecx, 0xc0000080 ; EFER MSR number.
- rdmsr ; Read EFER.
- bts eax, 8 ; Set LME=1.
- wrmsr ; Write EFER.
-
- mov eax, cr0 ; Read CR0.
- bts eax, 31 ; Set PG=1.
- mov cr0, eax ; Write CR0.
-
-LONG_JUMP:
-
-a16 jmp dword 0x38:0x0
-
-BITS 64
-LongModeStart:
-
- mov ax, 0x30
-o16 mov ds, ax
-o16 mov es, ax
-o16 mov ss, ax
-
- mov edi, esi
- add edi, LockLocation
- mov al, NotVacantFlag
-TestLock:
- xchg byte [edi], al
- cmp al, NotVacantFlag
- jz TestLock
-
-ProgramStack:
-
- mov edi, esi
- add edi, StackSizeLocation
- mov rax, qword [edi]
- mov edi, esi
- add edi, StackStartAddressLocation
- add rax, qword [edi]
- mov rsp, rax
- mov qword [edi], rax
-
-Releaselock:
-
- mov al, VacantFlag
- mov edi, esi
- add edi, LockLocation
- xchg byte [edi], al
-
- ;
- ; Call assembly function to initialize FPU.
- ;
- mov rax, qword [esi + InitializeFloatingPointUnitsAddress]
- sub rsp, 0x20
- call rax
- add rsp, 0x20
-
- ;
- ; Call C Function
- ;
- mov edi, esi
- add edi, CProcedureLocation
- mov rax, qword [edi]
-
- test rax, rax
- jz GoToSleep
-
- sub rsp, 0x20
- call rax
- add rsp, 0x20
-
-GoToSleep:
- cli
- hlt
- jmp $-2
-
-RendezvousFunnelProcEnd:
-
-;-------------------------------------------------------------------------------------
-; AsmGetAddressMap (&AddressMap);
-;-------------------------------------------------------------------------------------
-; comments here for definition of address map
-global ASM_PFX(AsmGetAddressMap)
-ASM_PFX(AsmGetAddressMap):
- lea rax, [RendezvousFunnelProcStart]
- mov qword [rcx], rax
- mov qword [rcx+0x8], PMODE_ENTRY - RendezvousFunnelProcStart
- mov qword [rcx+0x10], FLAT32_JUMP - RendezvousFunnelProcStart
- mov qword [rcx+0x18], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
- mov qword [rcx+0x20], LongModeStart - RendezvousFunnelProcStart
- mov qword [rcx+0x28], LONG_JUMP - RendezvousFunnelProcStart
- ret
-
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 5964884762..abaa3349f4 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -1,7 +1,7 @@
/** @file
Page Fault (#PF) handler for X64 processors
-Copyright (c) 2009 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2009 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -201,7 +201,6 @@ SmmInitPageTable (
UINT64 *PdptEntry;
UINT64 *Pml4Entry;
UINT64 *Pml5Entry;
- UINT8 PhysicalAddressBits;
//
// Initialize spin lock
@@ -226,31 +225,29 @@ SmmInitPageTable (
//
// Generate initial SMM page table.
- // Only map [0, 4G] when PcdCpuSmmRestrictedMemoryAccess is FALSE.
//
- PhysicalAddressBits = mCpuSmmRestrictedMemoryAccess ? mPhysicalAddressBits : 32;
- PageTable = GenSmmPageTable (mPagingMode, PhysicalAddressBits);
+ PageTable = GenSmmPageTable (mPagingMode, mPhysicalAddressBits);
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ if (m5LevelPagingNeeded) {
+ Pml5Entry = (UINT64 *)PageTable;
+ //
+ // Set Pml5Entry sub-entries number for smm PF handler usage.
+ //
+ SetSubEntriesNum (Pml5Entry, 1);
+ Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
+ } else {
+ Pml4Entry = (UINT64 *)PageTable;
+ }
- if (m5LevelPagingNeeded) {
- Pml5Entry = (UINT64 *)PageTable;
//
- // Set Pml5Entry sub-entries number for smm PF handler usage.
+ // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
//
- SetSubEntriesNum (Pml5Entry, 1);
- Pml4Entry = (UINT64 *)((*Pml5Entry) & ~mAddressEncMask & gPhyMask);
- } else {
- Pml4Entry = (UINT64 *)PageTable;
- }
-
- //
- // Set IA32_PG_PMNT bit to mask first 4 PdptEntry.
- //
- PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
- for (Index = 0; Index < 4; Index++) {
- PdptEntry[Index] |= IA32_PG_PMNT;
- }
+ PdptEntry = (UINT64 *)((*Pml4Entry) & ~mAddressEncMask & gPhyMask);
+ for (Index = 0; Index < 4; Index++) {
+ PdptEntry[Index] |= IA32_PG_PMNT;
+ }
- if (!mCpuSmmRestrictedMemoryAccess) {
//
// Set Pml4Entry sub-entries number for smm PF handler usage.
//
@@ -704,152 +701,6 @@ AllocPage (
}
/**
- Page Fault handler for SMM use.
-
-**/
-VOID
-SmiDefaultPFHandler (
- VOID
- )
-{
- UINT64 *PageTable;
- UINT64 *PageTableTop;
- UINT64 PFAddress;
- UINTN StartBit;
- UINTN EndBit;
- UINT64 PTIndex;
- UINTN Index;
- SMM_PAGE_SIZE_TYPE PageSize;
- UINTN NumOfPages;
- UINTN PageAttribute;
- EFI_STATUS Status;
- UINT64 *UpperEntry;
- BOOLEAN Enable5LevelPaging;
- IA32_CR4 Cr4;
-
- //
- // Set default SMM page attribute
- //
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
- PageAttribute = 0;
-
- EndBit = 0;
- PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
- PFAddress = AsmReadCr2 ();
-
- Cr4.UintN = AsmReadCr4 ();
- Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
-
- Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
- //
- // If platform not support page table attribute, set default SMM page attribute
- //
- if (Status != EFI_SUCCESS) {
- PageSize = SmmPageSize2M;
- NumOfPages = 1;
- PageAttribute = 0;
- }
-
- if (PageSize >= MaxSmmPageSizeType) {
- PageSize = SmmPageSize2M;
- }
-
- if (NumOfPages > 512) {
- NumOfPages = 512;
- }
-
- switch (PageSize) {
- case SmmPageSize4K:
- //
- // BIT12 to BIT20 is Page Table index
- //
- EndBit = 12;
- break;
- case SmmPageSize2M:
- //
- // BIT21 to BIT29 is Page Directory index
- //
- EndBit = 21;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- case SmmPageSize1G:
- if (!m1GPageTableSupport) {
- DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
- ASSERT (FALSE);
- }
-
- //
- // BIT30 to BIT38 is Page Directory Pointer Table index
- //
- EndBit = 30;
- PageAttribute |= (UINTN)IA32_PG_PS;
- break;
- default:
- ASSERT (FALSE);
- }
-
- //
- // If execute-disable is enabled, set NX bit
- //
- if (mXdEnabled) {
- PageAttribute |= IA32_PG_NX;
- }
-
- for (Index = 0; Index < NumOfPages; Index++) {
- PageTable = PageTableTop;
- UpperEntry = NULL;
- for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
- PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
- if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
- //
- // If the entry is not present, allocate one page from page pool for it
- //
- PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- } else {
- //
- // Save the upper entry address
- //
- UpperEntry = PageTable + PTIndex;
- }
-
- //
- // BIT9 to BIT11 of entry is used to save access record,
- // initialize value is 7
- //
- PageTable[PTIndex] |= (UINT64)IA32_PG_A;
- SetAccNum (PageTable + PTIndex, 7);
- PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
- }
-
- PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
- if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
- //
- // Check if the entry has already existed, this issue may occur when the different
- // size page entries created under the same entry
- //
- DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
- DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
- ASSERT (FALSE);
- }
-
- //
- // Fill the new entry
- //
- PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
- PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
- if (UpperEntry != NULL) {
- SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
- }
-
- //
- // Get the next page address if we need to create more page tables
- //
- PFAddress += (1ull << EndBit);
- }
-}
-
-/**
The Page Fault handler wrapper for SMM use.
@param InterruptType Defines the type of interrupt or exception that
@@ -965,13 +816,7 @@ SmiPFHandler (
}
if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
- DumpCpuContext (InterruptType, SystemContext);
DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
- DEBUG_CODE (
- DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
- );
- CpuDeadLoop ();
- goto Exit;
}
}
@@ -981,7 +826,11 @@ SmiPFHandler (
SystemContext.SystemContextX64->ExceptionData
);
} else {
- SmiDefaultPFHandler ();
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ CpuDeadLoop ();
}
Exit:
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
index f329a988f8..cddc55fca5 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
@@ -13,6 +13,7 @@
;-------------------------------------------------------------------------------
extern ASM_PFX(SmiPFHandler)
+extern ASM_PFX(mSetupDebugTrap)
global ASM_PFX(gcSmiIdtr)
global ASM_PFX(gcSmiGdtr)
@@ -369,9 +370,14 @@ ASM_PFX(PageFaultIdtHandlerSmmProfile):
mov rsp, rbp
+; Check if mSetupDebugTrap is TRUE (non-zero)
+ cmp byte [dword ASM_PFX(mSetupDebugTrap)], 0
+ jz SkipSettingTF
+
; Enable TF bit after page fault handler runs
bts dword [rsp + 40], 8 ;RFLAGS
+SkipSettingTF:
pop rbp
add rsp, 16 ; skip INT# & ErrCode
iretq
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
index c4f21e2155..ca706ee32c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for x64 arch specific.
-Copyright (c) 2015 - 2023, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -133,34 +133,6 @@ GetProtectedModeCS (
}
/**
- Transfer AP to safe hlt-loop after it finished restore CPU features on S3 patch.
-
- @param[in] ApHltLoopCode The address of the safe hlt-loop function.
- @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
- @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
-
-**/
-VOID
-TransferApToSafeState (
- IN UINTN ApHltLoopCode,
- IN UINTN TopOfStack,
- IN UINTN NumberToFinishAddress
- )
-{
- AsmDisablePaging64 (
- GetProtectedModeCS (),
- (UINT32)ApHltLoopCode,
- (UINT32)NumberToFinishAddress,
- 0,
- (UINT32)TopOfStack
- );
- //
- // It should never reach here
- //
- ASSERT (FALSE);
-}
-
-/**
Initialize the shadow stack related data structure.
@param CpuIndex The index of CPU.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index 01432d466c..a95653ddbf 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
X64 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
@@ -29,20 +29,21 @@ UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
)
{
+ ASSERT (Cr3 != NULL);
+
//
// Generate level4 page table for the first 4GB memory space
// Return the address of PML4 (to set CR3)
//
- //
- // The SmmS3Cr3 is only used by S3Resume PEIM to switch CPU from 32bit to 64bit
- //
- mSmmS3ResumeState->SmmS3Cr3 = (UINT32)GenSmmPageTable (Paging4Level, 32);
+ *Cr3 = GenSmmPageTable (Paging4Level, 32);
return;
}
@@ -109,6 +110,156 @@ AcquirePage (
}
/**
+ Create new entry in page table for page fault address in SmmProfilePFHandler.
+
+**/
+VOID
+SmmProfileMapPFAddress (
+ VOID
+ )
+{
+ UINT64 *PageTable;
+ UINT64 *PageTableTop;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+ BOOLEAN Enable5LevelPaging;
+ IA32_CR4 Cr4;
+
+ //
+ // Set default SMM page attribute
+ //
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+
+ EndBit = 0;
+ PageTableTop = (UINT64 *)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 != 0);
+
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
+ //
+ // If platform not support page table attribute, set default SMM page attribute
+ //
+ if (Status != EFI_SUCCESS) {
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+ }
+
+ if (PageSize >= MaxSmmPageSizeType) {
+ PageSize = SmmPageSize2M;
+ }
+
+ if (NumOfPages > 512) {
+ NumOfPages = 512;
+ }
+
+ switch (PageSize) {
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
+ ASSERT (FALSE);
+ }
+
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+
+ //
+ // If execute-disable is enabled, set NX bit
+ //
+ if (mXdEnabled) {
+ PageAttribute |= IA32_PG_NX;
+ }
+
+ for (Index = 0; Index < NumOfPages; Index++) {
+ PageTable = PageTableTop;
+ UpperEntry = NULL;
+ for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > 12; StartBit -= 9) {
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+
+ //
+ // Iterate through the page table to find the appropriate page table entry for page creation if one of the following cases is met:
+ // 1) StartBit > EndBit: The PageSize of current entry is bigger than the platform-specified PageSize granularity.
+ // 2) IA32_PG_P bit is 0 & IA32_PG_PS bit is not 0: The current entry is present and it's a non-leaf entry.
+ //
+ if ((StartBit > EndBit) || ((((PageTable[PTIndex] & IA32_PG_P) != 0) && ((PageTable[PTIndex] & IA32_PG_PS) == 0)))) {
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ } else {
+ //
+ // Save the upper entry address
+ //
+ UpperEntry = PageTable + PTIndex;
+ }
+
+ //
+ // BIT9 to BIT11 of entry is used to save access record,
+ // initialize value is 7
+ //
+ PageTable[PTIndex] |= (UINT64)IA32_PG_A;
+ SetAccNum (PageTable + PTIndex, 7);
+ PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
+ } else {
+ //
+ // Found the appropriate entry.
+ //
+ break;
+ }
+ }
+
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+
+ //
+ // Fill the new entry
+ //
+ PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << StartBit) - 1)) |
+ PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
+ if (UpperEntry != NULL) {
+ SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
+ }
+
+ //
+ // Get the next page address if we need to create more page tables
+ //
+ PFAddress += (1ull << StartBit);
+ }
+}
+
+/**
Update page table to map the memory correctly in order to make the instruction
which caused page fault execute successfully. And it also save the original page
table to be restored in single-step exception.
@@ -207,7 +358,7 @@ RestorePageTableAbove4G (
// If page entry does not existed in page table at all, create a new entry.
//
if (!Existed) {
- if (IsAddressValid (PFAddress, &Nx)) {
+ if (IsSmmProfilePFAddressAbove4GValid (PFAddress, &Nx)) {
//
// If page fault address above 4GB is in protected range but it causes a page fault exception,
// Will create a page entry for this page fault address, make page table entry as present/rw and execution-disable.
@@ -219,7 +370,7 @@ RestorePageTableAbove4G (
//
// Create one entry in page table for page fault address.
//
- SmiDefaultPFHandler ();
+ SmmProfileMapPFAddress ();
//
// Find the page table entry created just now.
//
@@ -250,7 +401,7 @@ RestorePageTableAbove4G (
PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
for (Index = 0; Index < 512; Index++) {
PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
- if (!IsAddressValid (Address, &Nx)) {
+ if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
}
@@ -268,7 +419,7 @@ RestorePageTableAbove4G (
//
// Update 2MB page entry.
//
- if (!IsAddressValid (Address, &Nx)) {
+ if (!IsSmmProfilePFAddressAbove4GValid (Address, &Nx)) {
//
// Patch to remove present flag and rw flag.
//
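
Note: the loop added to SmmProfileArch.c above walks one paging level per iteration, reading a 9-bit index with BitFieldRead64 (PFAddress, StartBit, StartBit + 8). The sketch below is illustrative only and not part of the patch; the helper name and the array output are invented here to show how StartBit values 48/39/30/21/12 map to the PML5E/PML4E/PDPTE/PDE/PTE indices.

#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical helper (not in the patch): split a page-fault address into the
// 9-bit per-level indices used by SmmProfileMapPFAddress.
// Indices[0] is the top level (PML5E or PML4E); the last entry is the PTE index.
//
UINTN
GetPageTableIndices (
  IN  UINT64   PFAddress,
  IN  BOOLEAN  Enable5LevelPaging,
  OUT UINT64   Indices[5]
  )
{
  UINTN  StartBit;
  UINTN  Count;

  Count = 0;
  //
  // Each paging level decodes 9 address bits, starting at bit 48 (5-level)
  // or bit 39 (4-level) and ending at bit 12 for the page-table entry.
  //
  for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit >= 12; StartBit -= 9) {
    Indices[Count++] = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
  }

  return Count;
}
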
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
index 80205c9b3e..5249360a1a 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
@@ -1,7 +1,7 @@
/** @file
X64 processor specific header file to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2024, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
@@ -55,6 +55,8 @@ typedef struct _PEBS_RECORD {
#pragma pack ()
+extern BOOLEAN m1GPageTableSupport;
+
#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
/**
@@ -81,10 +83,12 @@ RestorePageTableAbove4G (
/**
Create SMM page table for S3 path.
+ @param[out] Cr3 The base address of the page tables.
+
**/
VOID
InitSmmS3Cr3 (
- VOID
+ OUT UINTN *Cr3
);
/**
@@ -96,4 +100,57 @@ InitPagesForPFHandler (
VOID
);
+/**
+ Set sub-entries number in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] SubEntryNum Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there is 512 sub-entries under this entry
+
+**/
+VOID
+SetSubEntriesNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
+ );
+
+/**
+ Return sub-entries number in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there is 512 sub-entries under this entry
+**/
+UINT64
+GetSubEntriesNum (
+ IN UINT64 *Entry
+ );
+
+/**
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
+ );
+
+/**
+ Set access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] Acc Access record value
+
+**/
+VOID
+SetAccNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
+ );
+
#endif // _SMM_PROFILE_ARCH_H_
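
The SmmProfileArch.h hunk above only declares the page-table bookkeeping helpers (SetSubEntriesNum, GetSubEntriesNum, SetAccNum); their bodies live elsewhere in the driver. The following is a minimal sketch of such helpers, not the authoritative implementation: the bit 9-11 access-record usage is stated in the patch comments, while keeping the sub-entry count in the ignored bits 52-60 of an entry is an assumption made here for illustration.

#include <Base.h>
#include <Library/BaseLib.h>

//
// Sketch of the bookkeeping helpers declared in SmmProfileArch.h.
// Assumption: the sub-entry count is stored in the ignored bits 52..60 of a
// page-table entry; the access record uses bits 9..11, matching the
// "BIT9 to BIT11 of entry is used to save access record" comment in the patch.
//
VOID
SetSubEntriesNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  SubEntryNum
  )
{
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

UINT64
GetSubEntriesNum (
  IN UINT64  *Entry
  )
{
  return BitFieldRead64 (*Entry, 52, 60);
}

VOID
SetAccNum (
  IN OUT UINT64  *Entry,
  IN     UINT64  Acc
  )
{
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}
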