author     Jiaxin Wu <jiaxin.wu@intel.com>  2024-01-12 15:33:20 +0800
committer  mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>  2024-05-08 01:53:58 +0000
commit     2727231b0a6fb4c043479d132df4d36cf9f751c2 (patch)
tree       26e6613727c430a619c8385bb371e5b084d34745 /UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
parent     23ed7f209c122b5f9d8c1b35b2b01ece2b716e54 (diff)
UefiCpuPkg/PiSmmCpuDxeSmm: Remove SmBases relocation logic
This patch removes the legacy SmBase relocation logic from the PiSmmCpuDxeSmm driver. Responsibility for SmBase relocation has been transferred to the SmmRelocationInit interface, which now handles the following tasks:

1. Relocate the SmBase for each processor.
2. Generate the gSmmBaseHobGuid HOB.

As a result of this change, the PiSmmCpuDxeSmm driver's role in SMM environment setup is simplified to:

1. Utilize the gSmmBaseHobGuid to determine the SmBase.
2. Perform ExecuteFirstSmiInit() to do early SMM initialization.

Cc: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
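For orientation, here is a minimal sketch of the consumer side of step 1 above: reading the relocated SmBase values back out of the gSmmBaseHobGuid HOB. It assumes the SMM_BASE_HOB_DATA layout from UefiCpuPkg's Include/Guid/SmmBaseHob.h and that all processors fit in a single HOB instance; GetSmBaseFromHob is a hypothetical helper for illustration, not the driver's actual code.

#include <PiSmm.h>
#include <Library/HobLib.h>
#include <Library/DebugLib.h>
#include <Guid/SmmBaseHob.h>

//
// Look up the relocated SMBASE for CpuIndex from the gSmmBaseHobGuid HOB
// produced by SmmRelocationInit earlier in boot. Hypothetical helper;
// large CPU counts may split the data across several HOB instances, which
// this sketch does not handle.
//
UINT64
GetSmBaseFromHob (
  IN UINTN  CpuIndex
  )
{
  EFI_HOB_GUID_TYPE  *GuidHob;
  SMM_BASE_HOB_DATA  *SmmBaseHobData;

  GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
  ASSERT (GuidHob != NULL);

  SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);
  ASSERT (CpuIndex < SmmBaseHobData->NumberOfProcessors);

  return SmmBaseHobData->SmBase[CpuIndex];
}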
Diffstat (limited to 'UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm')
-rw-r--r--  UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm  146
1 file changed, 0 insertions(+), 146 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
deleted file mode 100644
index 9cf3a6dcf9..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
+++ /dev/null
@@ -1,146 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; SmmInit.nasm
-;
-; Abstract:
-;
-; Functions for relocating SMBASE's for all processors
-;
-;-------------------------------------------------------------------------------
-
-%include "StuffRsbNasm.inc"
-
-extern ASM_PFX(SmmInitHandler)
-extern ASM_PFX(mRebasedFlag)
-extern ASM_PFX(mSmmRelocationOriginalAddress)
-
-global ASM_PFX(gPatchSmmCr3)
-global ASM_PFX(gPatchSmmCr4)
-global ASM_PFX(gPatchSmmCr0)
-global ASM_PFX(gPatchSmmInitStack)
-global ASM_PFX(gcSmiInitGdtr)
-global ASM_PFX(gcSmmInitSize)
-global ASM_PFX(gcSmmInitTemplate)
-global ASM_PFX(gPatchRebasedFlagAddr32)
-global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
-
-%define LONG_MODE_CS 0x38
-
- DEFAULT REL
- SECTION .text
-
-ASM_PFX(gcSmiInitGdtr):
- DW 0
- DQ 0
-
-global ASM_PFX(SmmStartup)
-
-BITS 16
-ASM_PFX(SmmStartup):
- mov eax, 0x80000001 ; read capability
- cpuid
- mov ebx, edx ; rdmsr will change edx. keep it in ebx.
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr3):
- mov cr3, eax
-o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr4):
- or ah, 2 ; enable XMM registers access
- mov cr4, eax
- mov ecx, 0xc0000080 ; IA32_EFER MSR
- rdmsr
- or ah, BIT0 ; set LME bit
- test ebx, BIT20 ; check NXE capability
- jz .1
- or ah, BIT3 ; set NXE bit
-.1:
- wrmsr
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr0):
- mov cr0, eax ; enable protected mode & paging
- jmp LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
-@PatchLongModeOffset:
-
-BITS 64
-@LongMode: ; long-mode starts here
- mov rsp, strict qword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmInitStack):
- and sp, 0xfff0 ; make sure RSP is 16-byte aligned
- ;
- ; According to X64 calling convention, XMM0~5 are volatile, we need to save
- ; them before calling C-function.
- ;
- sub rsp, 0x60
- movdqa [rsp], xmm0
- movdqa [rsp + 0x10], xmm1
- movdqa [rsp + 0x20], xmm2
- movdqa [rsp + 0x30], xmm3
- movdqa [rsp + 0x40], xmm4
- movdqa [rsp + 0x50], xmm5
-
- add rsp, -0x20
- call ASM_PFX(SmmInitHandler)
- add rsp, 0x20
-
- ;
- ; Restore XMM0~5 after calling C-function.
- ;
- movdqa xmm0, [rsp]
- movdqa xmm1, [rsp + 0x10]
- movdqa xmm2, [rsp + 0x20]
- movdqa xmm3, [rsp + 0x30]
- movdqa xmm4, [rsp + 0x40]
- movdqa xmm5, [rsp + 0x50]
-
- StuffRsb64
- rsm
-
-BITS 16
-ASM_PFX(gcSmmInitTemplate):
- mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
- sub ebp, 0x30000
- jmp ebp
-@L1:
- DQ 0; ASM_PFX(SmmStartup)
-
-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
-
-BITS 64
-global ASM_PFX(SmmRelocationSemaphoreComplete)
-ASM_PFX(SmmRelocationSemaphoreComplete):
- push rax
- mov rax, [ASM_PFX(mRebasedFlag)]
- mov byte [rax], 1
- pop rax
- jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
-
-;
-; Semaphore code running in 32-bit mode
-;
-BITS 32
-global ASM_PFX(SmmRelocationSemaphoreComplete32)
-ASM_PFX(SmmRelocationSemaphoreComplete32):
- push eax
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchRebasedFlagAddr32):
- mov byte [eax], 1
- pop eax
- jmp dword [dword 0] ; destination will be patched
-ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
-
-BITS 64
-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
-ASM_PFX(PiSmmCpuSmmInitFixupAddress):
- lea rax, [@LongMode]
- lea rcx, [@PatchLongModeOffset - 6]
- mov dword [rcx], eax
-
- lea rax, [ASM_PFX(SmmStartup)]
- lea rcx, [@L1]
- mov qword [rcx], rax
- ret
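The producer side now lives in UefiCpuPkg/Library/SmmRelocationLib rather than in the assembly deleted above. Below is a minimal sketch of how a platform PEIM might hand relocation off to that library; PlatformSmmRelocationPeimEntry is a hypothetical entry point, and the sketch assumes the EDKII_PEI_MP_SERVICES2_PPI has already been installed (e.g. by CpuMpPei).

#include <PiPei.h>
#include <Library/PeiServicesLib.h>
#include <Library/SmmRelocationLib.h>
#include <Ppi/MpServices2.h>

EFI_STATUS
EFIAPI
PlatformSmmRelocationPeimEntry (
  IN EFI_PEI_FILE_HANDLE     FileHandle,
  IN CONST EFI_PEI_SERVICES  **PeiServices
  )
{
  EFI_STATUS                  Status;
  EDKII_PEI_MP_SERVICES2_PPI  *MpServices2;

  //
  // Locate MP services, then let SmmRelocationLib relocate SmBase on every
  // processor and publish the gSmmBaseHobGuid HOB that PiSmmCpuDxeSmm
  // consumes later in boot.
  //
  Status = PeiServicesLocatePpi (
             &gEdkiiPeiMpServices2PpiGuid,
             0,
             NULL,
             (VOID **)&MpServices2
             );
  if (EFI_ERROR (Status)) {
    return Status;
  }

  return SmmRelocationInit (MpServices2);
}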