From b505f11f395ae71b1caed2b4ceb61f34023c709e Mon Sep 17 00:00:00 2001
From: Tom Lendacky
Date: Fri, 8 Mar 2024 07:32:22 -0800
Subject: OvmfPkg/AmdSvsmLib: Add support for the SVSM_CORE_PVALIDATE call

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=4654

The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
be present when running at VMPL1 or higher.

When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
memory validation instead of issuing the PVALIDATE instruction directly.

Cc: Ard Biesheuvel
Cc: Gerd Hoffmann
Cc: Jiewen Yao
Cc: Laszlo Ersek
Acked-by: Gerd Hoffmann
Signed-off-by: Tom Lendacky
---
 OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c | 183 +++++++++++++++++++++++++++++++-
 1 file changed, 182 insertions(+), 1 deletion(-)

diff --git a/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c b/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
index 861bf9591a..fb3fda70e9 100644
--- a/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
+++ b/OvmfPkg/Library/AmdSvsmLib/AmdSvsmLib.c
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -43,6 +44,78 @@ SnpTerminate (
   CpuDeadLoop ();
 }
 
+/**
+  Issue an SVSM request.
+
+  Invokes the SVSM to process a request on behalf of the guest.
+
+  @param[in,out]  SvsmCallData  Pointer to the SVSM call data
+
+  @return                       Contents of RAX upon return from VMGEXIT
+**/
+STATIC
+UINTN
+SvsmMsrProtocol (
+  IN OUT SVSM_CALL_DATA  *SvsmCallData
+  )
+{
+  MSR_SEV_ES_GHCB_REGISTER  Msr;
+  UINT64                    CurrentMsr;
+  UINT8                     Pending;
+  BOOLEAN                   InterruptState;
+  UINTN                     Ret;
+
+  do {
+    //
+    // Be sure that an interrupt can't cause a #VC while the GHCB MSR protocol
+    // is being used (#VC handler will ASSERT if lower 12-bits are not zero).
+    //
+    InterruptState = GetInterruptState ();
+    if (InterruptState) {
+      DisableInterrupts ();
+    }
+
+    Pending                   = 0;
+    SvsmCallData->CallPending = &Pending;
+
+    CurrentMsr = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    Msr.Uint64                  = 0;
+    Msr.SnpVmplRequest.Function = GHCB_INFO_SNP_VMPL_REQUEST;
+    Msr.SnpVmplRequest.Vmpl     = 0;
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.Uint64);
+
+    //
+    // Guest memory is used for the guest-SVSM communication, so fence the
+    // invocation of the VMGEXIT instruction to ensure VMSA accesses are
+    // synchronized properly.
+    //
+    MemoryFence ();
+    Ret = AsmVmgExitSvsm (SvsmCallData);
+    MemoryFence ();
+
+    Msr.Uint64 = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+    AsmWriteMsr64 (MSR_SEV_ES_GHCB, CurrentMsr);
+
+    if (InterruptState) {
+      EnableInterrupts ();
+    }
+
+    if (Pending != 0) {
+      SnpTerminate ();
+    }
+
+    if ((Msr.SnpVmplResponse.Function != GHCB_INFO_SNP_VMPL_RESPONSE) ||
+        (Msr.SnpVmplResponse.ErrorCode != 0))
+    {
+      SnpTerminate ();
+    }
+  } while (Ret == SVSM_ERR_INCOMPLETE || Ret == SVSM_ERR_BUSY);
+
+  return Ret;
+}
+
 /**
   Report the presence of an Secure Virtual Services Module (SVSM).
 
@@ -109,6 +182,114 @@ AmdSvsmSnpGetCaa (
   return AmdSvsmIsSvsmPresent () ? SvsmInfo->SvsmCaa : 0;
 }
 
+/**
+  Issue an SVSM request to perform the PVALIDATE instruction.
+
+  Invokes the SVSM to process the PVALIDATE instruction on behalf of the
+  guest to validate or invalidate the memory range specified.
+
+  @param[in]  Info           Pointer to a page state change structure
+
+**/
+STATIC
+VOID
+SvsmPvalidate (
+  IN SNP_PAGE_STATE_CHANGE_INFO  *Info
+  )
+{
+  SVSM_CALL_DATA          SvsmCallData;
+  SVSM_CAA                *Caa;
+  SVSM_PVALIDATE_REQUEST  *Request;
+  SVSM_FUNCTION           Function;
+  BOOLEAN                 Validate;
+  UINTN                   Entry;
+  UINTN                   EntryLimit;
+  UINTN                   Index;
+  UINTN                   EndIndex;
+  UINT64                  Gfn;
+  UINT64                  GfnEnd;
+  UINTN                   Ret;
+
+  Caa = (SVSM_CAA *)AmdSvsmSnpGetCaa ();
+  ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+
+  Function.Id.Protocol = 0;
+  Function.Id.CallId   = 1;
+
+  Request    = (SVSM_PVALIDATE_REQUEST *)Caa->SvsmBuffer;
+  EntryLimit = ((sizeof (Caa->SvsmBuffer) - sizeof (*Request)) /
+                sizeof (Request->Entry[0])) - 1;
+
+  SvsmCallData.Caa   = Caa;
+  SvsmCallData.RaxIn = Function.Uint64;
+  SvsmCallData.RcxIn = (UINT64)(UINTN)Request;
+
+  Entry    = 0;
+  Index    = Info->Header.CurrentEntry;
+  EndIndex = Info->Header.EndEntry;
+
+  while (Index <= EndIndex) {
+    Validate = Info->Entry[Index].Operation == SNP_PAGE_STATE_PRIVATE;
+
+    Request->Header.Entries++;
+    Request->Entry[Entry].Bits.PageSize = Info->Entry[Index].PageSize;
+    Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+    Request->Entry[Entry].Bits.IgnoreCf = 0;
+    Request->Entry[Entry].Bits.Address  = Info->Entry[Index].GuestFrameNumber;
+
+    Entry++;
+    if ((Entry > EntryLimit) || (Index == EndIndex)) {
+      Ret = SvsmMsrProtocol (&SvsmCallData);
+      if ((Ret == SVSM_ERR_PVALIDATE_FAIL_SIZE_MISMATCH) &&
+          (Request->Entry[Request->Header.Next].Bits.PageSize != 0))
+      {
+        // Calculate the Index of the entry after the entry that failed
+        // before clearing the buffer so that processing can continue
+        // from that point
+        Index = Index - (Entry - Request->Header.Next) + 2;
+
+        // Obtain the failing GFN before clearing the buffer
+        Gfn = Request->Entry[Request->Header.Next].Bits.Address;
+
+        // Clear the buffer in prep for creating all new entries
+        ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+        Entry = 0;
+
+        GfnEnd = Gfn + PAGES_PER_2MB_ENTRY - 1;
+        for ( ; Gfn <= GfnEnd; Gfn++) {
+          Request->Header.Entries++;
+          Request->Entry[Entry].Bits.PageSize = 0;
+          Request->Entry[Entry].Bits.Action   = (Validate == TRUE) ? 1 : 0;
+          Request->Entry[Entry].Bits.IgnoreCf = 0;
+          Request->Entry[Entry].Bits.Address  = Gfn;
+
+          Entry++;
+          if ((Entry > EntryLimit) || (Gfn == GfnEnd)) {
+            Ret = SvsmMsrProtocol (&SvsmCallData);
+            if (Ret != 0) {
+              SnpTerminate ();
+            }
+
+            ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+            Entry = 0;
+          }
+        }
+
+        continue;
+      }
+
+      if (Ret != 0) {
+        SnpTerminate ();
+      }
+
+      ZeroMem (Caa->SvsmBuffer, sizeof (Caa->SvsmBuffer));
+      Entry = 0;
+    }
+
+    Index++;
+  }
+}
+
 /**
   Perform a native PVALIDATE operation for the page ranges specified.
 
@@ -193,7 +374,7 @@ AmdSvsmSnpPvalidate (
   IN SNP_PAGE_STATE_CHANGE_INFO  *Info
   )
 {
-  BasePvalidate (Info);
+  AmdSvsmIsSvsmPresent () ? SvsmPvalidate (Info) : BasePvalidate (Info);
 }
 
 /**
--
cgit
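
A note on the 2MB-to-4KB fallback in SvsmPvalidate() above, since the
resume-index arithmetic is easy to misread: when the SVSM returns
SVSM_ERR_PVALIDATE_FAIL_SIZE_MISMATCH, Request->Header.Next identifies the
request entry that failed, and

  Index = Index - (Entry - Request->Header.Next) + 2;

moves the outer loop to the Info entry just past the one that failed. With
made-up numbers: if the request buffer held Info entries 10 through 14 (so
Entry is 5 when SvsmMsrProtocol() returns) and the SVSM reports Header.Next
of 2, the failing 2MB entry maps to Info entry 14 - 5 + 1 + 2 = 12, and the
recalculation gives Index = 14 - (5 - 2) + 2 = 13. The failed 2MB range is
then revalidated as PAGES_PER_2MB_ENTRY (512) separate 4KB entries, and the
"continue" skips the loop's Index++ so processing resumes exactly at Info
entry 13.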
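
To make the new call path concrete, below is a minimal caller-side sketch,
not part of the patch, of how a single 4KB page that has just been made
private could be handed to AmdSvsmSnpPvalidate(). Only the header and entry
field names that SvsmPvalidate() consumes above are relied on; the helper
name ExampleValidateOnePrivatePage, its parameters, and the include paths
are assumptions made for illustration. Real OVMF page-state-change code
typically builds this structure in the GHCB shared buffer rather than in a
caller-supplied one.

#include <Base.h>
#include <Library/AmdSvsmLib.h>    // library class introduced by this series
#include <Register/Amd/Ghcb.h>     // assumed home of SNP_PAGE_STATE_CHANGE_INFO

//
// Hypothetical helper, for illustration only: describe one just-made-private
// 4KB page and ask AmdSvsmLib to validate it.
//
STATIC
VOID
ExampleValidateOnePrivatePage (
  IN OUT SNP_PAGE_STATE_CHANGE_INFO  *Info,   // caller-provided buffer
  IN     UINT64                      Gfn      // guest frame number to validate
  )
{
  Info->Header.CurrentEntry = 0;              // first entry to process
  Info->Header.EndEntry     = 0;              // last entry to process (one entry)

  Info->Entry[0].GuestFrameNumber = Gfn;                     // page to validate
  Info->Entry[0].PageSize         = 0;                       // 0 == 4KB entry
  Info->Entry[0].Operation        = SNP_PAGE_STATE_PRIVATE;  // private => validate

  //
  // With an SVSM present this takes the new SvsmPvalidate() path and issues
  // SVSM_CORE_PVALIDATE through the calling area buffer; with no SVSM it
  // falls back to BasePvalidate() and executes PVALIDATE directly.
  //
  AmdSvsmSnpPvalidate (Info);
}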