/** @file
  Memory Detection for Virtual Machines.

  Copyright (c) 2020, Rebecca Cran <rebecca@bsdio.com>
  Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>

  SPDX-License-Identifier: BSD-2-Clause-Patent

  Module Name:

    MemDetect.c

**/
//
// The package level header files this module uses
//
#include <IndustryStandard/E820.h>
#include <IndustryStandard/Q35MchIch9.h>
#include <PiPei.h>

//
// The Library classes this module consumes
//
#include <Library/BaseLib.h>
#include <Library/BaseMemoryLib.h>
#include <Library/DebugLib.h>
#include <Library/HobLib.h>
#include <Library/IoLib.h>
#include <Library/PcdLib.h>
#include <Library/PciLib.h>
#include <Library/PeimEntryPoint.h>
#include <Library/ResourcePublicationLib.h>
#include <Library/MtrrLib.h>

#include "Platform.h"
#include "Cmos.h"

//
// Physical address width of the guest, in bits; set by
// AddressWidthInitialization().
//
UINT8 mPhysMemAddressWidth;

//
// Base and size of the permanent PEI memory reserved for the S3 resume path;
// set by PublishPeiMemory() when S3 is supported.
//
STATIC UINT32 mS3AcpiReservedMemoryBase;
STATIC UINT32 mS3AcpiReservedMemorySize;

//
// TSEG size in megabytes on the Q35 board; set by
// Q35TsegMbytesInitialization().
//
STATIC UINT16 mQ35TsegMbytes;

BOOLEAN mQ35SmramAtDefaultSmbase = FALSE;
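
/**
  Determine the TSEG (SMRAM) size on the Q35 board, in megabytes, taking a
  QEMU-provided extended TSEG into account if one is offered, and cache the
  result in mQ35TsegMbytes.
**/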
VOID
Q35TsegMbytesInitialization (
  VOID
  )
{
  UINT16        ExtendedTsegMbytes;
  RETURN_STATUS PcdStatus;

  if (mHostBridgeDevId != INTEL_Q35_MCH_DEVICE_ID) {
    DEBUG ((
      DEBUG_ERROR,
      "%a: no TSEG (SMRAM) on host bridge DID=0x%04x; "
      "only DID=0x%04x (Q35) is supported\n",
      __FUNCTION__,
      mHostBridgeDevId,
      INTEL_Q35_MCH_DEVICE_ID
      ));
    ASSERT (FALSE);
    CpuDeadLoop ();
  }

  //
  // Check if QEMU offers an extended TSEG.
  //
  // This can be seen from writing MCH_EXT_TSEG_MB_QUERY to the MCH_EXT_TSEG_MB
  // register, and reading back the register.
  //
  // On a QEMU machine type that does not offer an extended TSEG, the initial
  // write overwrites whatever value a malicious guest OS may have placed in
  // the (unimplemented) register, before entering S3 or rebooting.
  // Subsequently, the read returns MCH_EXT_TSEG_MB_QUERY unchanged.
  //
  // On a QEMU machine type that offers an extended TSEG, the initial write
  // triggers an update to the register. Subsequently, the value read back
  // (which is guaranteed to differ from MCH_EXT_TSEG_MB_QUERY) tells us the
  // number of megabytes.
  //
  PciWrite16 (DRAMC_REGISTER_Q35 (MCH_EXT_TSEG_MB), MCH_EXT_TSEG_MB_QUERY);
  ExtendedTsegMbytes = PciRead16 (DRAMC_REGISTER_Q35 (MCH_EXT_TSEG_MB));
  if (ExtendedTsegMbytes == MCH_EXT_TSEG_MB_QUERY) {
    mQ35TsegMbytes = PcdGet16 (PcdQ35TsegMbytes);
    return;
  }

  DEBUG ((
    DEBUG_INFO,
    "%a: QEMU offers an extended TSEG (%d MB)\n",
    __FUNCTION__,
    ExtendedTsegMbytes
    ));
  PcdStatus = PcdSet16S (PcdQ35TsegMbytes, ExtendedTsegMbytes);
  ASSERT_RETURN_ERROR (PcdStatus);
  mQ35TsegMbytes = ExtendedTsegMbytes;
}
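
/**
  Return the amount of system memory below 4 GB, in bytes, as reported by the
  CMOS.

  @return  The size of low memory, in bytes.
**/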
UINT32
GetSystemMemorySizeBelow4gb (
  VOID
  )
{
  UINT8 Cmos0x34;
  UINT8 Cmos0x35;

  //
  // CMOS 0x34/0x35 specifies the system memory above 16 MB.
  // * CMOS(0x35) is the high byte
  // * CMOS(0x34) is the low byte
  // * The size is specified in 64 KB chunks
  // * Since this is memory above 16 MB, the 16 MB must be added
  //   into the calculation to get the total memory size.
  //
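  // For example, with 1 GB of guest RAM below 4 GB, the memory above 16 MB is
  // 1008 MB = 16128 (0x3F00) chunks of 64 KB: CMOS 0x35/0x34 then read
  // 0x3F/0x00, and the expression below yields 0x3F000000 + 16 MB = 1 GB.
  //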
  Cmos0x34 = (UINT8)CmosRead8 (0x34);
  Cmos0x35 = (UINT8)CmosRead8 (0x35);

  return (UINT32)(((UINTN)((Cmos0x35 << 8) + Cmos0x34) << 16) + SIZE_16MB);
}
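
/**
  Return the amount of system memory above 4 GB, in bytes, as reported by the
  CMOS.

  @return  The size of high memory, in bytes.
**/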
STATIC
UINT64
GetSystemMemorySizeAbove4gb (
  VOID
  )
{
  UINT32 Size;
  UINTN  CmosIndex;

  //
  // CMOS 0x5b-0x5d specifies the system memory above 4 GB.
  // * CMOS(0x5d) is the most significant size byte
  // * CMOS(0x5c) is the middle size byte
  // * CMOS(0x5b) is the least significant size byte
  // * The size is specified in 64 KB chunks
  //
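  // For example, 4 GB of memory above the 4 GB boundary is 65536 (0x010000)
  // chunks of 64 KB: CMOS 0x5d/0x5c/0x5b then read 0x01/0x00/0x00, and the
  // shift below yields 0x100000000, i.e. 4 GB.
  //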
  Size = 0;
  for (CmosIndex = 0x5d; CmosIndex >= 0x5b; CmosIndex--) {
    Size = (UINT32)(Size << 8) + (UINT32)CmosRead8 (CmosIndex);
  }

  return LShiftU64 (Size, 16);
}

/**
  Return the highest address that DXE could possibly use, plus one.
**/
STATIC
UINT64
GetFirstNonAddress (
  VOID
  )
{
  UINT64        FirstNonAddress;
  UINT64        Pci64Base, Pci64Size;
  RETURN_STATUS PcdStatus;

  FirstNonAddress = BASE_4GB + GetSystemMemorySizeAbove4gb ();

  //
  // If DXE is 32-bit, then we're done; PciBusDxe will degrade 64-bit MMIO
  // resources to 32-bit anyway. See DegradeResource() in
  // "PciResourceSupport.c".
  //
#ifdef MDE_CPU_IA32
  if (!FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    return FirstNonAddress;
  }
#endif

  //
  // Otherwise, in order to calculate the highest address plus one, we must
  // consider the 64-bit PCI host aperture too. Fetch the default size.
  //
  Pci64Size = PcdGet64 (PcdPciMmio64Size);

  if (Pci64Size == 0) {
    if (mBootMode != BOOT_ON_S3_RESUME) {
      DEBUG ((
        DEBUG_INFO,
        "%a: disabling 64-bit PCI host aperture\n",
        __FUNCTION__
        ));
      PcdStatus = PcdSet64S (PcdPciMmio64Size, 0);
      ASSERT_RETURN_ERROR (PcdStatus);
    }

    //
    // There's nothing more to do; the amount of memory above 4GB fully
    // determines the highest address plus one. The memory hotplug area (see
    // below) plays no role for the firmware in this case.
    //
    return FirstNonAddress;
  }

  //
  // SeaBIOS aligns both boundaries of the 64-bit PCI host aperture to 1GB, so
  // that the host can map it with 1GB hugepages. Follow suit.
  //
  Pci64Base = ALIGN_VALUE (FirstNonAddress, (UINT64)SIZE_1GB);
  Pci64Size = ALIGN_VALUE (Pci64Size, (UINT64)SIZE_1GB);

  //
  // The 64-bit PCI host aperture should also be "naturally" aligned. The
  // alignment is determined by rounding the size of the aperture down to the
  // next smaller or equal power of two. That is, align the aperture by the
  // largest BAR size that can fit into it.
  //
  Pci64Base = ALIGN_VALUE (Pci64Base, GetPowerOfTwo64 (Pci64Size));
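
  //
  // For example, with a 32 GB aperture the base ends up on a 32 GB boundary;
  // for a 48 GB aperture, GetPowerOfTwo64() yields 32 GB (the largest power of
  // two not exceeding the size), so the base is likewise aligned to 32 GB.
  //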

  if (mBootMode != BOOT_ON_S3_RESUME) {
    //
    // The core PciHostBridgeDxe driver will automatically add this range to
    // the GCD memory space map through our PciHostBridgeLib instance; here we
    // only need to set the PCDs.
    //
    PcdStatus = PcdSet64S (PcdPciMmio64Base, Pci64Base);
    ASSERT_RETURN_ERROR (PcdStatus);
    PcdStatus = PcdSet64S (PcdPciMmio64Size, Pci64Size);
    ASSERT_RETURN_ERROR (PcdStatus);

    DEBUG ((
      DEBUG_INFO,
      "%a: Pci64Base=0x%Lx Pci64Size=0x%Lx\n",
      __FUNCTION__,
      Pci64Base,
      Pci64Size
      ));
  }

  //
  // The useful address space ends with the 64-bit PCI host aperture.
  //
  FirstNonAddress = Pci64Base + Pci64Size;
  return FirstNonAddress;
}

/**
  Initialize the mPhysMemAddressWidth variable, based on guest RAM size.
**/
VOID
AddressWidthInitialization (
  VOID
  )
{
  UINT64 FirstNonAddress;

  //
  // As guest-physical memory size grows, the permanent PEI RAM requirements
  // are dominated by the identity-mapping page tables built by the DXE IPL.
  // The DXE IPL keys off of the physical address bits advertised in the CPU
  // HOB. To conserve memory, we calculate the minimum address width here.
  //
  FirstNonAddress      = GetFirstNonAddress ();
  mPhysMemAddressWidth = (UINT8)HighBitSet64 (FirstNonAddress);

  //
  // If FirstNonAddress is not an integral power of two, then we need an
  // additional bit.
  //
  if ((FirstNonAddress & (FirstNonAddress - 1)) != 0) {
    ++mPhysMemAddressWidth;
  }
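
  //
  // For example, FirstNonAddress = 0x240000000 (9 GB) has its highest set bit
  // at position 33 and is not a power of two, so the width becomes 34 here; it
  // is then raised to the 36-bit minimum below.
  //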

  //
  // The minimum address width is 36 (covers up to and excluding 64 GB, which
  // is the maximum for Ia32 + PAE). The theoretical architecture maximum for
  // X64 long mode is 52 bits, but the DXE IPL clamps that down to 48 bits. We
  // can simply assert that here, since 48 bits are good enough for 256 TB.
  //
  if (mPhysMemAddressWidth <= 36) {
    mPhysMemAddressWidth = 36;
  }

  ASSERT (mPhysMemAddressWidth <= 48);
}

/**
  Calculate the cap for the permanent PEI memory.
**/
STATIC
UINT32
GetPeiMemoryCap (
  VOID
  )
{
  BOOLEAN Page1GSupport;
  UINT32  RegEax;
  UINT32  RegEdx;
  UINT32  Pml4Entries;
  UINT32  PdpEntries;
  UINTN   TotalPages;

  //
  // If DXE is 32-bit, then just return the traditional 64 MB cap.
  //
#ifdef MDE_CPU_IA32
  if (!FeaturePcdGet (PcdDxeIplSwitchToLongMode)) {
    return SIZE_64MB;
  }
#endif

  //
  // Dependent on physical address width, PEI memory allocations can be
  // dominated by the page tables built for 64-bit DXE. So we key the cap off
  // of those. The code below is based on CreateIdentityMappingPageTables() in
  // "MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c".
  //
  Page1GSupport = FALSE;
  if (PcdGetBool (PcdUse1GPageTable)) {
    AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
    if (RegEax >= 0x80000001) {
      AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        Page1GSupport = TRUE;
      }
    }
  }

  if (mPhysMemAddressWidth <= 39) {
    Pml4Entries = 1;
    PdpEntries  = 1 << (mPhysMemAddressWidth - 30);
    ASSERT (PdpEntries <= 0x200);
  } else {
    Pml4Entries = 1 << (mPhysMemAddressWidth - 39);
    ASSERT (Pml4Entries <= 0x200);
    PdpEntries = 512;
  }

  TotalPages = Page1GSupport ? Pml4Entries + 1 :
               (PdpEntries + 1) * Pml4Entries + 1;
  ASSERT (TotalPages <= 0x40201);
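
  //
  // For example, with mPhysMemAddressWidth == 36 and no 1 GB page support,
  // Pml4Entries is 1 and PdpEntries is 64, so TotalPages is 66 -- roughly
  // 264 KB of page tables, dwarfed by the 64 MB added below.
  //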

  //
  // Add 64 MB for miscellaneous allocations. Note that for
  // mPhysMemAddressWidth values close to 36, the cap will actually be
  // dominated by this increment.
  //
  return (UINT32)(EFI_PAGES_TO_SIZE (TotalPages) + SIZE_64MB);
}

/**
  Publish PEI core memory

  @return EFI_SUCCESS  The PEIM initialized successfully.
**/
EFI_STATUS
PublishPeiMemory (
  VOID
  )
{
  EFI_STATUS           Status;
  EFI_PHYSICAL_ADDRESS MemoryBase;
  UINT64               MemorySize;
  UINT32               LowerMemorySize;
  UINT32               PeiMemoryCap;

  LowerMemorySize = GetSystemMemorySizeBelow4gb ();
  if (FeaturePcdGet (PcdSmmSmramRequire)) {
    //
    // TSEG is chipped from the end of low RAM
    //
    LowerMemorySize -= mQ35TsegMbytes * SIZE_1MB;
  }

  //
  // If S3 is supported, then the S3 permanent PEI memory is placed next,
  // downwards. Its size is primarily dictated by CpuMpPei. The formula below
  // is an approximation.
  //
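  // For example, with 8 VCPUs and an AP stack size of 32 KB (a value assumed
  // here purely for illustration), this reserves 512 KB + 8 * 32 KB = 768 KB.
  //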
  if (mS3Supported) {
    mS3AcpiReservedMemorySize = SIZE_512KB +
                                mMaxCpuCount *
                                PcdGet32 (PcdCpuApStackSize);
    mS3AcpiReservedMemoryBase = LowerMemorySize - mS3AcpiReservedMemorySize;
    LowerMemorySize           = mS3AcpiReservedMemoryBase;
  }

  if (mBootMode == BOOT_ON_S3_RESUME) {
    MemoryBase = mS3AcpiReservedMemoryBase;
    MemorySize = mS3AcpiReservedMemorySize;
  } else {
    PeiMemoryCap = GetPeiMemoryCap ();
    DEBUG ((
      DEBUG_INFO,
      "%a: mPhysMemAddressWidth=%d PeiMemoryCap=%u KB\n",
      __FUNCTION__,
      mPhysMemAddressWidth,
      PeiMemoryCap >> 10
      ));

    //
    // Determine the range of memory to use during PEI
    //
    // Technically we could lay the permanent PEI RAM over SEC's temporary
    // decompression and scratch buffer even if "secure S3" is needed, since
    // their lifetimes don't overlap. However, PeiFvInitialization() will cover
    // RAM up to PcdOvmfDecompressionScratchEnd with an EfiACPIMemoryNVS memory
    // allocation HOB, and other allocations served from the permanent PEI RAM
    // shouldn't overlap with that HOB.
    //
    MemoryBase = mS3Supported && FeaturePcdGet (PcdSmmSmramRequire) ?
                 PcdGet32 (PcdOvmfDecompressionScratchEnd) :
                 PcdGet32 (PcdOvmfDxeMemFvBase) + PcdGet32 (PcdOvmfDxeMemFvSize);
    MemorySize = LowerMemorySize - MemoryBase;
    if (MemorySize > PeiMemoryCap) {
      MemoryBase = LowerMemorySize - PeiMemoryCap;
      MemorySize = PeiMemoryCap;
    }
  }

  //
  // Publish this memory to the PEI Core
  //
  Status = PublishSystemMemory (MemoryBase, MemorySize);
  ASSERT_EFI_ERROR (Status);
  return Status;
}

/**
  Perform Memory Detection for QEMU / KVM
**/
STATIC
VOID
QemuInitializeRam (
  VOID
  )
{
  UINT64        LowerMemorySize;
  UINT64        UpperMemorySize;
  MTRR_SETTINGS MtrrSettings;
  EFI_STATUS    Status;

  DEBUG ((DEBUG_INFO, "%a called\n", __FUNCTION__));

  //
  // Determine total memory size available
  //
  LowerMemorySize = GetSystemMemorySizeBelow4gb ();
  UpperMemorySize = GetSystemMemorySizeAbove4gb ();

  if (mBootMode == BOOT_ON_S3_RESUME) {
    //
    // Create the following memory HOB as an exception on the S3 boot path.
    //
    // Normally we'd create memory HOBs only on the normal boot path. However,
    // CpuMpPei specifically needs such a low-memory HOB on the S3 path as
    // well, for "borrowing" a subset of it temporarily, for the AP startup
    // vector.
    //
    // CpuMpPei saves the original contents of the borrowed area in permanent
    // PEI RAM, in a backup buffer allocated with the normal PEI services.
    // CpuMpPei restores the original contents ("returns" the borrowed area) at
    // End-of-PEI. End-of-PEI in turn is emitted by S3Resume2Pei before
    // transferring control to the OS's wakeup vector in the FACS.
    //
    // We expect any other PEIMs that "borrow" memory similarly to CpuMpPei to
    // restore the original contents. Furthermore, we expect all such PEIMs
    // (CpuMpPei included) to claim the borrowed areas by producing memory
    // allocation HOBs, and to honor preexistent memory allocation HOBs when
    // looking for an area to borrow.
    //
    AddMemoryRangeHob (0, BASE_512KB + BASE_128KB);
  } else {
    //
    // Create memory HOBs
    //
    AddMemoryRangeHob (0, BASE_512KB + BASE_128KB);

    if (FeaturePcdGet (PcdSmmSmramRequire)) {
      UINT32 TsegSize;

      TsegSize = mQ35TsegMbytes * SIZE_1MB;
      AddMemoryRangeHob (BASE_1MB, LowerMemorySize - TsegSize);
      AddReservedMemoryBaseSizeHob (
        LowerMemorySize - TsegSize,
        TsegSize,
        TRUE
        );
    } else {
      AddMemoryRangeHob (BASE_1MB, LowerMemorySize);
    }

    if (UpperMemorySize != 0) {
      AddMemoryBaseSizeHob (BASE_4GB, UpperMemorySize);
    }
  }

  //
  // We'd like to keep the following ranges uncached:
  // - [640 KB, 1 MB)
  // - [LowerMemorySize, 4 GB)
  //
  // Everything else should be WB. Unfortunately, programming the inverse (i.e.
  // keeping the default UC, and configuring the complement set of the above as
  // WB) is not reliable in general, because the end of the upper RAM can have
  // practically any alignment, and we may not have enough variable MTRRs to
  // cover it exactly.
  //
  if (IsMtrrSupported ()) {
    MtrrGetAllMtrrs (&MtrrSettings);

    //
    // MTRRs disabled, fixed MTRRs disabled, default type is uncached
    //
    ASSERT ((MtrrSettings.MtrrDefType & BIT11) == 0);
    ASSERT ((MtrrSettings.MtrrDefType & BIT10) == 0);
    ASSERT ((MtrrSettings.MtrrDefType & 0xFF) == 0);

    //
    // flip default type to writeback
    //
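    // In the IA32_MTRR_DEF_TYPE MSR, BIT11 enables MTRRs, BIT10 enables the
    // fixed-range MTRRs, and the low-byte value 6 selects writeback as the
    // default memory type.
    //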
    SetMem (&MtrrSettings.Fixed, sizeof MtrrSettings.Fixed, 0x06);
    ZeroMem (&MtrrSettings.Variables, sizeof MtrrSettings.Variables);
    MtrrSettings.MtrrDefType |= BIT11 | BIT10 | 6;
    MtrrSetAllMtrrs (&MtrrSettings);

    //
    // Set memory range from 640KB to 1MB to uncacheable
    //
    Status = MtrrSetMemoryAttribute (
               BASE_512KB + BASE_128KB,
               BASE_1MB - (BASE_512KB + BASE_128KB),
               CacheUncacheable
               );
    ASSERT_EFI_ERROR (Status);

    //
    // Set memory range from the "top of lower RAM" (RAM below 4GB) to 4GB as
    // uncacheable
    //
    Status = MtrrSetMemoryAttribute (
               LowerMemorySize,
               SIZE_4GB - LowerMemorySize,
               CacheUncacheable
               );
    ASSERT_EFI_ERROR (Status);
  }
}

/**
  Publish system RAM and reserve memory regions
**/
VOID
InitializeRamRegions (
  VOID
  )
{
  QemuInitializeRam ();

  if (mS3Supported && (mBootMode != BOOT_ON_S3_RESUME)) {
    //
    // This is the memory range that will be used for PEI on S3 resume
    //
    BuildMemoryAllocationHob (
      mS3AcpiReservedMemoryBase,
      mS3AcpiReservedMemorySize,
      EfiACPIMemoryNVS
      );

    //
    // Cover the initial RAM area used as stack and temporary PEI heap.
    //
    // This is reserved as ACPI NVS so it can be used on S3 resume.
    //
    BuildMemoryAllocationHob (
      PcdGet32 (PcdOvmfSecPeiTempRamBase),
      PcdGet32 (PcdOvmfSecPeiTempRamSize),
      EfiACPIMemoryNVS
      );

    //
    // SEC stores its table of GUIDed section handlers here.
    //
    BuildMemoryAllocationHob (
      PcdGet64 (PcdGuidedExtractHandlerTableAddress),
      PcdGet32 (PcdGuidedExtractHandlerTableSize),
      EfiACPIMemoryNVS
      );

#ifdef MDE_CPU_X64
    //
    // Reserve the initial page tables built by the reset vector code.
    //
    // Since this memory range will be used by the Reset Vector on S3
    // resume, it must be reserved as ACPI NVS.
    //
    BuildMemoryAllocationHob (
      (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfSecPageTablesBase),
      (UINT64)(UINTN)PcdGet32 (PcdOvmfSecPageTablesSize),
      EfiACPIMemoryNVS
      );
#endif
  }

  if (mBootMode != BOOT_ON_S3_RESUME) {
    if (!FeaturePcdGet (PcdSmmSmramRequire)) {
      //
      // Reserve the lock box storage area
      //
      // Since this memory range will be used on S3 resume, it must be
      // reserved as ACPI NVS.
      //
      // If S3 is unsupported, then various drivers might still write to the
      // LockBox area. We ought to prevent DXE from serving allocation requests
      // such that they would overlap the LockBox storage.
      //
      ZeroMem (
        (VOID *)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageBase),
        (UINTN)PcdGet32 (PcdOvmfLockBoxStorageSize)
        );
      BuildMemoryAllocationHob (
        (EFI_PHYSICAL_ADDRESS)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageBase),
        (UINT64)(UINTN)PcdGet32 (PcdOvmfLockBoxStorageSize),
        mS3Supported ? EfiACPIMemoryNVS : EfiBootServicesData
        );
    }

    if (FeaturePcdGet (PcdSmmSmramRequire)) {
      UINT32 TsegSize;

      //
      // Make sure the TSEG area that we reported as a reserved memory resource
      // cannot be used for reserved memory allocations.
      //
      TsegSize = mQ35TsegMbytes * SIZE_1MB;
      BuildMemoryAllocationHob (
        GetSystemMemorySizeBelow4gb () - TsegSize,
        TsegSize,
        EfiReservedMemoryType
        );
    }
  }
}