Compare commits

...

21 Commits

Author SHA1 Message Date
Timo Kreuzer
91bdeaeb09 cleanup 2023-05-09 23:24:22 +03:00
Timo Kreuzer
972d275765 Unhack kstack.c even more 2023-05-09 23:24:11 +03:00
Timo Kreuzer
98a3f5e99c Unhack kstack.c 2023-05-09 23:24:00 +03:00
Timo Kreuzer
2a14a9bad0 FIXME: Erase Stack PTEs before releasing them 2023-05-09 23:23:49 +03:00
Timo Kreuzer
544f89e92f ??? Move a delete in MiDeleteSystemPageableVm 2023-05-09 23:23:38 +03:00
Timo Kreuzer
ec3b4fd67c debug hacks 2023-05-09 23:23:26 +03:00
Timo Kreuzer
780f79e94b Use new kstack 2023-05-09 23:23:16 +03:00
Timo Kreuzer
51c35c0dfe +hacked kstack.c 2023-05-09 23:23:04 +03:00
Timo Kreuzer
333caf8b10 Implement new kernel stack PTE allocator for x64 2023-05-09 23:22:51 +03:00
Timo Kreuzer
8d33e43d6f +kvalayout.c 2023-05-09 23:22:38 +03:00
Timo Kreuzer
8cc6a6cc54 +system ptes 2023-05-09 23:22:27 +03:00
Timo Kreuzer
7432b6d794 [NTOS:MM/x64] Randomize location of system PTEs 2023-05-09 23:22:16 +03:00
Timo Kreuzer
9e655d3347 [NTOS:MM/x64] Allocate the debug PTE from system PTEs 2023-05-09 23:22:04 +03:00
Timo Kreuzer
6645cc4df6 [NTOS:MM:x64] Randomize system cache location 2023-05-09 23:21:51 +03:00
Timo Kreuzer
9eedc04a05 [NTOS:MM:x64] Randomize PFN database location 2023-05-09 23:21:40 +03:00
Timo Kreuzer
7ba7d99e65 [NTOS:MM:x64] Randomize non-paged pool location 2023-05-09 23:21:29 +03:00
Timo Kreuzer
ec240cc5a0 [NTOS:MM/x64] Randomize location of paged pool 2023-05-09 23:21:14 +03:00
Timo Kreuzer
3f8dce5133 [NTOS:MM/x64] "Reserve" the entire kernel space except system cache
RosMm uses MEMORY_AREAs to map sections and VACBs in system space. The previous implementation allowed them to be allocated anywhere, except in regions already used for other purposes. This makes sense on x86, where the address space is limited, but it is unnecessary on x64.
This lets us place all other VA regions wherever we want, without having to allocate memory areas for completely unused regions that do not even have PXEs mapped (a sketch of the idea follows the commit list).
2023-05-09 23:21:02 +03:00
Timo Kreuzer
34dc4b5c91 [NTOS:MM] Map PPEs and PDEs in MmCreateVirtualMappingUnsafe
This is required for both x86 and x64. On x86 we only got away without it because the page fault handler is buggy: it treats a fault on kernel PTE addresses as a user-mode fault and makes the PDE valid for us. On x64 this is not enough, because that trick only works for invalid PDEs, not for invalid PPEs. We only got away with it so far because RosMm sections are allocated from the first range available for memory areas, which happens to be the system cache WS directly following the shared user page, which already has a PPE mapped (see the sketch after the MmCreateVirtualMappingUnsafe hunk below).
The bug in the fault handler needs to stay for now, since ARM3 also depends on it.
2023-05-09 23:20:49 +03:00
Timo Kreuzer
1f36aeb5ba [NTOS:MM/x64] Map PXEs of all assigned VA regions 2023-05-09 23:20:37 +03:00
Timo Kreuzer
5259e7d6e9 [NTOS:MM/x64] Implement initial KASLR support for x64 2023-05-09 23:20:25 +03:00
17 changed files with 710 additions and 55 deletions
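
A minimal sketch of the "Reserve" commit's idea (it mirrors the MiInitSystemMemoryAreas hunk at the end of this diff; assumes MiSystemVaRegions is already populated): two static memory areas bracket the system cache window, so RosMm can only place section and VACB mappings inside it.

    ULONG64 CacheStart = (ULONG64)MiSystemVaRegions[AssignedRegionSystemCache].BaseAddress;
    ULONG64 CacheEnd = CacheStart + MiSystemVaRegions[AssignedRegionSystemCache].NumberOfBytes;
    /* Reserve everything below the system cache window... */
    MiCreateArm3StaticMemoryArea((PVOID)MI_REAL_SYSTEM_RANGE_START,
                                 CacheStart - MI_REAL_SYSTEM_RANGE_START, FALSE);
    /* ...and everything above it */
    MiCreateArm3StaticMemoryArea((PVOID)CacheEnd, 0xFFFFFFFFFFFFFFFFULL - CacheEnd, FALSE);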

View File

@@ -14,35 +14,37 @@
#define HYPER_SPACE 0xFFFFF70000000000ULL // 512 GB hyper space [MiVaProcessSpace]
#define HYPER_SPACE_END 0xFFFFF77FFFFFFFFFULL
//#define MI_SHARED_SYSTEM_PAGE 0xFFFFF78000000000ULL
#define MI_SYSTEM_CACHE_WS_START 0xFFFFF78000001000ULL // 512 GB - 4 KB system cache working set
//#define MI_LOADER_MAPPINGS 0xFFFFF80000000000ULL // 512 GB loader mappings aka KSEG0_BASE (NDK) [MiVaBootLoaded]
#define MM_SYSTEM_SPACE_START 0xFFFFF88000000000ULL // 128 GB system PTEs [MiVaSystemPtes]
#define MI_DEBUG_MAPPING (PVOID)0xFFFFF89FFFFFF000ULL // FIXME should be allocated from System PTEs
#define MI_PAGED_POOL_START (PVOID)0xFFFFF8A000000000ULL // 128 GB paged pool [MiVaPagedPool]
//#define MI_PAGED_POOL_END 0xFFFFF8BFFFFFFFFFULL
//#define MI_SESSION_SPACE_START 0xFFFFF90000000000ULL // 512 GB session space [MiVaSessionSpace]
#define MI_LOADER_MAPPINGS 0xFFFFF80000000000ULL // 512 GB loader mappings aka KSEG0_BASE (NDK) [MiVaBootLoaded]
#define MI_SESSION_SPACE_START 0xFFFFF90000000000ULL // 512 GB session space [MiVaSessionSpace]
//#define MI_SESSION_VIEW_END 0xFFFFF97FFF000000ULL
#define MI_SESSION_SPACE_END 0xFFFFF98000000000ULL
#define MI_SYSTEM_CACHE_START 0xFFFFF98000000000ULL // 1 TB system cache (on Vista+ this is dynamic VA space) [MiVaSystemCache,MiVaSpecialPoolPaged,MiVaSpecialPoolNonPaged]
#define MI_SYSTEM_CACHE_END 0xFFFFFA7FFFFFFFFFULL
#define MI_PFN_DATABASE 0xFFFFFA8000000000ULL // up to 5.5 TB PFN database followed by non paged pool [MiVaPfnDatabase/MiVaNonPagedPool]
#define MI_NONPAGED_POOL_END (PVOID)0xFFFFFFFFFFBFFFFFULL
//#define MM_HAL_VA_START 0xFFFFFFFFFFC00000ULL // 4 MB HAL mappings, defined in NDK [MiVaHal]
#define MI_HIGHEST_SYSTEM_ADDRESS (PVOID)0xFFFFFFFFFFFFFFFFULL
#define MmSystemRangeStart ((PVOID)MI_REAL_SYSTEM_RANGE_START)
/* Dummy values (dynamically assigned) */
#define MI_NONPAGED_POOL_END 0
#define MI_PAGED_POOL_START 0
#define MI_SYSTEM_CACHE_START 0
#define MI_SYSTEM_CACHE_WS_START 0
#define MI_DEBUG_MAPPING 0
/* WOW64 address definitions */
#define MM_HIGHEST_USER_ADDRESS_WOW64 0x7FFEFFFF
#define MM_SYSTEM_RANGE_START_WOW64 0x80000000
/* The size of the virtual memory area that is mapped using a single PDE */
#define PDE_MAPPED_VA (PTE_PER_PAGE * PAGE_SIZE)
#define PDE_MAPPED_VA (PTE_PER_PAGE * (ULONG64)PAGE_SIZE)
#define PPE_MAPPED_VA (PDE_PER_PAGE * (ULONG64)PDE_MAPPED_VA)
#define PXE_MAPPED_VA (PPE_PER_PAGE * (ULONG64)PPE_MAPPED_VA)
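/* With 4 KB pages and 512 entries per table: PDE_MAPPED_VA = 2 MB, PPE_MAPPED_VA = 1 GB, PXE_MAPPED_VA = 512 GB */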
extern PVOID MiSystemPteSpaceStart;
/* Misc address definitions */
//#define MI_NON_PAGED_SYSTEM_START_MIN MM_SYSTEM_SPACE_START // FIXME
//#define MI_SYSTEM_PTE_START MM_SYSTEM_SPACE_START
//#define MI_SYSTEM_PTE_END (MI_SYSTEM_PTE_START + MI_NUMBER_SYSTEM_PTES * PAGE_SIZE - 1)
#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(KSEG0_BASE)
#define MI_SYSTEM_PTE_BASE (PVOID)MiAddressToPte(MiSystemPteSpaceStart)
#define MM_HIGHEST_VAD_ADDRESS (PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (16 * PAGE_SIZE))
#define MI_MAPPING_RANGE_START HYPER_SPACE
#define MI_MAPPING_RANGE_END (MI_MAPPING_RANGE_START + MI_HYPERSPACE_PTES * PAGE_SIZE)
@@ -202,6 +204,13 @@ MiAddressToPxi(PVOID Address)
return ((((ULONG64)Address) >> PXI_SHIFT) & 0x1FF);
}
FORCEINLINE
PVOID
MiPxiToAddress(ULONG Pxi)
{
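/* Place the 9-bit PXI at bits 55-63, then shift arithmetically right by 16 so it lands in bits 39-47 with canonical sign extension */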
return (PVOID)((((LONG64)Pxi) << 55) >> 16);
}
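/* e.g. MiPxiToAddress(0x1ED) == (PVOID)0xFFFFF68000000000 (PTE_BASE): indices >= 256 sign-extend into kernel space */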
/* Convert a PTE into a corresponding address */
FORCEINLINE
PVOID
@@ -370,3 +379,46 @@ MiIsPdeForAddressValid(PVOID Address)
(MiAddressToPde(Address)->u.Hard.Valid));
}
typedef enum _MI_ASSIGNED_REGION_TYPES
{
AssignedRegionNonPagedPool = 0,
AssignedRegionPagedPool = 1,
AssignedRegionSystemCache = 2,
AssignedRegionSystemPtes = 3,
AssignedRegionUltraZero = 4,
AssignedRegionPfnDatabase = 5,
AssignedRegionCfg = 6,
AssignedRegionHyperSpace = 7,
AssignedRegionKernelStacks = 8,
AssignedRegionPageTables = 9,
AssignedRegionSession = 10,
AssignedRegionSecureNonPagedPool = 11,
AssignedRegionSystemImages = 12,
AssignedRegionMaximum = 13
} MI_ASSIGNED_REGION_TYPES, * PMI_ASSIGNED_REGION_TYPES;
typedef struct _MI_SYSTEM_VA_ASSIGNMENT
{
VOID* BaseAddress;
ULONGLONG NumberOfBytes;
} MI_SYSTEM_VA_ASSIGNMENT, *PMI_SYSTEM_VA_ASSIGNMENT;
extern MI_SYSTEM_VA_ASSIGNMENT MiSystemVaRegions[AssignedRegionMaximum];
VOID
NTAPI
MiInitializeKernelVaLayout(
_In_ const LOADER_PARAMETER_BLOCK* LoaderBlock);
VOID
MiInitializeStackAllocator(
VOID);
PMMPTE
MiReserveKernelStackPtes(
_In_ ULONG NumberOfPtes);
VOID
MiReleaseKernelStackPtes(
_In_ PMMPTE FirstPte,
_In_ ULONG NumberOfPtes);
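
The MiSystemVaRegions table declared above is the single source of truth for the randomized layout; consumers simply read their slot, as the MmArmInitSystem and MiBuildNonPagedPool hunks below do. A condensed sketch:

    PMI_SYSTEM_VA_ASSIGNMENT Region = &MiSystemVaRegions[AssignedRegionNonPagedPool];
    MmNonPagedPoolStart = Region->BaseAddress;
    if (MmMaximumNonPagedPoolInBytes > Region->NumberOfBytes)
        MmMaximumNonPagedPoolInBytes = Region->NumberOfBytes;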

View File

@@ -125,7 +125,7 @@
#define MI_WRITE_VALID_PPE MI_WRITE_VALID_PTE
/* Translating virtual addresses to physical addresses
(See: "Intel<EFBFBD> 64 and IA-32 Architectures Software Developer<EFBFBD>s Manual
(See: "Intel® 64 and IA-32 Architectures Software Developers Manual
Volume 3A: System Programming Guide, Part 1, CHAPTER 4 PAGING")
Page directory (PD) and Page table (PT) definitions
Page directory entry (PDE) and Page table entry (PTE) definitions
@@ -219,3 +219,20 @@ C_ASSERT(PD_COUNT == 1);
(PMMPTE)((ULONG_PTR)MmNonPagedPoolEnd - \
(((x)->u.Subsect.SubsectionAddressHigh << 7) | \
(x)->u.Subsect.SubsectionAddressLow << 3))
FORCEINLINE
PMMPTE
MiReserveKernelStackPtes(
_In_ ULONG NumberOfPtes)
{
return MiReserveSystemPtes(NumberOfPtes, SystemPteSpace);
}
FORCEINLINE
VOID
MiReleaseKernelStackPtes(
_In_ PMMPTE FirstPte,
_In_ ULONG NumberOfPtes)
{
MiReleaseSystemPtes(FirstPte, NumberOfPtes, SystemPteSpace);
}

View File

@@ -434,6 +434,12 @@ typedef struct _MMPFN
MMWSLE Wsle;
struct _MMPFN* NextLRU;
struct _MMPFN* PreviousLRU;
struct
{
ULONG IsStackPfn : 1;
ULONG Level : 3;
} Dbg1;
} MMPFN, *PMMPFN;
extern PMMPFN MmPfnDatabase;

View File

@@ -446,6 +446,9 @@ KiSystemStartup(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Check for break-in */
if (KdPollBreakIn()) DbgBreakPointWithStatus(DBG_STATUS_CONTROL_C);
/* Initialize the kernel VA layout */
MiInitializeKernelVaLayout(LoaderBlock);
}
DPRINT1("Pcr = %p, Gdt = %p, Idt = %p, Tss = %p\n",

View File

@@ -15,9 +15,10 @@ extern "C" {
#define MI_LOWEST_VAD_ADDRESS (PVOID)MM_LOWEST_USER_ADDRESS
/* Make the code cleaner with some definitions for size multiples */
#define _1KB (1024u)
#define _1KB ((SIZE_T)1024u)
#define _1MB (1024 * _1KB)
#define _1GB (1024 * _1MB)
#define _1TB (1024 * _1GB)
/* Everyone loves 64K */
#define _64K (64 * _1KB)

View File

@@ -2311,7 +2311,7 @@ MmArmInitSystem(IN ULONG Phase,
ASSERT(PointerPte == TestPte);
/* Try the last nonpaged pool address */
PointerPte = (PMMPTE)MI_NONPAGED_POOL_END;
PointerPte = (PMMPTE)MmNonPagedPoolEnd;
MI_MAKE_PROTOTYPE_PTE(&TempPte, PointerPte);
TestPte = MiProtoPteToPte(&TempPte);
ASSERT(PointerPte == TestPte);
@@ -2522,14 +2522,12 @@ MmArmInitSystem(IN ULONG Phase,
/* Define limits for system cache */
#ifdef _M_AMD64
MmSizeOfSystemCacheInPages = ((MI_SYSTEM_CACHE_END + 1) - MI_SYSTEM_CACHE_START) / PAGE_SIZE;
MmSizeOfSystemCacheInPages = MiSystemVaRegions[AssignedRegionSystemCache].NumberOfBytes / PAGE_SIZE;
#else
MmSizeOfSystemCacheInPages = ((ULONG_PTR)MI_PAGED_POOL_START - (ULONG_PTR)MI_SYSTEM_CACHE_START) / PAGE_SIZE;
#endif
MmSystemCacheEnd = (PVOID)((ULONG_PTR)MmSystemCacheStart + (MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1);
#ifdef _M_AMD64
ASSERT(MmSystemCacheEnd == (PVOID)MI_SYSTEM_CACHE_END);
#else
#ifndef _M_AMD64
ASSERT(MmSystemCacheEnd == (PVOID)((ULONG_PTR)MI_PAGED_POOL_START - 1));
#endif

View File

@@ -759,6 +759,52 @@ MiResolveDemandZeroFault(IN PVOID Address,
return STATUS_PAGE_FAULT_DEMAND_ZERO;
}
VOID
MiMakeKernelPageTableValid(
_In_ PVOID Address)
{
PEPROCESS CurrentProcess = PsGetCurrentProcess();
PMMPTE PointerPte = MiAddressToPte(Address);
PMMPDE PointerPde = MiAddressToPde(Address);
#if (_MI_PAGING_LEVELS >= 3)
/* Check if the PPE is valid */
PMMPPE PointerPpe = MiAddressToPpe(Address);
if (PointerPpe->u.Hard.Valid == 0)
{
/* Right now, we only handle scenarios where the PPE is totally empty */
ASSERT(PointerPpe->u.Long == 0);
/* Resolve a demand zero fault */
MiResolveDemandZeroFault(PointerPde,
PointerPpe,
MM_EXECUTE_READWRITE,
CurrentProcess,
MM_NOIRQL);
/* We should come back with a valid PPE */
ASSERT(PointerPpe->u.Hard.Valid == 1);
}
#endif
/* Check if the PDE is valid */
if (PointerPde->u.Hard.Valid == 0)
{
/* Right now, we only handle scenarios where the PDE is totally empty */
ASSERT(PointerPde->u.Long == 0);
/* Resolve a demand zero fault */
MiResolveDemandZeroFault(PointerPte,
PointerPde,
MM_EXECUTE_READWRITE,
CurrentProcess,
MM_NOIRQL);
/* We should come back with a valid PDE */
ASSERT(PointerPde->u.Hard.Valid == 1);
}
}
static
NTSTATUS
NTAPI

View File

@@ -624,6 +624,7 @@ MiInsertPageInFreeList(IN PFN_NUMBER PageFrameIndex)
ASSERT(Pfn1->u3.e1.RemovalRequested == 0);
ASSERT(Pfn1->u4.VerifierAllocation == 0);
ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
// Pfn1->PteAddress = NULL;
/* Get the free page list and increment its count */
ListHead = &MmFreePageListHead;
@@ -1143,6 +1144,8 @@ MiDecrementShareCount(IN PMMPFN Pfn1,
DPRINT("Decrementing %p from %p\n", Pfn1, _ReturnAddress());
//if (Pfn1->Dbg1.IsStackPfn) __debugbreak();
/* Page must be in-use */
if ((Pfn1->u3.e1.PageLocation != ActiveAndValid) &&
(Pfn1->u3.e1.PageLocation != StandbyPageList))
@@ -1162,6 +1165,14 @@ MiDecrementShareCount(IN PMMPFN Pfn1,
ASSERT(Pfn1->u2.ShareCount < 0xF000000);
if (!--Pfn1->u2.ShareCount)
{
#if 0
if (Pfn1->u3.e1.PageLocation == ActiveAndValid)
{
PMMPTE PteAddress = (PMMPTE)((ULONG_PTR)Pfn1->PteAddress & ~7);
if (MmIsAddressValid(PteAddress) &&
(PteAddress->u.Hard.Valid)) __debugbreak();
}
#endif
/* Was this a prototype PTE? */
if (Pfn1->u3.e1.PrototypePte)
{
@@ -1263,6 +1274,8 @@ MiDecrementReferenceCount(IN PMMPFN Pfn1,
return;
}
// Pfn1->PteAddress = NULL;
/* Check to see which list this page should go into */
if (Pfn1->u3.e1.Modified == 1)
{

View File

@@ -221,7 +221,7 @@ MmDeleteKernelStack(IN PVOID StackBase,
/* Acquire the PFN lock */
OldIrql = MiAcquirePfnLock();
//__debugbreak();
//
// Loop them
//
@@ -235,6 +235,9 @@ MmDeleteKernelStack(IN PVOID StackBase,
/* Get the PTE's page */
PageFrameNumber = PFN_FROM_PTE(PointerPte);
Pfn1 = MiGetPfnEntry(PageFrameNumber);
if (Pfn1->u3.e1.PageLocation != ActiveAndValid) __debugbreak();
//PointerPte->u.Long = 0;
/* Now get the page of the page table mapping it */
PageTableFrameNumber = Pfn1->u4.PteFrame;
@@ -267,7 +270,7 @@ MmDeleteKernelStack(IN PVOID StackBase,
//
// Release the PTEs
//
MiReleaseSystemPtes(PointerPte, StackPages + 1, SystemPteSpace);
MiReleaseKernelStackPtes(PointerPte, StackPages + 1);
}
PVOID
@@ -321,7 +324,7 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
//
// Reserve stack pages, plus a guard page
//
StackPte = MiReserveSystemPtes(StackPtes + 1, SystemPteSpace);
StackPte = MiReserveKernelStackPtes(StackPtes + 1);
if (!StackPte) return NULL;
//
@@ -329,6 +332,8 @@ MmCreateKernelStack(IN BOOLEAN GuiStack,
//
BaseAddress = MiPteToAddress(StackPte + StackPtes + 1);
DbgPrint("## CREATE BaseAddress %p, PTE %p, PDE %p\n", BaseAddress, StackPte, MiAddressToPte(StackPte));;
//
// Select the right PTE address where we actually start committing pages
//

View File

@@ -107,7 +107,7 @@ MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
// Find the last cluster in the list that doesn't contain enough PTEs
//
PreviousPte = &MmFirstFreeSystemPte[SystemPtePoolType];
//__debugbreak();
while (PreviousPte->u.List.NextEntry != MM_EMPTY_PTE_LIST)
{
//

View File

@@ -310,6 +310,9 @@ MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
PageTableIndex = Pfn1->u4.PteFrame;
Pfn2 = MiGetPfnEntry(PageTableIndex);
/* Destroy the PTE */
MI_ERASE_PTE(PointerPte);
/* Lock the PFN database */
OldIrql = MiAcquirePfnLock();
@@ -323,8 +326,6 @@ MiDeleteSystemPageableVm(IN PMMPTE PointerPte,
/* Release the PFN database */
MiReleasePfnLock(OldIrql);
/* Destroy the PTE */
MI_ERASE_PTE(PointerPte);
}
else
{

View File

@@ -113,6 +113,33 @@ MiInitializeSessionSpaceLayout(VOID)
MmSessionSpace = (PMM_SESSION_SPACE)Add2Ptr(MiSessionImageStart, 0x10000);
}
VOID
NTAPI
MiMapPXEs(
PVOID StartAddress,
PVOID EndAddress)
{
PMMPDE PointerPxe;
MMPDE TmplPxe = ValidKernelPde;
/* Loop the PXEs */
for (PointerPxe = MiAddressToPxe(StartAddress);
PointerPxe <= MiAddressToPxe(EndAddress);
PointerPxe++)
{
/* Check if it's already mapped */
if (!PointerPxe->u.Hard.Valid)
{
/* No, map it! */
TmplPxe.u.Hard.PageFrameNumber = MxGetNextPage(1);
MI_WRITE_VALID_PTE(PointerPxe, TmplPxe);
/* Zero out the page table */
RtlZeroMemory(MiPteToAddress(PointerPxe), PAGE_SIZE);
}
}
}
VOID
NTAPI
MiMapPPEs(
@@ -236,22 +263,21 @@ MiInitializePageTable(VOID)
/* Create PDPTs (72 KB) for shared system address space,
* skip page tables TODO: use global pages. */
/* Loop the PXEs */
for (PointerPxe = MiAddressToPxe((PVOID)HYPER_SPACE);
PointerPxe <= MiAddressToPxe(MI_HIGHEST_SYSTEM_ADDRESS);
PointerPxe++)
/* Map the PXEs for all VA regions */
for (ULONG i = 0; i < ARRAYSIZE(MiSystemVaRegions); i++)
{
/* Is the PXE already valid? */
if (!PointerPxe->u.Hard.Valid)
{
/* It's not, initialize it */
TmplPte.u.Flush.PageFrameNumber = MxGetNextPage(1);
*PointerPxe = TmplPte;
PMI_SYSTEM_VA_ASSIGNMENT Region = &MiSystemVaRegions[i];
/* Zero the page. The PXE is the PTE for the PDPT. */
RtlZeroMemory(MiPteToAddress(PointerPxe), PAGE_SIZE);
if (Region->BaseAddress == NULL)
{
continue;
}
/* Map the PXE(s) */
MiMapPXEs(Region->BaseAddress,
Add2Ptr(Region->BaseAddress, Region->NumberOfBytes - 1));
}
PxePfn = PFN_FROM_PXE(MiAddressToPxe((PVOID)HYPER_SPACE));
PsGetCurrentProcess()->Pcb.DirectoryTableBase[1] = PxePfn << PAGE_SHIFT;
@@ -272,11 +298,6 @@ MiInitializePageTable(VOID)
MmLastReservedMappingPte = MiAddressToPte((PVOID)MI_MAPPING_RANGE_END);
MmFirstReservedMappingPte->u.Hard.PageFrameNumber = MI_HYPERSPACE_PTES;
/* Setup debug mapping PTE */
MiMapPPEs((PVOID)MI_DEBUG_MAPPING, (PVOID)MI_DEBUG_MAPPING);
MiMapPDEs((PVOID)MI_DEBUG_MAPPING, (PVOID)MI_DEBUG_MAPPING);
MmDebugPte = MiAddressToPte((PVOID)MI_DEBUG_MAPPING);
/* Setup PDE and PTEs for VAD bitmap and working set list */
MiMapPDEs((PVOID)MI_VAD_BITMAP, (PVOID)(MI_WORKING_SET_LIST + PAGE_SIZE - 1));
MiMapPTEs((PVOID)MI_VAD_BITMAP, (PVOID)(MI_WORKING_SET_LIST + PAGE_SIZE - 1));
@@ -339,17 +360,17 @@ MiBuildNonPagedPool(VOID)
}
/* Don't let the maximum go too high */
if (MmMaximumNonPagedPoolInBytes > MI_MAX_NONPAGED_POOL_SIZE)
if (MmMaximumNonPagedPoolInBytes > MiSystemVaRegions[AssignedRegionNonPagedPool].NumberOfBytes)
{
/* Set it to the upper limit */
MmMaximumNonPagedPoolInBytes = MI_MAX_NONPAGED_POOL_SIZE;
MmMaximumNonPagedPoolInBytes = MiSystemVaRegions[AssignedRegionNonPagedPool].NumberOfBytes;
}
/* Convert nonpaged pool size from bytes to pages */
MmMaximumNonPagedPoolInPages = MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT;
/* Non paged pool starts after the PFN database */
MmNonPagedPoolStart = MmPfnDatabase + MxPfnAllocation * PAGE_SIZE;
/* Get non paged pool start address */
MmNonPagedPoolStart = MiSystemVaRegions[AssignedRegionNonPagedPool].BaseAddress;
/* Calculate the nonpaged pool expansion start region */
MmNonPagedPoolExpansionStart = (PCHAR)MmNonPagedPoolStart +
@@ -386,13 +407,13 @@ MiBuildSystemPteSpace(VOID)
NonPagedSystemSize = (MmNumberOfSystemPtes + 1) * PAGE_SIZE;
/* Put system PTEs at the start of the system VA space */
MiSystemPteSpaceStart = MmNonPagedSystemStart;
MiSystemPteSpaceStart = MiSystemVaRegions[AssignedRegionSystemPtes].BaseAddress;
MiSystemPteSpaceEnd = (PUCHAR)MiSystemPteSpaceStart + NonPagedSystemSize;
/* Map the PPEs and PDEs for the system PTEs */
MiMapPPEs(MiSystemPteSpaceStart, MiSystemPteSpaceEnd);
MiMapPDEs(MiSystemPteSpaceStart, MiSystemPteSpaceEnd);
//__debugbreak();
/* Initialize the system PTE space */
PointerPte = MiAddressToPte(MiSystemPteSpaceStart);
MiInitializeSystemPtes(PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
@@ -404,6 +425,10 @@ MiBuildSystemPteSpace(VOID)
/* Set the counter to maximum */
MiFirstReservedZeroingPte->u.Hard.PageFrameNumber = MI_ZERO_PTES;
/* Allocate the debug PTE from system PTEs */
MmDebugPte = MiReserveSystemPtes(1, SystemPteSpace);
MiDebugMapping = MiPteToAddress(MmDebugPte);
}
static
@@ -703,11 +728,10 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Set some hardcoded addresses */
MmHyperSpaceEnd = (PVOID)HYPER_SPACE_END;
MmNonPagedSystemStart = (PVOID)MM_SYSTEM_SPACE_START;
MmPfnDatabase = (PVOID)MI_PFN_DATABASE;
//MmNonPagedSystemStart = (PVOID)MM_SYSTEM_SPACE_START;
MmPfnDatabase = MiSystemVaRegions[AssignedRegionPfnDatabase].BaseAddress;
MmWorkingSetList = (PVOID)MI_WORKING_SET_LIST;
// PrototypePte.u.Proto.Valid = 1
// PrototypePte.u.ReadOnly
// PrototypePte.u.Prototype
@@ -721,6 +745,8 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
MiBuildSystemPteSpace();
MiInitializeStackAllocator();
/* Map the PFN database pages */
MiBuildPfnDatabase(LoaderBlock);
@@ -762,7 +788,7 @@ MiInitMachineDependent(IN PLOADER_PARAMETER_BLOCK LoaderBlock)
/* Make sure we have everything we need */
ASSERT(MmPfnDatabase);
ASSERT(MmNonPagedSystemStart);
//ASSERT(MmNonPagedSystemStart);
ASSERT(MmNonPagedPoolStart);
ASSERT(MmSizeOfNonPagedPoolInBytes);
ASSERT(MmMaximumNonPagedPoolInBytes);

ntoskrnl/mm/amd64/kstack.c (new file, 210 lines)
View File

@@ -0,0 +1,210 @@
/*
* COPYRIGHT: GPL, See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/amd64/kstack.c
* PURPOSE: Kernel stack allocator for AMD64
*
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#include <mm/ARM3/miarm.h>
#include <fltkernel.h>
#define MI_STACK_PAGES 32
#define MI_STACK_SIZE (MI_STACK_PAGES * PAGE_SIZE)
PMMPTE MiStackPteBaseByLevel[3];
PMMPTE MiNextFreeStackPteByLevel[3];
KSPIN_LOCK MiStackPteSpinLock;
static
NTSTATUS
MapPageTable(PMMPTE PointerPte)
{
PFN_NUMBER PageFrameNumber;
MMPTE TempPte;
KIRQL OldIrql;
ASSERT(!PointerPte->u.Hard.Valid);
OldIrql = MiAcquirePfnLock();
PageFrameNumber = MiRemoveZeroPage(MI_GET_NEXT_COLOR());
if (PageFrameNumber == 0)
{
/* Release the PFN lock before failing */
MiReleasePfnLock(OldIrql);
return STATUS_NO_MEMORY;
}
DbgPrint("## MAP %p PFN 0x%lx\n", PointerPte, PageFrameNumber);
MiInitializePfn(PageFrameNumber, PointerPte, TRUE);
PMMPFN Pfn = MiGetPfnEntry(PageFrameNumber);
Pfn->Dbg1.IsStackPfn = 1;
MiReleasePfnLock(OldIrql);
MI_MAKE_HARDWARE_PTE(&TempPte,
PointerPte,
MM_EXECUTE_READWRITE,
PageFrameNumber);
MI_WRITE_VALID_PTE(PointerPte, TempPte);
return STATUS_SUCCESS;
}
static
VOID
InsertPtesInList(
_In_ PMMPTE FirstPte,
_In_ ULONG NumberOfPtes,
_In_ ULONG Level)
{
const ULONG PteDelta = (Level == 0) ? MI_STACK_PAGES : 1;
const PMMPTE LastPte = FirstPte + NumberOfPtes - PteDelta;
PMMPTE PointerPte;
ULONG NextEntryOffset;
ASSERT(Level < 3);
ASSERT((Level == 2) || IS_ALIGNED(FirstPte, PAGE_SIZE));
LastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
NextEntryOffset = LastPte - MiStackPteBaseByLevel[Level];
/* Link the PTEs to a list */
for (PointerPte = LastPte - PteDelta;
PointerPte >= FirstPte;
PointerPte -= PteDelta)
{
ASSERT(PointerPte->u.Long == 0);
PointerPte->u.List.NextEntry = NextEntryOffset;
NextEntryOffset -= PteDelta;
}
MiNextFreeStackPteByLevel[Level] = FirstPte;
}
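/* At level 0 the list is linked in strides of MI_STACK_PAGES PTEs, so each
   free-list entry represents one whole 32-page stack slot; at levels 1 and 2
   the stride is a single page-table entry. */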
VOID
MiInitializeStackAllocator(
VOID)
{
PMMPPE PointerPpe;
ULONG NumberOfPpes;
PVOID BaseAddress = MiSystemVaRegions[AssignedRegionKernelStacks].BaseAddress;
SIZE_T SizeInBytes = MiSystemVaRegions[AssignedRegionKernelStacks].NumberOfBytes;
ASSERT(IS_ALIGNED(BaseAddress, PPE_MAPPED_VA));
ASSERT(MiAddressToPxe(BaseAddress)->u.Hard.Valid);
/* Initialize the PTE base per level */
MiStackPteBaseByLevel[0] = MiAddressToPte(BaseAddress);
MiStackPteBaseByLevel[1] = MiAddressToPde(BaseAddress);
MiStackPteBaseByLevel[2] = MiAddressToPpe(BaseAddress);
/* Link the PPEs */
PointerPpe = MiAddressToPpe(BaseAddress);
NumberOfPpes = SizeInBytes / PPE_MAPPED_VA;
InsertPtesInList(PointerPpe, NumberOfPpes, 2);
}
static
PMMPTE
AllocateStackPtes(ULONG Level)
{
PMMPTE PointerPte;
PMMPDE PointerPde;
ULONG NextPteOffset;
NTSTATUS Status;
/* Bail out if we reached PXE level */
if (Level == 3)
{
return NULL;
}
PointerPte = MiNextFreeStackPteByLevel[Level];
if (PointerPte == NULL)
{
PointerPde = AllocateStackPtes(Level + 1);
if (PointerPde == NULL)
{
return NULL;
}
Status = MapPageTable(PointerPde);
if (!NT_SUCCESS(Status))
{
__debugbreak();
return NULL;
}
PointerPte = MiPdeToPte(PointerPde);
InsertPtesInList(PointerPte, PTE_PER_PAGE, Level);
}
NextPteOffset = PointerPte->u.List.NextEntry;
if (NextPteOffset != MM_EMPTY_PTE_LIST)
{
MiNextFreeStackPteByLevel[Level] =
MiStackPteBaseByLevel[Level] + NextPteOffset;
}
else
{
MiNextFreeStackPteByLevel[Level] = NULL;
}
return PointerPte;
}
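/* When a level has no free entry left, recurse one level up for a PDE/PPE,
   map a fresh page table there, and link its entries into this level's list. */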
PMMPTE
MiReserveKernelStackPtes(
_In_ ULONG NumberOfPtes)
{
PMMPTE PointerPte;
KIRQL OldIrql;
ASSERT(NumberOfPtes <= MI_STACK_PAGES);
KeAcquireSpinLock(&MiStackPteSpinLock, &OldIrql);
PointerPte = AllocateStackPtes(0);
KeReleaseSpinLock(&MiStackPteSpinLock, OldIrql);
return PointerPte;
}
VOID
MiReleaseKernelStackPtes(
_In_ PMMPTE FirstPte,
_In_ ULONG NumberOfPtes)
{
PMMPTE PointerPte;
KIRQL OldIrql;
ASSERT(NumberOfPtes <= MI_STACK_PAGES);
ASSERT(IS_ALIGNED(MiPteToAddress(FirstPte), MI_STACK_SIZE));
/* Zero the PTEs */
RtlZeroMemory(FirstPte, MI_STACK_PAGES * sizeof(MMPTE));
KeAcquireSpinLock(&MiStackPteSpinLock, &OldIrql);
PointerPte = MiNextFreeStackPteByLevel[0];
if (PointerPte != NULL)
{
FirstPte->u.List.NextEntry = PointerPte - MiStackPteBaseByLevel[0];
}
else
{
FirstPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
}
MiNextFreeStackPteByLevel[0] = FirstPte;
KeReleaseSpinLock(&MiStackPteSpinLock, OldIrql);
}
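
For reference, a condensed usage sketch mirroring the MmCreateKernelStack/MmDeleteKernelStack hunks above (KERNEL_STACK_SIZE is the NDK constant):

    ULONG StackPtes = KERNEL_STACK_SIZE / PAGE_SIZE;
    PMMPTE StackPte = MiReserveKernelStackPtes(StackPtes + 1); /* stack pages plus a guard page */
    if (StackPte != NULL)
    {
        PVOID BaseAddress = MiPteToAddress(StackPte + StackPtes + 1);
        /* ... commit the pages and use the stack ... */
        MiReleaseKernelStackPtes(StackPte, StackPtes + 1);
    }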

ntoskrnl/mm/amd64/kvalayout.c (new file, 258 lines)
View File

@@ -0,0 +1,258 @@
/*
* COPYRIGHT: GPL, See COPYING in the top level directory
* PROJECT: ReactOS kernel
* FILE: ntoskrnl/mm/amd64/kvalayout.c
* PURPOSE: Kernel virtual address layout for AMD64
*
* PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
/* INCLUDES ***************************************************************/
#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#include <mm/ARM3/miarm.h>
#include <fltkernel.h>
/* Random seed */
ULONG MiRandomSeed = 'MRnd';
// FIXME: Should be part of MI_VISIBLE_STATE
MI_SYSTEM_VA_ASSIGNMENT MiSystemVaRegions[AssignedRegionMaximum];
ULONG MiSystemVaAssignment[8];
static RTL_BITMAP MiSystemVaAssignmentBitmap = { 256, MiSystemVaAssignment };
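/* 256 bits: one per PML4 entry in the kernel half of the address space (PXIs 256-511) */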
static
VOID
ReservePxi(
_In_ ULONG Pxi)
{
ULONG BitIndex = Pxi - 256;
ASSERT(BitIndex < 256);
RtlSetBit(&MiSystemVaAssignmentBitmap, BitIndex);
}
static
VOID
ReserveVaRange(
_In_ ULONG_PTR StartAddress,
_In_ ULONG_PTR NumberOfBytes)
{
ULONG_PTR EndingAddress = StartAddress + NumberOfBytes - 1;
ULONG Pxi;
for (Pxi = MiAddressToPxi((PVOID)StartAddress);
Pxi <= MiAddressToPxi((PVOID)EndingAddress);
Pxi++)
{
ReservePxi(Pxi);
}
}
static
VOID
ReserveVaRegion(
_In_ MI_ASSIGNED_REGION_TYPES Region,
ULONG64 BaseAddress,
ULONG64 NumberOfBytes)
{
ReserveVaRange(BaseAddress, NumberOfBytes);
MiSystemVaRegions[Region].BaseAddress = (PVOID)BaseAddress;
MiSystemVaRegions[Region].NumberOfBytes = NumberOfBytes;
}
static
ULONG
AcquireRandomPxiRange(ULONG NumberOfPxis)
{
ULONG AvailableSlots = 0;
ULONG SkipCount;
ULONG Index = 0;
/* First count available slots */
for (ULONG i = 0; i < (256 - NumberOfPxis); i++)
{
if (RtlAreBitsClear(&MiSystemVaAssignmentBitmap, i, NumberOfPxis))
{
AvailableSlots++;
}
}
/* We should have plenty available */
if (AvailableSlots < 100)
{
KeBugCheck(MEMORY_MANAGEMENT);
}
/* Get a random skip count */
SkipCount = RtlRandomEx(&MiRandomSeed);
SkipCount %= AvailableSlots;
/* Skip over unavailable and 'SkipCount' available slots */
while (!RtlAreBitsClear(&MiSystemVaAssignmentBitmap, Index, NumberOfPxis) ||
(SkipCount-- != 0))
{
Index++;
}
/* Now set the bits */
for (ULONG i = 0; i < NumberOfPxis; i++)
{
RtlSetBit(&MiSystemVaAssignmentBitmap, Index + i);
}
return Index + 256;
}
/*
* The function:
* - Allocates only as many PXIs as needed.
* - Randomizes the location within the PXI range as much as possible.
*/
static
VOID
RandomizeVaRegion(
_In_ MI_ASSIGNED_REGION_TYPES Region,
_In_ ULONG64 NumberOfBytes,
_In_ ULONG64 Alignment)
{
ULONG_PTR FullSize = ALIGN_UP_BY(NumberOfBytes, PXE_MAPPED_VA);
ULONG NumberOfPxis = FullSize / PXE_MAPPED_VA;
ASSERT(NumberOfPxis != 0);
ASSERT(Alignment >= PDE_MAPPED_VA);
ASSERT(Alignment <= PXE_MAPPED_VA);
ASSERT((Alignment & (Alignment - 1)) == 0);
/* Get a random PXI range for the region and reserve it */
ULONG Pxi = AcquireRandomPxiRange(NumberOfPxis);
/* Calculate the number of available slots inside the PXI range */
ULONG64 MaxOffset = FullSize - NumberOfBytes;
ULONG AvailableSlots = 1 + (MaxOffset / Alignment);
/* Get a random slot index */
ULONG SlotIndex = RtlRandomEx(&MiRandomSeed) % AvailableSlots;
/* Calculate the actual base address */
PVOID BaseAddress = Add2Ptr(MiPxiToAddress(Pxi), SlotIndex * Alignment);
/* Set up the region */
MiSystemVaRegions[Region].BaseAddress = (PVOID)BaseAddress;
MiSystemVaRegions[Region].NumberOfBytes = NumberOfBytes;
}
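/* Worked example: for a 96 GB PFN database with Alignment == PDE_MAPPED_VA (2 MB):
   FullSize = 512 GB, NumberOfPxis = 1, MaxOffset = 416 GB, and
   AvailableSlots = 1 + 416 GB / 2 MB = 212993, so the base lands on a random
   2 MB boundary inside one randomly chosen PXE. (Sizes are illustrative.) */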
static
PFN_NUMBER
FindHighestPfnNumber(
_In_ const LOADER_PARAMETER_BLOCK* LoaderBlock)
{
PMEMORY_ALLOCATION_DESCRIPTOR Descriptor;
PLIST_ENTRY ListEntry;
PFN_NUMBER HighestPfn = 0;
for (ListEntry = LoaderBlock->MemoryDescriptorListHead.Flink;
ListEntry != &LoaderBlock->MemoryDescriptorListHead;
ListEntry = ListEntry->Flink)
{
Descriptor = CONTAINING_RECORD(ListEntry, MEMORY_ALLOCATION_DESCRIPTOR, ListEntry);
PFN_NUMBER UpperPfn = Descriptor->BasePage + Descriptor->PageCount;
HighestPfn = max(HighestPfn, UpperPfn);
}
return HighestPfn;
}
static
VOID
SetupVaRegions(
_In_ const LOADER_PARAMETER_BLOCK* LoaderBlock)
{
SIZE_T BootImageSize;
/* Reserve the shared user page VA */
ReserveVaRange(KI_USER_SHARED_DATA, PAGE_SIZE);
/* Reserve 4 MB for the HAL */
ReserveVaRange(MM_HAL_VA_START, 4 * _1MB);
/* Reserve 512 GB for the page tables */
ReserveVaRegion(AssignedRegionPageTables, PTE_BASE, 512 * _1GB);
/* Reserve 512 GB for hyper space */
ReserveVaRegion(AssignedRegionHyperSpace, HYPER_SPACE, 512 * _1GB);
/* Reserve 512 GB for session space */
ReserveVaRegion(AssignedRegionSession, MI_SESSION_SPACE_START, 512 * _1GB);
/* Reserve loader mappings */
BootImageSize = LoaderBlock->Extension->LoaderPagesSpanned * PAGE_SIZE;
ReserveVaRegion(AssignedRegionSystemImages,
MI_LOADER_MAPPINGS,
BootImageSize + PAGE_SIZE);
/* Reserve up to 8 TB for the PFN database */
PFN_NUMBER HighestPfn = FindHighestPfnNumber(LoaderBlock);
ULONG64 PfnDbSize = HighestPfn * sizeof(MMPFN) + _1MB;
RandomizeVaRegion(AssignedRegionPfnDatabase, PfnDbSize, PDE_MAPPED_VA);
/* Reserve 2 TB system cache */
RandomizeVaRegion(AssignedRegionSystemCache, 2 * _1TB, 512 * _1GB);
/* Reserve 128 GB for non-paged pool */
RandomizeVaRegion(AssignedRegionNonPagedPool, 128 * _1GB, PDE_MAPPED_VA);
/* Reserve 128 GB for paged pool */
RandomizeVaRegion(AssignedRegionPagedPool, 128 * _1GB, PDE_MAPPED_VA);
/* Reserve 128 GB for system PTEs */
RandomizeVaRegion(AssignedRegionSystemPtes, 128 * _1GB, PDE_MAPPED_VA);
/* Reserve 128 GB for kernel stacks */
RandomizeVaRegion(AssignedRegionKernelStacks, 128 * _1GB, PPE_MAPPED_VA);
/*
AssignedRegionUltraZero = 4,
AssignedRegionCfg = 6,
AssignedRegionSecureNonPagedPool = 11,
*/
MmPagedPoolStart = MiSystemVaRegions[AssignedRegionPagedPool].BaseAddress;
}
static
VOID
RelocatePageTables(VOID)
{
// TODO
}
static
VOID
RelocateBootLoadedImages(VOID)
{
// TODO
}
VOID
NTAPI
MiInitializeKernelVaLayout(
_In_ const LOADER_PARAMETER_BLOCK* LoaderBlock)
{
/* Initialize the random seed */
if (LoaderBlock->Extension->LoaderPerformanceData != NULL)
{
MiRandomSeed ^= LoaderBlock->Extension->LoaderPerformanceData->StartTime;
MiRandomSeed ^= _rotl(LoaderBlock->Extension->LoaderPerformanceData->EndTime, 16);
}
MiRandomSeed ^= _rotl(__rdtsc(), MiRandomSeed & 0x1F);
SetupVaRegions(LoaderBlock);
RelocatePageTables();
RelocateBootLoadedImages();
}

View File

@@ -585,6 +585,9 @@ MmCreatePageFileMapping(PEPROCESS Process,
return STATUS_SUCCESS;
}
VOID
MiMakeKernelPageTableValid(
_In_ PVOID Address);
NTSTATUS
NTAPI
@@ -625,6 +628,12 @@ MmCreateVirtualMappingUnsafe(PEPROCESS Process,
if (!MiSynchronizeSystemPde(MiAddressToPde(Address)))
MiFillSystemPageDirectory(Address, PAGE_SIZE);
#endif
/* Lock the system cache WS */
MiLockWorkingSet(PsGetCurrentThread(), &MmSystemCacheWs);
/* Make the page table valid */
MiMakeKernelPageTableValid(Address);
}
else
{
@@ -663,6 +672,10 @@ MmCreateVirtualMappingUnsafe(PEPROCESS Process,
MiIncrementPageTableReferences(Address);
MiUnlockProcessWorkingSetUnsafe(Process, PsGetCurrentThread());
}
else
{
MiUnlockWorkingSet(PsGetCurrentThread(), &MmSystemCacheWs);
}
return(STATUS_SUCCESS);
}
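
The hunk above implements the "Map PPEs and PDEs in MmCreateVirtualMappingUnsafe" commit: under the system cache WS lock, MiMakeKernelPageTableValid demand-zero-faults any missing paging levels. A minimal sketch of the guarantee it provides for a kernel Address:

    MiMakeKernelPageTableValid(Address);
    ASSERT(MiAddressToPpe(Address)->u.Hard.Valid == 1);
    ASSERT(MiAddressToPde(Address)->u.Hard.Valid == 1);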

View File

@@ -69,8 +69,12 @@ MiInitSystemMemoryAreas(VOID)
//
#ifdef _M_AMD64
// Reserved range FFFF800000000000 - FFFFF68000000000
MiCreateArm3StaticMemoryArea((PVOID)MI_REAL_SYSTEM_RANGE_START, PTE_BASE - MI_REAL_SYSTEM_RANGE_START, FALSE);
// On x64 we reserve everything except system cache, which is used for all RosMm mappings
ULONG64 SystemCacheStart = (ULONG64)MiSystemVaRegions[AssignedRegionSystemCache].BaseAddress;
ULONG64 SystemCacheEnd = SystemCacheStart + MiSystemVaRegions[AssignedRegionSystemCache].NumberOfBytes;
MiCreateArm3StaticMemoryArea((PVOID)MI_REAL_SYSTEM_RANGE_START, SystemCacheStart - MI_REAL_SYSTEM_RANGE_START, FALSE);
MiCreateArm3StaticMemoryArea((PVOID)SystemCacheEnd, 0xFFFFFFFFFFFFFFFFULL - SystemCacheEnd, FALSE);
return;
#endif /* _M_AMD64 */
// The loader mappings. The only Executable area.

View File

@@ -353,6 +353,8 @@ elseif(ARCH STREQUAL "amd64")
${REACTOS_SOURCE_DIR}/ntoskrnl/ke/amd64/spinlock.c
${REACTOS_SOURCE_DIR}/ntoskrnl/ke/amd64/thrdini.c
${REACTOS_SOURCE_DIR}/ntoskrnl/mm/amd64/init.c
${REACTOS_SOURCE_DIR}/ntoskrnl/mm/amd64/kstack.c
${REACTOS_SOURCE_DIR}/ntoskrnl/mm/amd64/kvalayout.c
${REACTOS_SOURCE_DIR}/ntoskrnl/mm/amd64/procsup.c
${REACTOS_SOURCE_DIR}/ntoskrnl/ps/amd64/psctx.c
${REACTOS_SOURCE_DIR}/ntoskrnl/ke/amd64/stubs.c