Compare commits

...

7 Commits

Author SHA1 Message Date
Timo Kreuzer
e7c2799141 [KMTEST] Handle ReactOS / Vista+ style of reserved mappings 2024-11-07 16:58:22 +02:00
Timo Kreuzer
0f10de854e [NTOS:MM] Remove an over-sensitive ASSERT 2024-11-07 16:58:21 +02:00
Timo Kreuzer
6e5e26d1a0 [NTOS:MM] Silence a debug print 2024-11-07 16:58:14 +02:00
Timo Kreuzer
8cd4a8b428 [NTOS:EX] Add magic marker for reactos in shared user data 2024-11-07 16:58:14 +02:00
Timo Kreuzer
cc802ecba1 [CMAKE][LWIP] Fix build with newer CMake versions 2024-11-07 16:58:13 +02:00
Doug Lyons
0109f91810 Changes based on reviewer comments. 2024-11-07 04:17:52 -06:00
Doug Lyons
be1a884ea0 [NTOS:MM] Fix MmAllocateMappingAddress and MmFreeMappingAddress.
This fixes kmtest:MmReservedMapping failures and a hang.
Based on mm-implement-mappingaddress.patch by Thomas Faber and some changes by Oleg Dubinskiy.

Signed-off-by: Oleg Dubinskiy <oleg.dubinskij30@gmail.com>
Signed-off-by: Thomas Faber <thomas.faber@reactos.org>
2024-11-06 22:31:42 -06:00
7 changed files with 321 additions and 26 deletions
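
For context, this series implements and tests the reserved-mapping MDL API. A minimal driver-side sketch of how a caller is expected to use these routines — MapBufferThroughReservedMapping, MY_POOL_TAG, and the Buffer/Size parameters are illustrative and not taken from the patch; the Mm*/Io* calls are the documented WDM routines the patch implements:

#include <ntddk.h>

#define MY_POOL_TAG 'tseT'   /* illustrative tag, not from the patch */

static NTSTATUS
MapBufferThroughReservedMapping(PVOID Buffer, SIZE_T Size)
{
    PVOID Reserved, SystemVa = NULL;
    PMDL Mdl;

    /* Reserve a range of system PTEs up front (typically at driver init) */
    Reserved = MmAllocateMappingAddress(Size, MY_POOL_TAG);
    if (!Reserved)
        return STATUS_INSUFFICIENT_RESOURCES;

    Mdl = IoAllocateMdl(Buffer, (ULONG)Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        MmFreeMappingAddress(Reserved, MY_POOL_TAG);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* Assumes Buffer is valid, resident kernel memory; user buffers would
       need a __try/__except around the probe */
    MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);

    /* Map the locked pages into the previously reserved PTE range */
    SystemVa = MmMapLockedPagesWithReservedMapping(Reserved, MY_POOL_TAG, Mdl, MmCached);
    if (SystemVa)
    {
        /* ... use SystemVa ... */
        MmUnmapReservedMapping(Reserved, MY_POOL_TAG, Mdl);
    }

    MmUnlockPages(Mdl);
    IoFreeMdl(Mdl);
    MmFreeMappingAddress(Reserved, MY_POOL_TAG);
    return SystemVa ? STATUS_SUCCESS : STATUS_INSUFFICIENT_RESOURCES;
}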

View File

@@ -262,19 +262,19 @@ set(DOXYGEN_IN ${LWIP_DIR}/doc/doxygen/lwip.Doxyfile.cmake.in)
set(DOXYGEN_OUT ${LWIP_DIR}/doc/doxygen/lwip.Doxyfile)
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT})
find_package(Doxygen)
if (DOXYGEN_FOUND)
message(STATUS "Doxygen build started")
add_custom_target(lwipdocs
COMMAND ${CMAKE_COMMAND} -E remove_directory ${DOXYGEN_DIR}/${DOXYGEN_OUTPUT_DIR}/html
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${DOXYGEN_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM)
else (DOXYGEN_FOUND)
message(STATUS "Doxygen needs to be installed to generate the doxygen documentation")
endif (DOXYGEN_FOUND)
#find_package(Doxygen)
#if (DOXYGEN_FOUND)
# message(STATUS "Doxygen build started")
#
# add_custom_target(lwipdocs
# COMMAND ${CMAKE_COMMAND} -E remove_directory ${DOXYGEN_DIR}/${DOXYGEN_OUTPUT_DIR}/html
# COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
# WORKING_DIRECTORY ${DOXYGEN_DIR}
# COMMENT "Generating API documentation with Doxygen"
# VERBATIM)
#else (DOXYGEN_FOUND)
# message(STATUS "Doxygen needs to be installed to generate the doxygen documentation")
#endif (DOXYGEN_FOUND)
# lwIP libraries
add_library(lwipcore EXCLUDE_FROM_ALL ${lwipnoapps_SRCS})

View File

@@ -9,6 +9,8 @@
#include <kmt_test.h>
static BOOLEAN g_IsPae;
static ULONG g_OsVersion;
static BOOLEAN g_IsReactOS;
#ifdef _M_IX86
@@ -76,7 +78,7 @@ ValidateMapping(
BOOLEAN Valid = TRUE;
#if defined(_M_IX86) || defined(_M_AMD64)
PUCHAR CurrentAddress;
ULONGLONG PteValue;
ULONGLONG PteValue, ExpectedValue;
ULONG i;
for (i = 0; i < ValidPtes; i++)
@@ -110,10 +112,26 @@ ValidateMapping(
CurrentAddress, PteValue, PoolTag & ~1);
CurrentAddress = (PUCHAR)BaseAddress - 2 * PAGE_SIZE;
PteValue = GET_PTE_VALUE(CurrentAddress);
if (g_IsReactOS || g_OsVersion >= 0x0600)
{
/* On ReactOS and on Vista+ the size is stored in the NextEntry field
of a MMPTE_LIST structure */
#ifdef _M_IX86
ExpectedValue = (TotalPtes + 2) << 12;
#elif defined(_M_AMD64)
ExpectedValue = ((ULONG64)TotalPtes + 2) << 32;
#endif
}
else
{
/* On Windows 2003 the size is shifted by 1 bit only */
ExpectedValue = (TotalPtes + 2) * 2;
}
Valid = Valid &&
ok(PteValue == (TotalPtes + 2) * 2,
ok(PteValue == ExpectedValue,
"PTE for %p contains 0x%I64x, expected %x\n",
CurrentAddress, PteValue, (TotalPtes + 2) * 2);
CurrentAddress, PteValue, ExpectedValue);
#endif
return Valid;
@@ -281,6 +299,9 @@ START_TEST(MmReservedMapping)
PVOID Mapping;
g_IsPae = ExIsProcessorFeaturePresent(PF_PAE_ENABLED);
g_OsVersion = SharedUserData->NtMajorVersion << 8 | SharedUserData->NtMinorVersion;
g_IsReactOS = *(PULONG)(KI_USER_SHARED_DATA + PAGE_SIZE - sizeof(ULONG)) == 0x8eac705;
ok(g_IsReactOS == 1, "Not reactos\n");
pMmAllocatePagesForMdlEx = KmtGetSystemRoutineAddress(L"MmAllocatePagesForMdlEx");

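As a concrete check of the encoding the test now distinguishes (the page count is illustrative): for a mapping of TotalPtes = 14 pages, the marker PTE two pages below the base address is expected to hold (14 + 2) << 12 = 0x10000 on ReactOS/Vista+ x86, ((ULONG64)14 + 2) << 32 = 0x1000000000 on x64, and (14 + 2) * 2 = 0x20 on Windows 2003, matching the three branches added above.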
View File

@@ -1330,6 +1330,9 @@ ExpInitializeExecutive(IN ULONG Cpu,
/* Set the machine type */
SharedUserData->ImageNumberLow = IMAGE_FILE_MACHINE_NATIVE;
SharedUserData->ImageNumberHigh = IMAGE_FILE_MACHINE_NATIVE;
/* ReactOS magic */
*(PULONG)(KI_USER_SHARED_DATA + PAGE_SIZE - sizeof(ULONG)) = 0x8eac705;
}
VOID

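The marker is written into the last ULONG of the shared user data page. The kernel-mode test above reads it via KI_USER_SHARED_DATA; a hedged user-mode sketch of the same check is shown below — it assumes the page is mapped read-only at the conventional address 0x7FFE0000, as on Windows, which is not something this patch itself establishes:

#include <windows.h>
#include <stdio.h>

int main(void)
{
    /* Assumption: the shared user data page is mapped read-only at 0x7FFE0000
       in user mode; the patch stores its magic in the page's last ULONG
       (offset 0xFFC). */
    const ULONG *Marker =
        (const ULONG *)((ULONG_PTR)0x7FFE0000 + 0x1000 - sizeof(ULONG));

    printf("Marker at 0x7FFE0FFC: 0x%08lX (%s)\n",
           *Marker, *Marker == 0x8eac705 ? "ReactOS" : "not ReactOS");
    return 0;
}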
View File

@@ -529,7 +529,7 @@ MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
else
{
//
// Conver to internal caching attribute
// Convert to internal caching attribute
//
CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
}
@@ -1622,29 +1622,297 @@ MmAdvanceMdl(IN PMDL Mdl,
}
/*
* @unimplemented
* @implemented
*/
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
IN ULONG PoolTag,
IN PMDL MemoryDescriptorList,
IN PMDL Mdl,
IN MEMORY_CACHING_TYPE CacheType)
{
UNIMPLEMENTED;
return 0;
PPFN_NUMBER MdlPages, LastPage;
PFN_COUNT PageCount;
BOOLEAN IsIoMapping;
MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
PMMPTE PointerPte;
MMPTE TempPte;
ASSERT(Mdl->ByteCount != 0);
//
// Get the list of pages and count
//
MdlPages = MmGetMdlPfnArray(Mdl);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
Mdl->ByteCount);
LastPage = MdlPages + PageCount;
//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
MDL_SOURCE_IS_NONPAGED_POOL |
MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);
//
// Get the correct cache type
//
IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];
//
// Get the first PTE we reserved
//
ASSERT(MappingAddress);
PointerPte = MiAddressToPte(MappingAddress) - 2;
ASSERT(!PointerPte[0].u.Hard.Valid &&
!PointerPte[1].u.Hard.Valid);
//
// Verify that the pool tag matches
//
TempPte.u.Long = PoolTag;
TempPte.u.Hard.Valid = 0;
if (PointerPte[1].u.Long != TempPte.u.Long)
{
KeBugCheckEx(SYSTEM_PTE_MISUSE,
0x104, /* Trying to map an address it does not own */
(ULONG_PTR)MappingAddress,
PoolTag,
PointerPte[1].u.Long);
}
//
// We must have a size, and our helper PTEs must be invalid
//
if (PointerPte[0].u.List.NextEntry < 3)
{
KeBugCheckEx(SYSTEM_PTE_MISUSE,
0x105, /* Trying to map an invalid address */
(ULONG_PTR)MappingAddress,
PoolTag,
(ULONG_PTR)_ReturnAddress());
}
//
// If the mapping isn't big enough, fail
//
if (PointerPte[0].u.List.NextEntry - 2 < PageCount)
{
DPRINT1("Reserved mapping too small. Need %Iu pages, have %Iu\n",
PageCount,
PointerPte[0].u.List.NextEntry - 2);
return NULL;
}
//
// Skip our two helper PTEs
//
PointerPte += 2;
//
// Get the template
//
TempPte = ValidKernelPte;
switch (CacheAttribute)
{
case MiNonCached:
//
// Disable caching
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_THROUGH(&TempPte);
break;
case MiWriteCombined:
//
// Enable write combining
//
MI_PAGE_DISABLE_CACHE(&TempPte);
MI_PAGE_WRITE_COMBINED(&TempPte);
break;
default:
//
// Nothing to do
//
break;
}
//
// Loop all PTEs
//
do
{
//
// We're done here
//
if (*MdlPages == LIST_HEAD) break;
//
// Write the PTE
//
TempPte.u.Hard.PageFrameNumber = *MdlPages;
MI_WRITE_VALID_PTE(PointerPte++, TempPte);
} while (++MdlPages < LastPage);
//
// Mark it as mapped
//
ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
Mdl->MappedSystemVa = MappingAddress;
Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
//
// Check if it was partial
//
if (Mdl->MdlFlags & MDL_PARTIAL)
{
//
// Write the appropriate flag here too
//
Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
}
//
// Return the mapped address
//
return (PVOID)((ULONG_PTR)MappingAddress + Mdl->ByteOffset);
}
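Both the mapping routine above and the unmapping routine below rely on a two-PTE header placed in front of the address returned by MmAllocateMappingAddress; a sketch of that layout, derived from the checks in this diff:

/*
 * Layout assumed by the checks above (PTE[] is relative to
 * MiAddressToPte(MappingAddress), i.e. the first mapping PTE):
 *
 *   PTE[-2].u.List.NextEntry  number of reserved PTEs, including both header PTEs
 *   PTE[-1].u.Long            caller's PoolTag with the Valid bit forced to 0
 *   PTE[0] .. PTE[n-1]        filled in by MmMapLockedPagesWithReservedMapping
 *
 * The mapping path bugchecks with SYSTEM_PTE_MISUSE codes 0x104/0x105 on a
 * tag or size mismatch; the unmapping path below uses 0x108/0x109 for the
 * corresponding checks.
 */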
/*
* @unimplemented
* @implemented
*/
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
IN ULONG PoolTag,
IN PMDL MemoryDescriptorList)
IN PMDL Mdl)
{
UNIMPLEMENTED;
PVOID Base;
PFN_COUNT PageCount, ExtraPageCount;
PPFN_NUMBER MdlPages;
PMMPTE PointerPte;
MMPTE TempPte;
//
// Sanity check
//
ASSERT(Mdl->ByteCount != 0);
ASSERT(BaseAddress > MM_HIGHEST_USER_ADDRESS);
//
// Get base and count information
//
Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
//
// Sanity checks
//
ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
ASSERT(PageCount != 0);
ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
//
// Get the first PTE we reserved
//
PointerPte = MiAddressToPte(BaseAddress) - 2;
ASSERT(!PointerPte[0].u.Hard.Valid &&
!PointerPte[1].u.Hard.Valid);
//
// Verify that the pool tag matches
//
TempPte.u.Long = PoolTag;
TempPte.u.Hard.Valid = 0;
if (PointerPte[1].u.Long != TempPte.u.Long)
{
KeBugCheckEx(SYSTEM_PTE_MISUSE,
0x108, /* Trying to unmap an address it does not own */
(ULONG_PTR)BaseAddress,
PoolTag,
PointerPte[1].u.Long);
}
//
// We must have a size
//
if (PointerPte[0].u.Long < (3 << 1))
{
KeBugCheckEx(SYSTEM_PTE_MISUSE,
0x109, /* Mapping apparently empty */
(ULONG_PTR)BaseAddress,
PoolTag,
(ULONG_PTR)_ReturnAddress());
}
//
// Skip our two helper PTEs
//
PointerPte += 2;
//
// This should be a resident system PTE
//
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
ASSERT(PointerPte->u.Hard.Valid == 1);
// TODO: check the MDL range makes sense with regard to the mapping range
// TODO: check if any of them are already zero
// TODO: check if any outside the MDL range are nonzero
// TODO: find out what to do with extra PTEs
//
// Check if the caller wants us to free advanced pages
//
if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
{
//
// Get the MDL page array
//
MdlPages = MmGetMdlPfnArray(Mdl);
/* Number of extra pages stored after the PFN array */
ExtraPageCount = MdlPages[PageCount];
//
// Do the math
//
PageCount += ExtraPageCount;
PointerPte -= ExtraPageCount;
ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
//
// Get the new base address
//
BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
(ExtraPageCount << PAGE_SHIFT));
}
//
// Zero the PTEs
//
RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));
//
// Flush the TLB
//
KeFlushEntireTb(TRUE, TRUE);
//
// Remove flags
//
Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
MDL_PARTIAL_HAS_BEEN_MAPPED |
MDL_FREE_EXTRA_PTES);
}
/*

View File

@@ -1002,7 +1002,6 @@ MI_WRITE_INVALID_PTE(IN PMMPTE PointerPte,
{
/* Write the invalid PTE */
ASSERT(InvalidPte.u.Hard.Valid == 0);
ASSERT(InvalidPte.u.Long != 0);
*PointerPte = InvalidPte;
}

View File

@@ -1580,6 +1580,10 @@ MmAllocateMappingAddress(
PMMPTE PointerPte;
MMPTE TempPte;
/* Fast exit if PoolTag is NULL */
if (!PoolTag)
return NULL;
/* How many PTEs does the caller want? */
SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
if (SizeInPages == 0)

View File

@@ -193,7 +193,7 @@ MiAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
PMMPFN Pfn1;
INT LookForZeroedPages;
ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
DPRINT1("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
DPRINT("ARM3-DEBUG: Being called with %I64x %I64x %I64x %lx %d %lu\n", LowAddress, HighAddress, SkipBytes, TotalBytes, CacheAttribute, MdlFlags);
//
// Convert the low address into a PFN