Move solution and projects to src

TSR Berry authored on 2023-04-08 01:22:00 +02:00; committed by Mary
parent cd124bda58
commit cee7121058
3466 changed files with 55 additions and 55 deletions

@@ -0,0 +1,470 @@
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory
{
/// <summary>
/// Represents an address space manager.
/// Supports virtual memory region mapping, address translation and read/write access to mapped regions.
/// </summary>
public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
{
public const int PageBits = PageTable<nuint>.PageBits;
public const int PageSize = PageTable<nuint>.PageSize;
public const int PageMask = PageTable<nuint>.PageMask;
/// <inheritdoc/>
public bool Supports4KBPages => true;
/// <summary>
/// Address space width in bits.
/// </summary>
public int AddressSpaceBits { get; }
private readonly ulong _addressSpaceSize;
private readonly MemoryBlock _backingMemory;
private readonly PageTable<nuint> _pageTable;
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
public AddressSpaceManager(MemoryBlock backingMemory, ulong addressSpaceSize)
{
ulong asSize = PageSize;
int asBits = PageBits;
while (asSize < addressSpaceSize)
{
asSize <<= 1;
asBits++;
}
AddressSpaceBits = asBits;
_addressSpaceSize = asSize;
_backingMemory = backingMemory;
_pageTable = new PageTable<nuint>();
}
/// <inheritdoc/>
public void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
_pageTable.Map(va, (nuint)(ulong)_backingMemory.GetPointer(pa, PageSize));
va += PageSize;
pa += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
public void MapForeign(ulong va, nuint hostPointer, ulong size)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
_pageTable.Map(va, hostPointer);
va += PageSize;
hostPointer += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
_pageTable.Unmap(va);
va += PageSize;
size -= PageSize;
}
}
/// <inheritdoc/>
public T Read<T>(ulong va) where T : unmanaged
{
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
}
/// <inheritdoc/>
public void Read(ulong va, Span<byte> data)
{
ReadImpl(va, data);
}
/// <inheritdoc/>
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
/// <inheritdoc/>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
{
return;
}
AssertValidAddressAndSize(va, (ulong)data.Length);
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(GetHostSpanContiguous(va, data.Length));
}
else
{
int offset = 0, size;
if ((va & PageMask) != 0)
{
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
}
}
}
/// <inheritdoc/>
public bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data)
{
Write(va, data);
return true;
}
/// <inheritdoc/>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return ReadOnlySpan<byte>.Empty;
}
if (IsContiguousAndMapped(va, size))
{
return GetHostSpanContiguous(va, size);
}
else
{
Span<byte> data = new byte[size];
ReadImpl(va, data);
return data;
}
}
/// <inheritdoc/>
public unsafe WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (size == 0)
{
return new WritableRegion(null, va, Memory<byte>.Empty);
}
if (IsContiguousAndMapped(va, size))
{
return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
}
else
{
Memory<byte> memory = new byte[size];
GetSpan(va, size).CopyTo(memory.Span);
return new WritableRegion(this, va, memory);
}
}
/// <inheritdoc/>
public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
return ref *(T*)GetHostAddress(va);
}
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int GetPagesCount(ulong va, uint size, out ulong startVa)
{
// WARNING: Always check that the ulong operations below cannot overflow.
startVa = va & ~(ulong)PageMask;
ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask;
return (int)(vaSpan / PageSize);
}
private void ThrowMemoryNotContiguous() => throw new MemoryNotContiguousException();
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguousAndMapped(ulong va, int size) => IsContiguous(va, size) && IsMapped(va);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsContiguous(ulong va, int size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, (ulong)size))
{
return false;
}
int pages = GetPagesCount(va, (uint)size, out va);
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return false;
}
if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
{
return false;
}
va += PageSize;
}
return true;
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
}
return GetHostRegionsImpl(va, size);
}
/// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<MemoryRange>();
}
var hostRegions = GetHostRegionsImpl(va, size);
if (hostRegions == null)
{
return null;
}
var regions = new MemoryRange[hostRegions.Count];
ulong backingStart = (ulong)_backingMemory.Pointer;
ulong backingEnd = backingStart + _backingMemory.Size;
int count = 0;
for (int i = 0; i < regions.Length; i++)
{
var hostRegion = hostRegions[i];
if ((ulong)hostRegion.Address >= backingStart && (ulong)hostRegion.Address < backingEnd)
{
regions[count++] = new MemoryRange((ulong)hostRegion.Address - backingStart, hostRegion.Size);
}
}
if (count != regions.Length)
{
return new ArraySegment<MemoryRange>(regions, 0, count);
}
return regions;
}
private List<HostMemoryRange> GetHostRegionsImpl(ulong va, ulong size)
{
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
{
return null;
}
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<HostMemoryRange>();
nuint regionStart = GetHostAddress(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
{
if (!ValidateAddress(va + PageSize))
{
return null;
}
nuint newHostAddress = GetHostAddress(va + PageSize);
if (GetHostAddress(va) + PageSize != newHostAddress)
{
regions.Add(new HostMemoryRange(regionStart, regionSize));
regionStart = newHostAddress;
regionSize = 0;
}
va += PageSize;
regionSize += PageSize;
}
regions.Add(new HostMemoryRange(regionStart, regionSize));
return regions;
}
private void ReadImpl(ulong va, Span<byte> data)
{
if (data.Length == 0)
{
return;
}
AssertValidAddressAndSize(va, (ulong)data.Length);
int offset = 0, size;
if ((va & PageMask) != 0)
{
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
size = Math.Min(data.Length - offset, PageSize);
GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
}
}
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsMapped(ulong va)
{
if (!ValidateAddress(va))
{
return false;
}
return _pageTable.Read(va) != 0;
}
/// <inheritdoc/>
public bool IsRangeMapped(ulong va, ulong size)
{
if (size == 0UL)
{
return true;
}
if (!ValidateAddressAndSize(va, size))
{
return false;
}
int pages = GetPagesCount(va, (uint)size, out va);
for (int page = 0; page < pages; page++)
{
if (!IsMapped(va))
{
return false;
}
va += PageSize;
}
return true;
}
private bool ValidateAddress(ulong va)
{
return va < _addressSpaceSize;
}
/// <summary>
/// Checks if the combination of virtual address and size is part of the addressable space.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>True if the combination of virtual address and size is part of the addressable space</returns>
private bool ValidateAddressAndSize(ulong va, ulong size)
{
ulong endVa = va + size;
return endVa >= va && endVa >= size && endVa <= _addressSpaceSize;
}
/// <summary>
/// Ensures the combination of virtual address and size is part of the addressable space.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified is outside the addressable space</exception>
private void AssertValidAddressAndSize(ulong va, ulong size)
{
if (!ValidateAddressAndSize(va, size))
{
throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}");
}
}
private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
{
return new Span<byte>((void*)GetHostAddress(va), size);
}
private nuint GetHostAddress(ulong va)
{
return _pageTable.Read(va) + (nuint)(va & PageMask);
}
/// <inheritdoc/>
public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection)
{
throw new NotImplementedException();
}
/// <inheritdoc/>
public void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null)
{
// Only the ARM Memory Manager has tracking for now.
}
}
}
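
A minimal usage sketch of the class above, with illustrative sizes and addresses: allocate a backing MemoryBlock, map a single page into the address space, and round-trip a value through it.

using Ryujinx.Memory;

// Illustrative sizes and addresses; both va and pa must be page aligned for Map.
ulong backingSize = 0x10000; // 64 KiB of host backing memory
using var backing = new MemoryBlock(backingSize);

var addressSpace = new AddressSpaceManager(backing, addressSpaceSize: 1UL << 32);

ulong va = 0x1000; // virtual address inside the managed address space
ulong pa = 0x2000; // offset into the backing block
addressSpace.Map(va, pa, AddressSpaceManager.PageSize, MemoryMapFlags.None);

addressSpace.Write<uint>(va, 0xCAFEBABE);
uint value = addressSpace.Read<uint>(va); // 0xCAFEBABE
addressSpace.Unmap(va, AddressSpaceManager.PageSize);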

@@ -0,0 +1,8 @@
namespace Ryujinx.Memory
{
public interface IRefCounted
{
void IncrementReferenceCount();
void DecrementReferenceCount();
}
}

@@ -0,0 +1,205 @@
using Ryujinx.Memory.Range;
using System;
using System.Buffers;
using System.Collections.Generic;
namespace Ryujinx.Memory
{
public interface IVirtualMemoryManager
{
/// <summary>
/// Indicates whether the memory manager supports aliasing pages at 4KB granularity.
/// </summary>
/// <returns>True if 4KB pages are supported by the memory manager, false otherwise</returns>
bool Supports4KBPages { get; }
/// <summary>
/// Maps a virtual memory range into a physical memory range.
/// </summary>
/// <remarks>
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="pa">Physical memory address where the region should be mapped to</param>
/// <param name="size">Size to be mapped</param>
/// <param name="flags">Flags controlling memory mapping</param>
void Map(ulong va, ulong pa, ulong size, MemoryMapFlags flags);
/// <summary>
/// Maps a virtual memory range into an arbitrary host memory range.
/// </summary>
/// <remarks>
/// Addresses and size must be page aligned.
/// Not all memory managers support this feature.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="hostPointer">Host pointer where the virtual region should be mapped</param>
/// <param name="size">Size to be mapped</param>
void MapForeign(ulong va, nuint hostPointer, ulong size);
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
/// </summary>
/// <param name="va">Virtual address of the range to be unmapped</param>
/// <param name="size">Size of the range to be unmapped</param>
void Unmap(ulong va, ulong size);
/// <summary>
/// Reads data from CPU mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being read</typeparam>
/// <param name="va">Virtual address of the data in memory</param>
/// <returns>The data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
T Read<T>(ulong va) where T : unmanaged;
/// <summary>
/// Reads data from CPU mapped memory.
/// </summary>
/// <param name="va">Virtual address of the data in memory</param>
/// <param name="data">Span to store the data being read into</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Read(ulong va, Span<byte> data);
/// <summary>
/// Writes data to CPU mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being written</typeparam>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="value">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Write<T>(ulong va, T value) where T : unmanaged;
/// <summary>
/// Writes data to CPU mapped memory, with write tracking.
/// </summary>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
void Write(ulong va, ReadOnlySpan<byte> data);
/// <summary>
/// Writes data to CPU mapped memory, with write tracking.
/// </summary>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
public void Write(ulong va, ReadOnlySequence<byte> data)
{
foreach (ReadOnlyMemory<byte> segment in data)
{
Write(va, segment.Span);
va += (ulong)segment.Length;
}
}
/// <summary>
/// Writes data to the application process, returning false if the data was not changed.
/// This triggers read memory tracking, as a redundancy check would be useless if the data is not up to date.
/// </summary>
/// <remarks>The memory manager can return that memory has changed when it hasn't to avoid expensive data copies.</remarks>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
/// <returns>True if the data was changed, false otherwise</returns>
bool WriteWithRedundancyCheck(ulong va, ReadOnlySpan<byte> data);
void Fill(ulong va, ulong size, byte value)
{
const int MaxChunkSize = 1 << 24;
for (ulong subOffset = 0; subOffset < size; subOffset += MaxChunkSize)
{
int copySize = (int)Math.Min(MaxChunkSize, size - subOffset);
using var writableRegion = GetWritableRegion(va + subOffset, copySize);
writableRegion.Memory.Span.Fill(value);
}
}
/// <summary>
/// Gets a read-only span of data from CPU mapped memory.
/// </summary>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>A read-only span of the data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false);
/// <summary>
/// Gets a region of memory that can be written to.
/// </summary>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region of memory containing the data</returns>
/// <exception cref="InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false);
/// <summary>
/// Gets a reference for the given type at the specified virtual memory address.
/// </summary>
/// <remarks>
/// The data must be located at a contiguous memory region.
/// </remarks>
/// <typeparam name="T">Type of the data to get the reference</typeparam>
/// <param name="va">Virtual address of the data</param>
/// <returns>A reference to the data in memory</returns>
/// <exception cref="MemoryNotContiguousException">Throw if the specified memory region is not contiguous in physical memory</exception>
ref T GetRef<T>(ulong va) where T : unmanaged;
/// <summary>
/// Gets the host regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of host regions</returns>
IEnumerable<HostMemoryRange> GetHostRegions(ulong va, ulong size);
/// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <summary>
/// Checks if the page at a given CPU virtual address is mapped.
/// </summary>
/// <param name="va">Virtual address to check</param>
/// <returns>True if the address is mapped, false otherwise</returns>
bool IsMapped(ulong va);
/// <summary>
/// Checks if a memory range is mapped.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>True if the entire range is mapped, false otherwise</returns>
bool IsRangeMapped(ulong va, ulong size);
/// <summary>
/// Alerts the memory tracking that a given region has been read from or written to.
/// This should be called before read/write is performed.
/// </summary>
/// <param name="va">Virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="write">True if the region was written, false if read</param>
/// <param name="precise">True if the access is precise, false otherwise</param>
/// <param name="exemptId">Optional ID of the handles that should not be signalled</param>
void SignalMemoryTracking(ulong va, ulong size, bool write, bool precise = false, int? exemptId = null);
/// <summary>
/// Reprotect a region of virtual memory for tracking.
/// </summary>
/// <param name="va">Virtual address base</param>
/// <param name="size">Size of the region to protect</param>
/// <param name="protection">Memory protection to set</param>
void TrackingReprotect(ulong va, ulong size, MemoryPermission protection);
}
}
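
A short sketch of how a caller might drive this interface, assuming any implementation (such as the AddressSpaceManager shown earlier) and assuming that WritableRegion writes copied data back on dispose, as its use in the default Fill implementation suggests.

using System;
using Ryujinx.Memory;

static uint ZeroAndStamp(IVirtualMemoryManager mm, ulong va, int size)
{
    // Clear the range using the interface's default chunked Fill implementation.
    mm.Fill(va, (ulong)size, 0);

    // Write through a writable region; disposing it is expected to flush the data
    // back when the underlying range was not directly host-contiguous.
    using (WritableRegion region = mm.GetWritableRegion(va, sizeof(uint)))
    {
        BitConverter.TryWriteBytes(region.Memory.Span, 0xDEADBEEFu);
    }

    // Read it back; GetSpan may return a copy if the range is not contiguous.
    ReadOnlySpan<byte> data = mm.GetSpan(va, sizeof(uint));
    return BitConverter.ToUInt32(data);
}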

@@ -0,0 +1,11 @@
using System;
namespace Ryujinx.Memory
{
public interface IWritableBlock
{
void Write(ulong va, ReadOnlySpan<byte> data);
void WriteUntracked(ulong va, ReadOnlySpan<byte> data) => Write(va, data);
}
}

@@ -0,0 +1,9 @@
namespace Ryujinx.Memory
{
/// <summary>
/// Function that handles an invalid memory access from the emulated CPU.
/// </summary>
/// <param name="va">Virtual address of the invalid region that is being accessed</param>
/// <returns>True if the invalid access should be ignored, false otherwise</returns>
public delegate bool InvalidAccessHandler(ulong va);
}
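
For illustration, a hypothetical handler satisfying this delegate might log the fault and ask the caller to ignore it:

using System;
using Ryujinx.Memory;

InvalidAccessHandler onInvalidAccess = va =>
{
    Console.Error.WriteLine($"Ignoring invalid access at VA 0x{va:X16}");
    return true; // true: the caller treats the access as handled and continues
};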

@@ -0,0 +1,19 @@
using System;
namespace Ryujinx.Memory
{
public class InvalidMemoryRegionException : Exception
{
public InvalidMemoryRegionException() : base("Attempted to access an invalid memory region.")
{
}
public InvalidMemoryRegionException(string message) : base(message)
{
}
public InvalidMemoryRegionException(string message, Exception innerException) : base(message, innerException)
{
}
}
}

@@ -0,0 +1,52 @@
using System;
namespace Ryujinx.Memory
{
/// <summary>
/// Flags that control allocation and other properties of the memory block's memory.
/// </summary>
[Flags]
public enum MemoryAllocationFlags
{
/// <summary>
/// No special allocation settings.
/// </summary>
None = 0,
/// <summary>
/// Reserve a region of memory in the process address space,
/// without actually allocating any backing memory.
/// </summary>
Reserve = 1 << 0,
/// <summary>
/// Enables read and write tracking of the memory block.
/// This currently does nothing and is reserved for future use.
/// </summary>
Tracked = 1 << 1,
/// <summary>
/// Enables mirroring of the memory block through aliasing of memory pages.
/// When enabled, this allows creating more memory blocks sharing the same backing storage.
/// </summary>
Mirrorable = 1 << 2,
/// <summary>
/// Indicates that the memory block should support mapping views of a mirrorable memory block.
/// The block whose views are to be mapped should be created with the <see cref="Mirrorable"/> flag.
/// </summary>
ViewCompatible = 1 << 3,
/// <summary>
/// If used with the <see cref="Mirrorable"/> flag, indicates that the memory block will only be used as
/// backing storage and will never be accessed directly, so the memory for the block will not be mapped.
/// </summary>
NoMap = 1 << 4,
/// <summary>
/// Indicates that the memory will be used to store JIT generated code.
/// On some platforms, this requires special flags to be passed that will allow the memory to be executable.
/// </summary>
Jit = 1 << 5
}
}
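
A sketch of how these flags combine in practice, using the MemoryBlock class that appears later in this commit; the sizes are illustrative, and ViewCompatible has platform requirements (see MemoryBlock.SupportsFlags).

using Ryujinx.Memory;

ulong size = 0x100000; // 1 MiB, illustrative

// Backing storage that is only mirrored or mapped into views, never accessed directly.
using var backing = new MemoryBlock(size, MemoryAllocationFlags.Mirrorable | MemoryAllocationFlags.NoMap);

// Reserved block that can receive views of the mirrorable backing block.
using var view = new MemoryBlock(size, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.ViewCompatible);

// Map the first 64 KiB of the backing storage at offset 0 of the view-compatible block.
view.MapView(backing, srcOffset: 0, dstOffset: 0, size: 0x10000);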

@@ -0,0 +1,443 @@
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Threading;
namespace Ryujinx.Memory
{
/// <summary>
/// Represents a block of contiguous physical guest memory.
/// </summary>
public sealed class MemoryBlock : IWritableBlock, IDisposable
{
private readonly bool _usesSharedMemory;
private readonly bool _isMirror;
private readonly bool _viewCompatible;
private readonly bool _forJit;
private IntPtr _sharedMemory;
private IntPtr _pointer;
/// <summary>
/// Pointer to the memory block data.
/// </summary>
public IntPtr Pointer => _pointer;
/// <summary>
/// Size of the memory block.
/// </summary>
public ulong Size { get; }
/// <summary>
/// Creates a new instance of the memory block class.
/// </summary>
/// <param name="size">Size of the memory block in bytes</param>
/// <param name="flags">Flags that control memory block memory allocation</param>
/// <exception cref="OutOfMemoryException">Throw when there's not enough memory to allocate the requested size</exception>
/// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
public MemoryBlock(ulong size, MemoryAllocationFlags flags = MemoryAllocationFlags.None)
{
if (flags.HasFlag(MemoryAllocationFlags.Mirrorable))
{
_sharedMemory = MemoryManagement.CreateSharedMemory(size, flags.HasFlag(MemoryAllocationFlags.Reserve));
if (!flags.HasFlag(MemoryAllocationFlags.NoMap))
{
_pointer = MemoryManagement.MapSharedMemory(_sharedMemory, size);
}
_usesSharedMemory = true;
}
else if (flags.HasFlag(MemoryAllocationFlags.Reserve))
{
_viewCompatible = flags.HasFlag(MemoryAllocationFlags.ViewCompatible);
_forJit = flags.HasFlag(MemoryAllocationFlags.Jit);
_pointer = MemoryManagement.Reserve(size, _forJit, _viewCompatible);
}
else
{
_forJit = flags.HasFlag(MemoryAllocationFlags.Jit);
_pointer = MemoryManagement.Allocate(size, _forJit);
}
Size = size;
}
/// <summary>
/// Creates a new instance of the memory block class, with an existing backing storage.
/// </summary>
/// <param name="size">Size of the memory block in bytes</param>
/// <param name="sharedMemory">Shared memory to use as backing storage for this block</param>
/// <exception cref="OutOfMemoryException">Throw when there's not enough address space left to map the shared memory</exception>
/// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
private MemoryBlock(ulong size, IntPtr sharedMemory)
{
_pointer = MemoryManagement.MapSharedMemory(sharedMemory, size);
Size = size;
_usesSharedMemory = true;
_isMirror = true;
}
/// <summary>
/// Creates a memory block that shares the backing storage with this block.
/// The memory and page commitments will be shared, however memory protections are separate.
/// </summary>
/// <returns>A new memory block that shares storage with this one</returns>
/// <exception cref="NotSupportedException">Throw when the current memory block does not support mirroring</exception>
/// <exception cref="OutOfMemoryException">Throw when there's not enough address space left to map the shared memory</exception>
/// <exception cref="PlatformNotSupportedException">Throw when the current platform is not supported</exception>
public MemoryBlock CreateMirror()
{
if (_sharedMemory == IntPtr.Zero)
{
throw new NotSupportedException("Mirroring is not supported on the memory block because the Mirrorable flag was not set.");
}
return new MemoryBlock(Size, _sharedMemory);
}
/// <summary>
/// Commits a region of memory that has previously been reserved.
/// This can be used to allocate memory on demand.
/// </summary>
/// <param name="offset">Starting offset of the range to be committed</param>
/// <param name="size">Size of the range to be committed</param>
/// <returns>True if the operation was successful, false otherwise</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public bool Commit(ulong offset, ulong size)
{
return MemoryManagement.Commit(GetPointerInternal(offset, size), size, _forJit);
}
/// <summary>
/// Decommits a region of memory that has previously been reserved and optionally committed.
/// This can be used to free previously allocated memory on demand.
/// </summary>
/// <param name="offset">Starting offset of the range to be decommitted</param>
/// <param name="size">Size of the range to be decommitted</param>
/// <returns>True if the operation was successful, false otherwise</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public bool Decommit(ulong offset, ulong size)
{
return MemoryManagement.Decommit(GetPointerInternal(offset, size), size);
}
/// <summary>
/// Maps a view of memory from another memory block.
/// </summary>
/// <param name="srcBlock">Memory block from where the backing memory will be taken</param>
/// <param name="srcOffset">Offset on <paramref name="srcBlock"/> of the region that should be mapped</param>
/// <param name="dstOffset">Offset to map the view into on this block</param>
/// <param name="size">Size of the range to be mapped</param>
/// <exception cref="NotSupportedException">Throw when the source memory block does not support mirroring</exception>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public void MapView(MemoryBlock srcBlock, ulong srcOffset, ulong dstOffset, ulong size)
{
if (srcBlock._sharedMemory == IntPtr.Zero)
{
throw new ArgumentException("The source memory block is not mirrorable, and thus cannot be mapped on the current block.");
}
MemoryManagement.MapView(srcBlock._sharedMemory, srcOffset, GetPointerInternal(dstOffset, size), size, this);
}
/// <summary>
/// Unmaps a view of memory from another memory block.
/// </summary>
/// <param name="srcBlock">Memory block from where the backing memory was taken during map</param>
/// <param name="offset">Offset of the view previously mapped with <see cref="MapView"/></param>
/// <param name="size">Size of the range to be unmapped</param>
public void UnmapView(MemoryBlock srcBlock, ulong offset, ulong size)
{
MemoryManagement.UnmapView(srcBlock._sharedMemory, GetPointerInternal(offset, size), size, this);
}
/// <summary>
/// Reprotects a region of memory.
/// </summary>
/// <param name="offset">Starting offset of the range to be reprotected</param>
/// <param name="size">Size of the range to be reprotected</param>
/// <param name="permission">New memory permissions</param>
/// <param name="throwOnFail">True if a failed reprotect should throw</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
/// <exception cref="MemoryProtectionException">Throw when <paramref name="permission"/> is invalid</exception>
public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail = true)
{
MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission, _viewCompatible, throwOnFail);
}
/// <summary>
/// Reads bytes from the memory block.
/// </summary>
/// <param name="offset">Starting offset of the range being read</param>
/// <param name="data">Span where the bytes being read will be copied to</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified for the data is out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Read(ulong offset, Span<byte> data)
{
GetSpan(offset, data.Length).CopyTo(data);
}
/// <summary>
/// Reads data from the memory block.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="offset">Offset where the data is located</param>
/// <returns>Data at the specified address</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified for the data is out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public T Read<T>(ulong offset) where T : unmanaged
{
return GetRef<T>(offset);
}
/// <summary>
/// Writes bytes to the memory block.
/// </summary>
/// <param name="offset">Starting offset of the range being written</param>
/// <param name="data">Span where the bytes being written will be copied from</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified for the data is out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Write(ulong offset, ReadOnlySpan<byte> data)
{
data.CopyTo(GetSpan(offset, data.Length));
}
/// <summary>
/// Writes data to the memory block.
/// </summary>
/// <typeparam name="T">Type of the data being written</typeparam>
/// <param name="offset">Offset to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when the memory region specified for the data is out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Write<T>(ulong offset, T data) where T : unmanaged
{
GetRef<T>(offset) = data;
}
/// <summary>
/// Copies data from one memory location to another.
/// </summary>
/// <param name="dstOffset">Destination offset to write the data into</param>
/// <param name="srcOffset">Source offset to read the data from</param>
/// <param name="size">Size of the copy in bytes</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when <paramref name="srcOffset"/>, <paramref name="dstOffset"/> or <paramref name="size"/> is out of range</exception>
public void Copy(ulong dstOffset, ulong srcOffset, ulong size)
{
const int MaxChunkSize = 1 << 24;
for (ulong offset = 0; offset < size; offset += MaxChunkSize)
{
int copySize = (int)Math.Min(MaxChunkSize, size - offset);
Write(dstOffset + offset, GetSpan(srcOffset + offset, copySize));
}
}
/// <summary>
/// Fills a region of memory with <paramref name="value"/>.
/// </summary>
/// <param name="offset">Offset of the region to fill with <paramref name="value"/></param>
/// <param name="size">Size in bytes of the region to fill</param>
/// <param name="value">Value to use for the fill</param>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public void Fill(ulong offset, ulong size, byte value)
{
const int MaxChunkSize = 1 << 24;
for (ulong subOffset = 0; subOffset < size; subOffset += MaxChunkSize)
{
int copySize = (int)Math.Min(MaxChunkSize, size - subOffset);
GetSpan(offset + subOffset, copySize).Fill(value);
}
}
/// <summary>
/// Gets a reference of the data at a given memory block region.
/// </summary>
/// <typeparam name="T">Data type</typeparam>
/// <param name="offset">Offset of the memory region</param>
/// <returns>A reference to the given memory region data</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe ref T GetRef<T>(ulong offset) where T : unmanaged
{
IntPtr ptr = _pointer;
ObjectDisposedException.ThrowIf(ptr == IntPtr.Zero, this);
int size = Unsafe.SizeOf<T>();
ulong endOffset = offset + (ulong)size;
if (endOffset > Size || endOffset < offset)
{
ThrowInvalidMemoryRegionException();
}
return ref Unsafe.AsRef<T>((void*)PtrAddr(ptr, offset));
}
/// <summary>
/// Gets the pointer of a given memory block region.
/// </summary>
/// <param name="offset">Start offset of the memory region</param>
/// <param name="size">Size in bytes of the region</param>
/// <returns>The pointer to the memory region</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public IntPtr GetPointer(ulong offset, ulong size) => GetPointerInternal(offset, size);
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private IntPtr GetPointerInternal(ulong offset, ulong size)
{
IntPtr ptr = _pointer;
ObjectDisposedException.ThrowIf(ptr == IntPtr.Zero, this);
ulong endOffset = offset + size;
if (endOffset > Size || endOffset < offset)
{
ThrowInvalidMemoryRegionException();
}
return PtrAddr(ptr, offset);
}
/// <summary>
/// Gets the <see cref="Span{T}"/> of a given memory block region.
/// </summary>
/// <param name="offset">Start offset of the memory region</param>
/// <param name="size">Size in bytes of the region</param>
/// <returns>Span of the memory region</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe Span<byte> GetSpan(ulong offset, int size)
{
return new Span<byte>((void*)GetPointerInternal(offset, (ulong)size), size);
}
/// <summary>
/// Gets the <see cref="Memory{T}"/> of a given memory block region.
/// </summary>
/// <param name="offset">Start offset of the memory region</param>
/// <param name="size">Size in bytes of the region</param>
/// <returns>Memory of the memory region</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe Memory<byte> GetMemory(ulong offset, int size)
{
return new NativeMemoryManager<byte>((byte*)GetPointerInternal(offset, (ulong)size), size).Memory;
}
/// <summary>
/// Gets a writable region of a given memory block region.
/// </summary>
/// <param name="offset">Start offset of the memory region</param>
/// <param name="size">Size in bytes of the region</param>
/// <returns>Writable region of the memory region</returns>
/// <exception cref="ObjectDisposedException">Throw when the memory block has already been disposed</exception>
/// <exception cref="InvalidMemoryRegionException">Throw when either <paramref name="offset"/> or <paramref name="size"/> are out of range</exception>
public WritableRegion GetWritableRegion(ulong offset, int size)
{
return new WritableRegion(null, offset, GetMemory(offset, size));
}
/// <summary>
/// Adds a 64-bit offset to a native pointer.
/// </summary>
/// <param name="pointer">Native pointer</param>
/// <param name="offset">Offset to add</param>
/// <returns>Native pointer with the added offset</returns>
private IntPtr PtrAddr(IntPtr pointer, ulong offset)
{
return (IntPtr)(pointer.ToInt64() + (long)offset);
}
/// <summary>
/// Frees the memory allocated for this memory block.
/// </summary>
/// <remarks>
/// It's an error to use the memory block after disposal.
/// </remarks>
public void Dispose()
{
FreeMemory();
GC.SuppressFinalize(this);
}
~MemoryBlock() => FreeMemory();
private void FreeMemory()
{
IntPtr ptr = Interlocked.Exchange(ref _pointer, IntPtr.Zero);
// If pointer is null, the memory was already freed or never allocated.
if (ptr != IntPtr.Zero)
{
if (_usesSharedMemory)
{
MemoryManagement.UnmapSharedMemory(ptr, Size);
}
else
{
MemoryManagement.Free(ptr, Size);
}
}
if (!_isMirror)
{
IntPtr sharedMemory = Interlocked.Exchange(ref _sharedMemory, IntPtr.Zero);
if (sharedMemory != IntPtr.Zero)
{
MemoryManagement.DestroySharedMemory(sharedMemory);
}
}
}
/// <summary>
/// Checks if the specified memory allocation flags are supported on the current platform.
/// </summary>
/// <param name="flags">Flags to be checked</param>
/// <returns>True if the platform supports all the flags, false otherwise</returns>
public static bool SupportsFlags(MemoryAllocationFlags flags)
{
if (flags.HasFlag(MemoryAllocationFlags.ViewCompatible))
{
if (OperatingSystem.IsWindows())
{
return OperatingSystem.IsWindowsVersionAtLeast(10, 0, 17134);
}
return OperatingSystem.IsLinux() || OperatingSystem.IsMacOS();
}
return true;
}
public static ulong GetPageSize()
{
return (ulong)Environment.SystemPageSize;
}
private static void ThrowInvalidMemoryRegionException() => throw new InvalidMemoryRegionException();
}
}
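
A brief usage sketch of the block above, with illustrative values: reserve address space, commit a page on demand, access it, then reprotect it; Dispose releases everything when the block goes out of scope.

using Ryujinx.Memory;

ulong size = 0x10000; // 64 KiB, illustrative

// Reserve address space without committing any backing memory yet.
using var block = new MemoryBlock(size, MemoryAllocationFlags.Reserve);

// Commit the first host page before touching it.
ulong pageSize = MemoryBlock.GetPageSize();
block.Commit(0, pageSize);

block.Write<ulong>(0, 0x1122334455667788UL);
ulong value = block.Read<ulong>(0);

// Make the committed page read-only.
block.Reprotect(0, pageSize, MemoryPermission.Read);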

@@ -0,0 +1,9 @@
namespace Ryujinx.Memory
{
static class MemoryConstants
{
public const int PageBits = 12;
public const int PageSize = 1 << PageBits;
public const int PageMask = PageSize - 1;
}
}

@@ -0,0 +1,206 @@
using System;
namespace Ryujinx.Memory
{
public static class MemoryManagement
{
public static IntPtr Allocate(ulong size, bool forJit)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.Allocate((IntPtr)size);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Allocate(size, forJit);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static IntPtr Reserve(ulong size, bool forJit, bool viewCompatible)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.Reserve((IntPtr)size, viewCompatible);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Reserve(size, forJit);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static bool Commit(IntPtr address, ulong size, bool forJit)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.Commit(address, (IntPtr)size);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Commit(address, size, forJit);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static bool Decommit(IntPtr address, ulong size)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.Decommit(address, (IntPtr)size);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Decommit(address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr address, ulong size, MemoryBlock owner)
{
if (OperatingSystem.IsWindows())
{
MemoryManagementWindows.MapView(sharedMemory, srcOffset, address, (IntPtr)size, owner);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.MapView(sharedMemory, srcOffset, address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void UnmapView(IntPtr sharedMemory, IntPtr address, ulong size, MemoryBlock owner)
{
if (OperatingSystem.IsWindows())
{
MemoryManagementWindows.UnmapView(sharedMemory, address, (IntPtr)size, owner);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.UnmapView(address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission, bool forView, bool throwOnFail)
{
bool result;
if (OperatingSystem.IsWindows())
{
result = MemoryManagementWindows.Reprotect(address, (IntPtr)size, permission, forView);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
result = MemoryManagementUnix.Reprotect(address, size, permission);
}
else
{
throw new PlatformNotSupportedException();
}
if (!result && throwOnFail)
{
throw new MemoryProtectionException(permission);
}
}
public static bool Free(IntPtr address, ulong size)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.Free(address, (IntPtr)size);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.Free(address);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static IntPtr CreateSharedMemory(ulong size, bool reserve)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.CreateSharedMemory((IntPtr)size, reserve);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.CreateSharedMemory(size, reserve);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void DestroySharedMemory(IntPtr handle)
{
if (OperatingSystem.IsWindows())
{
MemoryManagementWindows.DestroySharedMemory(handle);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.DestroySharedMemory(handle);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static IntPtr MapSharedMemory(IntPtr handle, ulong size)
{
if (OperatingSystem.IsWindows())
{
return MemoryManagementWindows.MapSharedMemory(handle);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
return MemoryManagementUnix.MapSharedMemory(handle, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
public static void UnmapSharedMemory(IntPtr address, ulong size)
{
if (OperatingSystem.IsWindows())
{
MemoryManagementWindows.UnmapSharedMemory(address);
}
else if (OperatingSystem.IsLinux() || OperatingSystem.IsMacOS())
{
MemoryManagementUnix.UnmapSharedMemory(address, size);
}
else
{
throw new PlatformNotSupportedException();
}
}
}
}
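
MemoryBlock is the intended consumer of this facade; purely as an illustration, a direct call sequence on a supported platform might look like this:

using System;
using Ryujinx.Memory;

ulong size = (ulong)Environment.SystemPageSize;

IntPtr ptr = MemoryManagement.Allocate(size, forJit: false);
MemoryManagement.Reprotect(ptr, size, MemoryPermission.Read, forView: false, throwOnFail: true);
bool freed = MemoryManagement.Free(ptr, size);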

@@ -0,0 +1,200 @@
using System;
using System.Collections.Concurrent;
using System.Runtime.Versioning;
using System.Text;
using static Ryujinx.Memory.MemoryManagerUnixHelper;
namespace Ryujinx.Memory
{
[SupportedOSPlatform("linux")]
[SupportedOSPlatform("macos")]
static class MemoryManagementUnix
{
private static readonly ConcurrentDictionary<IntPtr, ulong> _allocations = new ConcurrentDictionary<IntPtr, ulong>();
public static IntPtr Allocate(ulong size, bool forJit)
{
return AllocateInternal(size, MmapProts.PROT_READ | MmapProts.PROT_WRITE, forJit);
}
public static IntPtr Reserve(ulong size, bool forJit)
{
return AllocateInternal(size, MmapProts.PROT_NONE, forJit);
}
private static IntPtr AllocateInternal(ulong size, MmapProts prot, bool forJit, bool shared = false)
{
MmapFlags flags = MmapFlags.MAP_ANONYMOUS;
if (shared)
{
flags |= MmapFlags.MAP_SHARED | MmapFlags.MAP_UNLOCKED;
}
else
{
flags |= MmapFlags.MAP_PRIVATE;
}
if (prot == MmapProts.PROT_NONE)
{
flags |= MmapFlags.MAP_NORESERVE;
}
if (OperatingSystem.IsMacOSVersionAtLeast(10, 14) && forJit)
{
flags |= MmapFlags.MAP_JIT_DARWIN;
if (prot == (MmapProts.PROT_READ | MmapProts.PROT_WRITE))
{
prot |= MmapProts.PROT_EXEC;
}
}
IntPtr ptr = mmap(IntPtr.Zero, size, prot, flags, -1, 0);
if (ptr == new IntPtr(-1L))
{
throw new OutOfMemoryException();
}
if (!_allocations.TryAdd(ptr, size))
{
// This should be impossible, kernel shouldn't return an already mapped address.
throw new InvalidOperationException();
}
return ptr;
}
public static bool Commit(IntPtr address, ulong size, bool forJit)
{
MmapProts prot = MmapProts.PROT_READ | MmapProts.PROT_WRITE;
if (OperatingSystem.IsMacOSVersionAtLeast(10, 14) && forJit)
{
prot |= MmapProts.PROT_EXEC;
}
return mprotect(address, size, prot) == 0;
}
public static bool Decommit(IntPtr address, ulong size)
{
// Must be writable for madvise to work properly.
mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE);
madvise(address, size, MADV_REMOVE);
return mprotect(address, size, MmapProts.PROT_NONE) == 0;
}
public static bool Reprotect(IntPtr address, ulong size, MemoryPermission permission)
{
return mprotect(address, size, GetProtection(permission)) == 0;
}
private static MmapProts GetProtection(MemoryPermission permission)
{
return permission switch
{
MemoryPermission.None => MmapProts.PROT_NONE,
MemoryPermission.Read => MmapProts.PROT_READ,
MemoryPermission.ReadAndWrite => MmapProts.PROT_READ | MmapProts.PROT_WRITE,
MemoryPermission.ReadAndExecute => MmapProts.PROT_READ | MmapProts.PROT_EXEC,
MemoryPermission.ReadWriteExecute => MmapProts.PROT_READ | MmapProts.PROT_WRITE | MmapProts.PROT_EXEC,
MemoryPermission.Execute => MmapProts.PROT_EXEC,
_ => throw new MemoryProtectionException(permission)
};
}
public static bool Free(IntPtr address)
{
if (_allocations.TryRemove(address, out ulong size))
{
return munmap(address, size) == 0;
}
return false;
}
public static bool Unmap(IntPtr address, ulong size)
{
return munmap(address, size) == 0;
}
public unsafe static IntPtr CreateSharedMemory(ulong size, bool reserve)
{
int fd;
if (OperatingSystem.IsMacOS())
{
byte[] memName = Encoding.ASCII.GetBytes("Ryujinx-XXXXXX");
fixed (byte* pMemName = memName)
{
fd = shm_open((IntPtr)pMemName, 0x2 | 0x200 | 0x800 | 0x400, 384); // O_RDWR | O_CREAT | O_EXCL | O_TRUNC, 0600
if (fd == -1)
{
throw new OutOfMemoryException();
}
if (shm_unlink((IntPtr)pMemName) != 0)
{
throw new OutOfMemoryException();
}
}
}
else
{
byte[] fileName = Encoding.ASCII.GetBytes("/dev/shm/Ryujinx-XXXXXX");
fixed (byte* pFileName = fileName)
{
fd = mkstemp((IntPtr)pFileName);
if (fd == -1)
{
throw new OutOfMemoryException();
}
if (unlink((IntPtr)pFileName) != 0)
{
throw new OutOfMemoryException();
}
}
}
if (ftruncate(fd, (IntPtr)size) != 0)
{
throw new OutOfMemoryException();
}
return (IntPtr)fd;
}
public static void DestroySharedMemory(IntPtr handle)
{
close((int)handle);
}
public static IntPtr MapSharedMemory(IntPtr handle, ulong size)
{
return mmap(IntPtr.Zero, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE, MmapFlags.MAP_SHARED, (int)handle, 0);
}
public static void UnmapSharedMemory(IntPtr address, ulong size)
{
munmap(address, size);
}
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, ulong size)
{
mmap(location, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE, MmapFlags.MAP_FIXED | MmapFlags.MAP_SHARED, (int)sharedMemory, (long)srcOffset);
}
public static void UnmapView(IntPtr location, ulong size)
{
mmap(location, size, MmapProts.PROT_NONE, MmapFlags.MAP_FIXED | MmapFlags.MAP_PRIVATE | MmapFlags.MAP_ANONYMOUS | MmapFlags.MAP_NORESERVE, -1, 0);
}
}
}

@@ -0,0 +1,144 @@
using Ryujinx.Memory.WindowsShared;
using System;
using System.Runtime.Versioning;
namespace Ryujinx.Memory
{
[SupportedOSPlatform("windows")]
static class MemoryManagementWindows
{
public const int PageSize = 0x1000;
private static readonly PlaceholderManager _placeholders = new PlaceholderManager();
public static IntPtr Allocate(IntPtr size)
{
return AllocateInternal(size, AllocationType.Reserve | AllocationType.Commit);
}
public static IntPtr Reserve(IntPtr size, bool viewCompatible)
{
if (viewCompatible)
{
IntPtr baseAddress = AllocateInternal2(size, AllocationType.Reserve | AllocationType.ReservePlaceholder);
_placeholders.ReserveRange((ulong)baseAddress, (ulong)size);
return baseAddress;
}
return AllocateInternal(size, AllocationType.Reserve);
}
private static IntPtr AllocateInternal(IntPtr size, AllocationType flags = 0)
{
IntPtr ptr = WindowsApi.VirtualAlloc(IntPtr.Zero, size, flags, MemoryProtection.ReadWrite);
if (ptr == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
return ptr;
}
private static IntPtr AllocateInternal2(IntPtr size, AllocationType flags = 0)
{
IntPtr ptr = WindowsApi.VirtualAlloc2(WindowsApi.CurrentProcessHandle, IntPtr.Zero, size, flags, MemoryProtection.NoAccess, IntPtr.Zero, 0);
if (ptr == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
return ptr;
}
public static bool Commit(IntPtr location, IntPtr size)
{
return WindowsApi.VirtualAlloc(location, size, AllocationType.Commit, MemoryProtection.ReadWrite) != IntPtr.Zero;
}
public static bool Decommit(IntPtr location, IntPtr size)
{
return WindowsApi.VirtualFree(location, size, AllocationType.Decommit);
}
public static void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size, MemoryBlock owner)
{
_placeholders.MapView(sharedMemory, srcOffset, location, size, owner);
}
public static void UnmapView(IntPtr sharedMemory, IntPtr location, IntPtr size, MemoryBlock owner)
{
_placeholders.UnmapView(sharedMemory, location, size, owner);
}
public static bool Reprotect(IntPtr address, IntPtr size, MemoryPermission permission, bool forView)
{
if (forView)
{
return _placeholders.ReprotectView(address, size, permission);
}
else
{
return WindowsApi.VirtualProtect(address, size, WindowsApi.GetProtection(permission), out _);
}
}
public static bool Free(IntPtr address, IntPtr size)
{
_placeholders.UnreserveRange((ulong)address, (ulong)size);
return WindowsApi.VirtualFree(address, IntPtr.Zero, AllocationType.Release);
}
public static IntPtr CreateSharedMemory(IntPtr size, bool reserve)
{
var prot = reserve ? FileMapProtection.SectionReserve : FileMapProtection.SectionCommit;
IntPtr handle = WindowsApi.CreateFileMapping(
WindowsApi.InvalidHandleValue,
IntPtr.Zero,
FileMapProtection.PageReadWrite | prot,
(uint)(size.ToInt64() >> 32),
(uint)size.ToInt64(),
null);
if (handle == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
return handle;
}
public static void DestroySharedMemory(IntPtr handle)
{
if (!WindowsApi.CloseHandle(handle))
{
throw new ArgumentException("Invalid handle.", nameof(handle));
}
}
public static IntPtr MapSharedMemory(IntPtr handle)
{
IntPtr ptr = WindowsApi.MapViewOfFile(handle, 4 | 2, 0, 0, IntPtr.Zero);
if (ptr == IntPtr.Zero)
{
throw new OutOfMemoryException();
}
return ptr;
}
public static void UnmapSharedMemory(IntPtr address)
{
if (!WindowsApi.UnmapViewOfFile(address))
{
throw new ArgumentException("Invalid address.", nameof(address));
}
}
}
}

@@ -0,0 +1,167 @@
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory
{
public static partial class MemoryManagerUnixHelper
{
[Flags]
public enum MmapProts : uint
{
PROT_NONE = 0,
PROT_READ = 1,
PROT_WRITE = 2,
PROT_EXEC = 4
}
[Flags]
public enum MmapFlags : uint
{
MAP_SHARED = 1,
MAP_PRIVATE = 2,
MAP_ANONYMOUS = 4,
MAP_NORESERVE = 8,
MAP_FIXED = 16,
MAP_UNLOCKED = 32,
MAP_JIT_DARWIN = 0x800
}
[Flags]
public enum OpenFlags : uint
{
O_RDONLY = 0,
O_WRONLY = 1,
O_RDWR = 2,
O_CREAT = 4,
O_EXCL = 8,
O_NOCTTY = 16,
O_TRUNC = 32,
O_APPEND = 64,
O_NONBLOCK = 128,
O_SYNC = 256,
}
private const int MAP_ANONYMOUS_LINUX_GENERIC = 0x20;
private const int MAP_NORESERVE_LINUX_GENERIC = 0x4000;
private const int MAP_UNLOCKED_LINUX_GENERIC = 0x80000;
private const int MAP_NORESERVE_DARWIN = 0x40;
private const int MAP_ANONYMOUS_DARWIN = 0x1000;
public const int MADV_DONTNEED = 4;
public const int MADV_REMOVE = 9;
[LibraryImport("libc", EntryPoint = "mmap", SetLastError = true)]
private static partial IntPtr Internal_mmap(IntPtr address, ulong length, MmapProts prot, int flags, int fd, long offset);
[LibraryImport("libc", SetLastError = true)]
public static partial int mprotect(IntPtr address, ulong length, MmapProts prot);
[LibraryImport("libc", SetLastError = true)]
public static partial int munmap(IntPtr address, ulong length);
[LibraryImport("libc", SetLastError = true)]
public static partial IntPtr mremap(IntPtr old_address, ulong old_size, ulong new_size, int flags, IntPtr new_address);
[LibraryImport("libc", SetLastError = true)]
public static partial int madvise(IntPtr address, ulong size, int advice);
[LibraryImport("libc", SetLastError = true)]
public static partial int mkstemp(IntPtr template);
[LibraryImport("libc", SetLastError = true)]
public static partial int unlink(IntPtr pathname);
[LibraryImport("libc", SetLastError = true)]
public static partial int ftruncate(int fildes, IntPtr length);
[LibraryImport("libc", SetLastError = true)]
public static partial int close(int fd);
[LibraryImport("libc", SetLastError = true)]
public static partial int shm_open(IntPtr name, int oflag, uint mode);
[LibraryImport("libc", SetLastError = true)]
public static partial int shm_unlink(IntPtr name);
private static int MmapFlagsToSystemFlags(MmapFlags flags)
{
int result = 0;
if (flags.HasFlag(MmapFlags.MAP_SHARED))
{
result |= (int)MmapFlags.MAP_SHARED;
}
if (flags.HasFlag(MmapFlags.MAP_PRIVATE))
{
result |= (int)MmapFlags.MAP_PRIVATE;
}
if (flags.HasFlag(MmapFlags.MAP_FIXED))
{
result |= (int)MmapFlags.MAP_FIXED;
}
if (flags.HasFlag(MmapFlags.MAP_ANONYMOUS))
{
if (OperatingSystem.IsLinux())
{
result |= MAP_ANONYMOUS_LINUX_GENERIC;
}
else if (OperatingSystem.IsMacOS())
{
result |= MAP_ANONYMOUS_DARWIN;
}
else
{
throw new NotImplementedException();
}
}
if (flags.HasFlag(MmapFlags.MAP_NORESERVE))
{
if (OperatingSystem.IsLinux())
{
result |= MAP_NORESERVE_LINUX_GENERIC;
}
else if (OperatingSystem.IsMacOS())
{
result |= MAP_NORESERVE_DARWIN;
}
else
{
throw new NotImplementedException();
}
}
if (flags.HasFlag(MmapFlags.MAP_UNLOCKED))
{
if (OperatingSystem.IsLinux())
{
result |= MAP_UNLOCKED_LINUX_GENERIC;
}
else if (OperatingSystem.IsMacOS())
{
// FIXME: Doesn't exist on Darwin
}
else
{
throw new NotImplementedException();
}
}
if (flags.HasFlag(MmapFlags.MAP_JIT_DARWIN) && OperatingSystem.IsMacOSVersionAtLeast(10, 14))
{
result |= (int)MmapFlags.MAP_JIT_DARWIN;
}
return result;
}
public static IntPtr mmap(IntPtr address, ulong length, MmapProts prot, MmapFlags flags, int fd, long offset)
{
return Internal_mmap(address, length, prot, MmapFlagsToSystemFlags(flags), fd, offset);
}
}
}

@@ -0,0 +1,23 @@
using System;
namespace Ryujinx.Memory
{
/// <summary>
/// Flags that indicate how the host memory should be mapped.
/// </summary>
[Flags]
public enum MemoryMapFlags
{
/// <summary>
/// No mapping flags.
/// </summary>
None = 0,
/// <summary>
/// Indicates that the implementation is free to ignore the specified backing memory offset
/// and allocate its own private storage for the mapping.
/// This allows some mappings that would otherwise fail due to host platform restrictions to succeed.
/// </summary>
Private = 1 << 0
}
}

@@ -0,0 +1,19 @@
using System;
namespace Ryujinx.Memory
{
public class MemoryNotContiguousException : Exception
{
public MemoryNotContiguousException() : base("The specified memory region is not contiguous.")
{
}
public MemoryNotContiguousException(string message) : base(message)
{
}
public MemoryNotContiguousException(string message, Exception innerException) : base(message, innerException)
{
}
}
}

@@ -0,0 +1,51 @@
using System;
namespace Ryujinx.Memory
{
/// <summary>
/// Memory access permission control.
/// </summary>
[Flags]
public enum MemoryPermission
{
/// <summary>
/// No access is allowed on the memory region.
/// </summary>
None = 0,
/// <summary>
/// Allow reads on the memory region.
/// </summary>
Read = 1 << 0,
/// <summary>
/// Allow writes on the memory region.
/// </summary>
Write = 1 << 1,
/// <summary>
/// Allow code execution on the memory region.
/// </summary>
Execute = 1 << 2,
/// <summary>
/// Allow reads and writes on the memory region.
/// </summary>
ReadAndWrite = Read | Write,
/// <summary>
/// Allow reads and code execution on the memory region.
/// </summary>
ReadAndExecute = Read | Execute,
/// <summary>
/// Allow reads, writes, and code execution on the memory region.
/// </summary>
ReadWriteExecute = Read | Write | Execute,
/// <summary>
/// Indicates an invalid protection.
/// </summary>
Invalid = 255
}
}

View file

@ -0,0 +1,23 @@
using System;
namespace Ryujinx.Memory
{
class MemoryProtectionException : Exception
{
public MemoryProtectionException()
{
}
public MemoryProtectionException(MemoryPermission permission) : base($"Failed to set memory protection to \"{permission}\".")
{
}
public MemoryProtectionException(string message) : base(message)
{
}
public MemoryProtectionException(string message, Exception innerException) : base(message, innerException)
{
}
}
}

View file

@ -0,0 +1,42 @@
using System;
using System.Buffers;
namespace Ryujinx.Memory
{
public unsafe class NativeMemoryManager<T> : MemoryManager<T> where T : unmanaged
{
private readonly T* _pointer;
private readonly int _length;
public NativeMemoryManager(T* pointer, int length)
{
_pointer = pointer;
_length = length;
}
public override Span<T> GetSpan()
{
return new Span<T>((void*)_pointer, _length);
}
public override MemoryHandle Pin(int elementIndex = 0)
{
if ((uint)elementIndex >= _length)
{
throw new ArgumentOutOfRangeException(nameof(elementIndex));
}
return new MemoryHandle((void*)(_pointer + elementIndex));
}
public override void Unpin()
{
// No need to do anything as the pointer already points to native memory, which is not GC tracked.
}
protected override void Dispose(bool disposing)
{
// Nothing to dispose, MemoryBlock still owns the memory.
}
}
}

View file

@ -0,0 +1,141 @@
namespace Ryujinx.Memory
{
public class PageTable<T> where T : unmanaged
{
public const int PageBits = 12;
public const int PageSize = 1 << PageBits;
public const int PageMask = PageSize - 1;
private const int PtLevelBits = 9; // 9 * 4 + 12 = 48 (max address space size)
private const int PtLevelSize = 1 << PtLevelBits;
private const int PtLevelMask = PtLevelSize - 1;
private readonly T[][][][] _pageTable;
public PageTable()
{
_pageTable = new T[PtLevelSize][][][];
}
public T Read(ulong va)
{
int l3 = (int)(va >> PageBits) & PtLevelMask;
int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
if (_pageTable[l0] == null)
{
return default;
}
if (_pageTable[l0][l1] == null)
{
return default;
}
if (_pageTable[l0][l1][l2] == null)
{
return default;
}
return _pageTable[l0][l1][l2][l3];
}
public void Map(ulong va, T value)
{
int l3 = (int)(va >> PageBits) & PtLevelMask;
int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
if (_pageTable[l0] == null)
{
_pageTable[l0] = new T[PtLevelSize][][];
}
if (_pageTable[l0][l1] == null)
{
_pageTable[l0][l1] = new T[PtLevelSize][];
}
if (_pageTable[l0][l1][l2] == null)
{
_pageTable[l0][l1][l2] = new T[PtLevelSize];
}
_pageTable[l0][l1][l2][l3] = value;
}
public void Unmap(ulong va)
{
int l3 = (int)(va >> PageBits) & PtLevelMask;
int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask;
int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask;
if (_pageTable[l0] == null)
{
return;
}
if (_pageTable[l0][l1] == null)
{
return;
}
if (_pageTable[l0][l1][l2] == null)
{
return;
}
_pageTable[l0][l1][l2][l3] = default;
bool empty = true;
for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++)
{
empty &= _pageTable[l0][l1][l2][i].Equals(default);
}
if (empty)
{
_pageTable[l0][l1][l2] = null;
RemoveIfAllNull(l0, l1);
}
}
private void RemoveIfAllNull(int l0, int l1)
{
bool empty = true;
for (int i = 0; i < _pageTable[l0][l1].Length; i++)
{
empty &= (_pageTable[l0][l1][i] == null);
}
if (empty)
{
_pageTable[l0][l1] = null;
RemoveIfAllNull(l0);
}
}
private void RemoveIfAllNull(int l0)
{
bool empty = true;
for (int i = 0; i < _pageTable[l0].Length; i++)
{
empty &= (_pageTable[l0][i] == null);
}
if (empty)
{
_pageTable[l0] = null;
}
}
}
}
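A small sketch of how the four 9-bit levels resolve a 48-bit virtual address (the address and payload values are illustrative):

// Each level indexes 9 bits of the VA above the 12-bit page offset:
// l3 = (va >> 12) & 0x1FF, l2 = (va >> 21) & 0x1FF, l1 = (va >> 30) & 0x1FF, l0 = (va >> 39) & 0x1FF.
var table = new PageTable<ulong>();
ulong va = 0x7FFF_FFFF_F000UL;      // a page-aligned address near the top of the 48-bit space

table.Map(va, 0xCAFE_BABEUL);       // allocates the intermediate levels on demand
ulong entry = table.Read(va);       // 0xCAFEBABE
table.Unmap(va);                    // levels that become empty are pruned back to null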

View file

@ -0,0 +1,71 @@
using System;
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Range of memory composed of an address and size.
/// </summary>
public struct HostMemoryRange : IEquatable<HostMemoryRange>
{
/// <summary>
/// An empty memory range, with a null address and zero size.
/// </summary>
public static HostMemoryRange Empty => new HostMemoryRange(0, 0);
/// <summary>
/// Start address of the range.
/// </summary>
public nuint Address { get; }
/// <summary>
/// Size of the range in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// Address where the range ends (exclusive).
/// </summary>
public nuint EndAddress => Address + (nuint)Size;
/// <summary>
/// Creates a new memory range with the specified address and size.
/// </summary>
/// <param name="address">Start address</param>
/// <param name="size">Size in bytes</param>
public HostMemoryRange(nuint address, ulong size)
{
Address = address;
Size = size;
}
/// <summary>
/// Checks if the range overlaps with another.
/// </summary>
/// <param name="other">The other range to check for overlap</param>
/// <returns>True if the ranges overlap, false otherwise</returns>
public bool OverlapsWith(HostMemoryRange other)
{
nuint thisAddress = Address;
nuint thisEndAddress = EndAddress;
nuint otherAddress = other.Address;
nuint otherEndAddress = other.EndAddress;
return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
}
public override bool Equals(object obj)
{
return obj is HostMemoryRange other && Equals(other);
}
public bool Equals(HostMemoryRange other)
{
return Address == other.Address && Size == other.Size;
}
public override int GetHashCode()
{
return HashCode.Combine(Address, Size);
}
}
}

View file

@ -0,0 +1,9 @@
namespace Ryujinx.Memory.Range
{
public interface IMultiRangeItem
{
MultiRange Range { get; }
ulong BaseAddress => Range.GetSubRange(0).Address;
}
}

View file

@ -0,0 +1,16 @@
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Range of memory that can be split in two.
/// </summary>
interface INonOverlappingRange : IRange
{
/// <summary>
/// Split this region into two, around the specified address.
/// This region is updated to end at the split address, and a new region is created to represent past that point.
/// </summary>
/// <param name="splitAddress">Address to split the region around</param>
/// <returns>The second part of the split region, with start address at the given split.</returns>
public INonOverlappingRange Split(ulong splitAddress);
}
}

View file

@ -0,0 +1,31 @@
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Range of memory.
/// </summary>
public interface IRange
{
/// <summary>
/// Base address.
/// </summary>
ulong Address { get; }
/// <summary>
/// Size of the range.
/// </summary>
ulong Size { get; }
/// <summary>
/// End address.
/// </summary>
ulong EndAddress { get; }
/// <summary>
/// Check if this range overlaps with another.
/// </summary>
/// <param name="address">Base address</param>
/// <param name="size">Size of the range</param>
/// <returns>True if overlapping, false otherwise</returns>
bool OverlapsWith(ulong address, ulong size);
}
}

View file

@ -0,0 +1,61 @@
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Range of memory composed of an address and size.
/// </summary>
public readonly record struct MemoryRange
{
/// <summary>
/// An empty memory range, with a null address and zero size.
/// </summary>
public static MemoryRange Empty => new MemoryRange(0UL, 0);
/// <summary>
/// Start address of the range.
/// </summary>
public ulong Address { get; }
/// <summary>
/// Size of the range in bytes.
/// </summary>
public ulong Size { get; }
/// <summary>
/// Address where the range ends (exclusive).
/// </summary>
public ulong EndAddress => Address + Size;
/// <summary>
/// Creates a new memory range with the specified address and size.
/// </summary>
/// <param name="address">Start address</param>
/// <param name="size">Size in bytes</param>
public MemoryRange(ulong address, ulong size)
{
Address = address;
Size = size;
}
/// <summary>
/// Checks if the range overlaps with another.
/// </summary>
/// <param name="other">The other range to check for overlap</param>
/// <returns>True if the ranges overlap, false otherwise</returns>
public bool OverlapsWith(MemoryRange other)
{
ulong thisAddress = Address;
ulong thisEndAddress = EndAddress;
ulong otherAddress = other.Address;
ulong otherEndAddress = other.EndAddress;
// If either of the ranges is invalid (address + size overflows),
// then they are never considered to overlap.
if (thisEndAddress < thisAddress || otherEndAddress < otherAddress)
{
return false;
}
return thisAddress < otherEndAddress && otherAddress < thisEndAddress;
}
}
}
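A short illustration of the overlap test and its overflow guard (values are illustrative):

var a = new MemoryRange(0x1000, 0x2000);              // [0x1000, 0x3000)
var b = new MemoryRange(0x2000, 0x1000);              // [0x2000, 0x3000)
bool overlap = a.OverlapsWith(b);                     // true: 0x1000 < 0x3000 && 0x2000 < 0x3000

var wrapped = new MemoryRange(ulong.MaxValue, 0x10);  // EndAddress wraps around below Address
bool never = a.OverlapsWith(wrapped);                 // false: invalid ranges never overlap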

View file

@ -0,0 +1,323 @@
using System;
using System.Collections.Generic;
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Sequence of physical memory regions that a single non-contiguous virtual memory region maps to.
/// </summary>
public readonly struct MultiRange : IEquatable<MultiRange>
{
private const ulong InvalidAddress = ulong.MaxValue;
private readonly MemoryRange _singleRange;
private readonly MemoryRange[] _ranges;
private bool HasSingleRange => _ranges == null;
/// <summary>
/// Total of physical sub-ranges on the virtual memory region.
/// </summary>
public int Count => HasSingleRange ? 1 : _ranges.Length;
/// <summary>
/// Creates a new multi-range with a single physical region.
/// </summary>
/// <param name="address">Start address of the region</param>
/// <param name="size">Size of the region in bytes</param>
public MultiRange(ulong address, ulong size)
{
_singleRange = new MemoryRange(address, size);
_ranges = null;
}
/// <summary>
/// Creates a new multi-range with multiple physical regions.
/// </summary>
/// <param name="ranges">Array of physical regions</param>
/// <exception cref="ArgumentNullException"><paramref name="ranges"/> is null</exception>
public MultiRange(MemoryRange[] ranges)
{
_singleRange = MemoryRange.Empty;
_ranges = ranges ?? throw new ArgumentNullException(nameof(ranges));
}
/// <summary>
/// Gets a slice of the multi-range.
/// </summary>
/// <param name="offset">Offset of the slice into the multi-range in bytes</param>
/// <param name="size">Size of the slice in bytes</param>
/// <returns>A new multi-range representing the given slice of this one</returns>
public MultiRange Slice(ulong offset, ulong size)
{
if (HasSingleRange)
{
if (_singleRange.Size - offset < size)
{
throw new ArgumentOutOfRangeException(nameof(size));
}
return new MultiRange(_singleRange.Address + offset, size);
}
else
{
var ranges = new List<MemoryRange>();
foreach (MemoryRange range in _ranges)
{
if ((long)offset <= 0)
{
ranges.Add(new MemoryRange(range.Address, Math.Min(size, range.Size)));
size -= range.Size;
}
else if (offset < range.Size)
{
ulong sliceSize = Math.Min(size, range.Size - offset);
if (range.Address == InvalidAddress)
{
ranges.Add(new MemoryRange(range.Address, sliceSize));
}
else
{
ranges.Add(new MemoryRange(range.Address + offset, sliceSize));
}
size -= sliceSize;
}
if ((long)size <= 0)
{
break;
}
offset -= range.Size;
}
return new MultiRange(ranges.ToArray());
}
}
/// <summary>
/// Gets the physical region at the specified index.
/// </summary>
/// <param name="index">Index of the physical region</param>
/// <returns>Region at the index specified</returns>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="index"/> is invalid</exception>
public MemoryRange GetSubRange(int index)
{
if (HasSingleRange)
{
if (index != 0)
{
throw new ArgumentOutOfRangeException(nameof(index));
}
return _singleRange;
}
else
{
if ((uint)index >= _ranges.Length)
{
throw new ArgumentOutOfRangeException(nameof(index));
}
return _ranges[index];
}
}
/// <summary>
/// Gets the physical region at the specified index, without explicit bounds checking.
/// </summary>
/// <param name="index">Index of the physical region</param>
/// <returns>Region at the index specified</returns>
private MemoryRange GetSubRangeUnchecked(int index)
{
return HasSingleRange ? _singleRange : _ranges[index];
}
/// <summary>
/// Check if two multi-ranges overlap with each other.
/// </summary>
/// <param name="other">Other multi-range to check for overlap</param>
/// <returns>True if any sub-range overlaps, false otherwise</returns>
public bool OverlapsWith(MultiRange other)
{
if (HasSingleRange && other.HasSingleRange)
{
return _singleRange.OverlapsWith(other._singleRange);
}
else
{
for (int i = 0; i < Count; i++)
{
MemoryRange currentRange = GetSubRangeUnchecked(i);
for (int j = 0; j < other.Count; j++)
{
if (currentRange.OverlapsWith(other.GetSubRangeUnchecked(j)))
{
return true;
}
}
}
}
return false;
}
/// <summary>
/// Checks if a given multi-range is fully contained inside another.
/// </summary>
/// <param name="other">Multi-range to be checked</param>
/// <returns>True if all the sub-ranges on <paramref name="other"/> are contained inside the multi-range, with the same order, false otherwise</returns>
public bool Contains(MultiRange other)
{
return FindOffset(other) >= 0;
}
/// <summary>
/// Calculates the offset of a given multi-range inside another, when the multi-range is fully contained
/// inside the other multi-range, otherwise returns -1.
/// </summary>
/// <param name="other">Multi-range that should be fully contained inside this one</param>
/// <returns>Offset in bytes if fully contained, otherwise -1</returns>
public int FindOffset(MultiRange other)
{
int thisCount = Count;
int otherCount = other.Count;
if (thisCount == 1 && otherCount == 1)
{
MemoryRange otherFirstRange = other.GetSubRangeUnchecked(0);
MemoryRange currentFirstRange = GetSubRangeUnchecked(0);
if (otherFirstRange.Address >= currentFirstRange.Address &&
otherFirstRange.EndAddress <= currentFirstRange.EndAddress)
{
return (int)(otherFirstRange.Address - currentFirstRange.Address);
}
}
else if (thisCount >= otherCount)
{
ulong baseOffset = 0;
MemoryRange otherFirstRange = other.GetSubRangeUnchecked(0);
MemoryRange otherLastRange = other.GetSubRangeUnchecked(otherCount - 1);
for (int i = 0; i < (thisCount - otherCount) + 1; baseOffset += GetSubRangeUnchecked(i).Size, i++)
{
MemoryRange currentFirstRange = GetSubRangeUnchecked(i);
MemoryRange currentLastRange = GetSubRangeUnchecked(i + otherCount - 1);
if (otherCount > 1)
{
if (otherFirstRange.Address < currentFirstRange.Address ||
otherFirstRange.EndAddress != currentFirstRange.EndAddress)
{
continue;
}
if (otherLastRange.Address != currentLastRange.Address ||
otherLastRange.EndAddress > currentLastRange.EndAddress)
{
continue;
}
bool fullMatch = true;
for (int j = 1; j < otherCount - 1; j++)
{
if (!GetSubRangeUnchecked(i + j).Equals(other.GetSubRangeUnchecked(j)))
{
fullMatch = false;
break;
}
}
if (!fullMatch)
{
continue;
}
}
else if (currentFirstRange.Address > otherFirstRange.Address ||
currentFirstRange.EndAddress < otherFirstRange.EndAddress)
{
continue;
}
return (int)(baseOffset + (otherFirstRange.Address - currentFirstRange.Address));
}
}
return -1;
}
/// <summary>
/// Gets the total size of all sub-ranges in bytes.
/// </summary>
/// <returns>Total size in bytes</returns>
public ulong GetSize()
{
if (HasSingleRange)
{
return _singleRange.Size;
}
ulong sum = 0;
foreach (MemoryRange range in _ranges)
{
sum += range.Size;
}
return sum;
}
public override bool Equals(object obj)
{
return obj is MultiRange other && Equals(other);
}
public bool Equals(MultiRange other)
{
if (HasSingleRange && other.HasSingleRange)
{
return _singleRange.Equals(other._singleRange);
}
int thisCount = Count;
if (thisCount != other.Count)
{
return false;
}
for (int i = 0; i < thisCount; i++)
{
if (!GetSubRangeUnchecked(i).Equals(other.GetSubRangeUnchecked(i)))
{
return false;
}
}
return true;
}
public override int GetHashCode()
{
if (HasSingleRange)
{
return _singleRange.GetHashCode();
}
HashCode hash = new HashCode();
foreach (MemoryRange range in _ranges)
{
hash.Add(range);
}
return hash.ToHashCode();
}
}
}
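A sketch of how slicing walks the physical sub-ranges (addresses are illustrative):

var multi = new MultiRange(new[]
{
    new MemoryRange(0x1000, 0x1000),   // first sub-range, physically at [0x1000, 0x2000)
    new MemoryRange(0x8000, 0x2000),   // non-contiguous second sub-range
});

ulong total = multi.GetSize();                   // 0x3000
MultiRange tail = multi.Slice(0x1800, 0x800);    // a single sub-range, [0x8800, 0x9000)
int offset = multi.FindOffset(tail);             // 0x1800
bool contained = multi.Contains(tail);           // true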

View file

@ -0,0 +1,210 @@
using Ryujinx.Common.Collections;
using System.Collections;
using System.Collections.Generic;
namespace Ryujinx.Memory.Range
{
public class MultiRangeList<T> : IEnumerable<T> where T : IMultiRangeItem
{
private readonly IntervalTree<ulong, T> _items;
public int Count { get; private set; }
/// <summary>
/// Creates a new range list.
/// </summary>
public MultiRangeList()
{
_items = new IntervalTree<ulong, T>();
}
/// <summary>
/// Adds a new item to the list.
/// </summary>
/// <param name="item">The item to be added</param>
public void Add(T item)
{
MultiRange range = item.Range;
for (int i = 0; i < range.Count; i++)
{
var subrange = range.GetSubRange(i);
if (IsInvalid(ref subrange))
{
continue;
}
_items.Add(subrange.Address, subrange.EndAddress, item);
}
Count++;
}
/// <summary>
/// Removes an item from the list.
/// </summary>
/// <param name="item">The item to be removed</param>
/// <returns>True if the item was removed, or false if it was not found</returns>
public bool Remove(T item)
{
MultiRange range = item.Range;
int removed = 0;
for (int i = 0; i < range.Count; i++)
{
var subrange = range.GetSubRange(i);
if (IsInvalid(ref subrange))
{
continue;
}
removed += _items.Remove(subrange.Address, item);
}
if (removed > 0)
{
// All deleted intervals are for the same item - the one we removed.
Count--;
}
return removed > 0;
}
/// <summary>
/// Gets all items on the list overlapping the specified memory range.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlaps(ulong address, ulong size, ref T[] output)
{
return FindOverlaps(new MultiRange(address, size), ref output);
}
/// <summary>
/// Gets all items on the list overlapping the specified memory ranges.
/// </summary>
/// <param name="range">Ranges of memory being searched</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlaps(MultiRange range, ref T[] output)
{
int overlapCount = 0;
for (int i = 0; i < range.Count; i++)
{
var subrange = range.GetSubRange(i);
if (IsInvalid(ref subrange))
{
continue;
}
overlapCount = _items.Get(subrange.Address, subrange.EndAddress, ref output, overlapCount);
}
// Remove any duplicates, caused by items having multiple sub range nodes in the tree.
if (overlapCount > 1)
{
int insertPtr = 0;
for (int i = 0; i < overlapCount; i++)
{
T item = output[i];
bool duplicate = false;
for (int j = insertPtr - 1; j >= 0; j--)
{
if (item.Equals(output[j]))
{
duplicate = true;
break;
}
}
if (!duplicate)
{
if (insertPtr != i)
{
output[insertPtr] = item;
}
insertPtr++;
}
}
overlapCount = insertPtr;
}
return overlapCount;
}
/// <summary>
/// Checks if a given sub-range of memory is invalid.
/// Those are used to represent unmapped memory regions (holes in the region mapping).
/// </summary>
/// <param name="subRange">Memory range to checl</param>
/// <returns>True if the memory range is considered invalid, false otherwise</returns>
private static bool IsInvalid(ref MemoryRange subRange)
{
return subRange.Address == ulong.MaxValue;
}
/// <summary>
/// Gets all items on the list starting at the specified memory address.
/// </summary>
/// <param name="baseAddress">Base address to find</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of matches found</returns>
public int FindOverlaps(ulong baseAddress, ref T[] output)
{
int count = _items.Get(baseAddress, ref output);
// Only output items with matching base address
int insertPtr = 0;
for (int i = 0; i < count; i++)
{
if (output[i].BaseAddress == baseAddress)
{
if (i != insertPtr)
{
output[insertPtr] = output[i];
}
insertPtr++;
}
}
return insertPtr;
}
private List<T> GetList()
{
var items = _items.AsList();
var result = new List<T>();
foreach (RangeNode<ulong, T> item in items)
{
if (item.Start == item.Value.BaseAddress)
{
result.Add(item.Value);
}
}
return result;
}
public IEnumerator<T> GetEnumerator()
{
return GetList().GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetList().GetEnumerator();
}
}
}
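A hedged usage sketch; TrackedBuffer is a hypothetical item type, only the list and range types above come from this commit:

class TrackedBuffer : IMultiRangeItem
{
    public MultiRange Range { get; }

    public TrackedBuffer(MultiRange range)
    {
        Range = range;
    }
}

// Inside a method:
var list = new MultiRangeList<TrackedBuffer>();
list.Add(new TrackedBuffer(new MultiRange(0x1000, 0x2000)));

var hits = new TrackedBuffer[1];
int count = list.FindOverlaps(0x1800, 0x100, ref hits);   // expected 1: [0x1000, 0x3000) covers 0x1800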

View file

@ -0,0 +1,106 @@
using System;
using System.Collections.Generic;
namespace Ryujinx.Memory.Range
{
/// <summary>
/// A range list that assumes ranges are non-overlapping, with list items that can be split in two to avoid overlaps.
/// </summary>
/// <typeparam name="T">Type of the range.</typeparam>
class NonOverlappingRangeList<T> : RangeList<T> where T : INonOverlappingRange
{
/// <summary>
/// Finds a list of regions that cover the desired (address, size) range.
/// If this range starts or ends in the middle of an existing region, it is split and only the relevant part is added.
/// If there is no matching region, or there is a gap, then new regions are created with the factory.
/// Regions are added to the list in address ascending order.
/// </summary>
/// <param name="list">List to add found regions to</param>
/// <param name="address">Start address of the search region</param>
/// <param name="size">Size of the search region</param>
/// <param name="factory">Factory for creating new ranges</param>
public void GetOrAddRegions(List<T> list, ulong address, ulong size, Func<ulong, ulong, T> factory)
{
// (regarding the specific case this generalized function is used for)
// A new region may be split into multiple parts if multiple virtual regions have mapped to it.
// For instance, while a virtual mapping could cover 0-2 in physical space, the space 0-1 may have already been reserved...
// So we need to return both the split 0-1 and 1-2 ranges.
var results = new T[1];
int count = FindOverlapsNonOverlapping(address, size, ref results);
if (count == 0)
{
// The region is fully unmapped. Create and add it to the range list.
T region = factory(address, size);
list.Add(region);
Add(region);
}
else
{
ulong lastAddress = address;
ulong endAddress = address + size;
for (int i = 0; i < count; i++)
{
T region = results[i];
if (count == 1 && region.Address == address && region.Size == size)
{
// Exact match, no splitting required.
list.Add(region);
return;
}
if (lastAddress < region.Address)
{
// There is a gap between this region and the last. We need to fill it.
T fillRegion = factory(lastAddress, region.Address - lastAddress);
list.Add(fillRegion);
Add(fillRegion);
}
if (region.Address < address)
{
// Split the region around our base address and take the high half.
region = Split(region, address);
}
if (region.EndAddress > address + size)
{
// Split the region around our end address and take the low half.
Split(region, address + size);
}
list.Add(region);
lastAddress = region.EndAddress;
}
if (lastAddress < endAddress)
{
// There is a gap between this region and the end. We need to fill it.
T fillRegion = factory(lastAddress, endAddress - lastAddress);
list.Add(fillRegion);
Add(fillRegion);
}
}
}
/// <summary>
/// Splits a region around a target point and updates the region list.
/// The original region's size is modified, but its address stays the same.
/// A new region starting from the split address is added to the region list and returned.
/// </summary>
/// <param name="region">The region to split</param>
/// <param name="splitAddress">The address to split with</param>
/// <returns>The new region (high part)</returns>
private T Split(T region, ulong splitAddress)
{
T newRegion = (T)region.Split(splitAddress);
Update(region);
Add(newRegion);
return newRegion;
}
}
}
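A sketch of the split-and-fill behaviour of GetOrAddRegions, using a hypothetical region type (only the list classes above come from this commit):

class CountedRegion : INonOverlappingRange
{
    public ulong Address { get; }
    public ulong Size { get; private set; }
    public ulong EndAddress => Address + Size;

    public CountedRegion(ulong address, ulong size)
    {
        Address = address;
        Size = size;
    }

    public bool OverlapsWith(ulong address, ulong size)
    {
        return Address < address + size && address < EndAddress;
    }

    public INonOverlappingRange Split(ulong splitAddress)
    {
        // This region keeps the low half; the returned region covers [splitAddress, old EndAddress).
        var high = new CountedRegion(splitAddress, EndAddress - splitAddress);
        Size = splitAddress - Address;
        return high;
    }
}

// Inside a method: cover [0x0000, 0x3000) when only [0x1000, 0x2000) already exists.
var regions = new NonOverlappingRangeList<CountedRegion>();
regions.Add(new CountedRegion(0x1000, 0x1000));

var covered = new List<CountedRegion>();
regions.GetOrAddRegions(covered, 0x0000, 0x3000, (va, size) => new CountedRegion(va, size));
// covered now holds [0x0000, 0x1000), the existing [0x1000, 0x2000), and [0x2000, 0x3000).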

View file

@ -0,0 +1,483 @@
using System;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace Ryujinx.Memory.Range
{
/// <summary>
/// Sorted list of ranges that supports binary search.
/// </summary>
/// <typeparam name="T">Type of the range.</typeparam>
public class RangeList<T> : IEnumerable<T> where T : IRange
{
private readonly struct RangeItem<TValue> where TValue : IRange
{
public readonly ulong Address;
public readonly ulong EndAddress;
public readonly TValue Value;
public RangeItem(TValue value)
{
Value = value;
Address = value.Address;
EndAddress = value.Address + value.Size;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool OverlapsWith(ulong address, ulong endAddress)
{
return Address < endAddress && address < EndAddress;
}
}
private const int BackingInitialSize = 1024;
private const int ArrayGrowthSize = 32;
private RangeItem<T>[] _items;
private readonly int _backingGrowthSize;
public int Count { get; protected set; }
/// <summary>
/// Creates a new range list.
/// </summary>
/// <param name="backingInitialSize">The initial size of the backing array</param>
public RangeList(int backingInitialSize = BackingInitialSize)
{
_backingGrowthSize = backingInitialSize;
_items = new RangeItem<T>[backingInitialSize];
}
/// <summary>
/// Adds a new item to the list.
/// </summary>
/// <param name="item">The item to be added</param>
public void Add(T item)
{
int index = BinarySearch(item.Address);
if (index < 0)
{
index = ~index;
}
Insert(index, new RangeItem<T>(item));
}
/// <summary>
/// Updates an item's end address on the list. Address must be the same.
/// </summary>
/// <param name="item">The item to be updated</param>
/// <returns>True if the item was located and updated, false otherwise</returns>
public bool Update(T item)
{
int index = BinarySearch(item.Address);
if (index >= 0)
{
while (index > 0 && _items[index - 1].Address == item.Address)
{
index--;
}
while (index < Count)
{
if (_items[index].Value.Equals(item))
{
_items[index] = new RangeItem<T>(item);
return true;
}
if (_items[index].Address > item.Address)
{
break;
}
index++;
}
}
return false;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void Insert(int index, RangeItem<T> item)
{
if (Count + 1 > _items.Length)
{
Array.Resize(ref _items, _items.Length + _backingGrowthSize);
}
if (index >= Count)
{
if (index == Count)
{
_items[Count++] = item;
}
}
else
{
Array.Copy(_items, index, _items, index + 1, Count - index);
_items[index] = item;
Count++;
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void RemoveAt(int index)
{
if (index < --Count)
{
Array.Copy(_items, index + 1, _items, index, Count - index);
}
}
/// <summary>
/// Removes an item from the list.
/// </summary>
/// <param name="item">The item to be removed</param>
/// <returns>True if the item was removed, or false if it was not found</returns>
public bool Remove(T item)
{
int index = BinarySearch(item.Address);
if (index >= 0)
{
while (index > 0 && _items[index - 1].Address == item.Address)
{
index--;
}
while (index < Count)
{
if (_items[index].Value.Equals(item))
{
RemoveAt(index);
return true;
}
if (_items[index].Address > item.Address)
{
break;
}
index++;
}
}
return false;
}
/// <summary>
/// Updates an item's end address.
/// </summary>
/// <param name="item">The item to be updated</param>
public void UpdateEndAddress(T item)
{
int index = BinarySearch(item.Address);
if (index >= 0)
{
while (index > 0 && _items[index - 1].Address == item.Address)
{
index--;
}
while (index < Count)
{
if (_items[index].Value.Equals(item))
{
_items[index] = new RangeItem<T>(item);
return;
}
if (_items[index].Address > item.Address)
{
break;
}
index++;
}
}
}
/// <summary>
/// Gets the first item on the list overlapping in memory with the specified item.
/// </summary>
/// <remarks>
/// Despite the name, this has no ordering guarantees of the returned item.
/// It only ensures that the item returned overlaps the specified item.
/// </remarks>
/// <param name="item">Item to check for overlaps</param>
/// <returns>The overlapping item, or the default value for the type if none found</returns>
public T FindFirstOverlap(T item)
{
return FindFirstOverlap(item.Address, item.Size);
}
/// <summary>
/// Gets the first item on the list overlapping the specified memory range.
/// </summary>
/// <remarks>
/// Despite the name, this has no ordering guarantees of the returned item.
/// It only ensures that the item returned overlaps the specified memory range.
/// </remarks>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <returns>The overlapping item, or the default value for the type if none found</returns>
public T FindFirstOverlap(ulong address, ulong size)
{
int index = BinarySearch(address, address + size);
if (index < 0)
{
return default(T);
}
return _items[index].Value;
}
/// <summary>
/// Gets all items overlapping with the specified item in memory.
/// </summary>
/// <param name="item">Item to check for overlaps</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlaps(T item, ref T[] output)
{
return FindOverlaps(item.Address, item.Size, ref output);
}
/// <summary>
/// Gets all items on the list overlapping the specified memory range.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlaps(ulong address, ulong size, ref T[] output)
{
int outputIndex = 0;
ulong endAddress = address + size;
for (int i = 0; i < Count; i++)
{
ref RangeItem<T> item = ref _items[i];
if (item.Address >= endAddress)
{
break;
}
if (item.OverlapsWith(address, endAddress))
{
if (outputIndex == output.Length)
{
Array.Resize(ref output, outputIndex + ArrayGrowthSize);
}
output[outputIndex++] = item.Value;
}
}
return outputIndex;
}
/// <summary>
/// Gets all items overlapping with the specified item in memory.
/// </summary>
/// <remarks>
/// This method only returns correct results if none of the items on the list overlaps with
/// each other. If that is not the case, this method should not be used.
/// This method is faster than the regular method to find all overlaps.
/// </remarks>
/// <param name="item">Item to check for overlaps</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlapsNonOverlapping(T item, ref T[] output)
{
return FindOverlapsNonOverlapping(item.Address, item.Size, ref output);
}
/// <summary>
/// Gets all items on the list overlapping the specified memory range.
/// </summary>
/// <remarks>
/// This method only returns correct results if none of the items on the list overlaps with
/// each other. If that is not the case, this method should not be used.
/// This method is faster than the regular method to find all overlaps.
/// </remarks>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size in bytes of the range</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of overlapping items found</returns>
public int FindOverlapsNonOverlapping(ulong address, ulong size, ref T[] output)
{
// This is a bit faster than FindOverlaps, but only works
// when none of the items on the list overlaps with each other.
int outputIndex = 0;
ulong endAddress = address + size;
int index = BinarySearch(address, endAddress);
if (index >= 0)
{
while (index > 0 && _items[index - 1].OverlapsWith(address, endAddress))
{
index--;
}
do
{
if (outputIndex == output.Length)
{
Array.Resize(ref output, outputIndex + ArrayGrowthSize);
}
output[outputIndex++] = _items[index++].Value;
}
while (index < Count && _items[index].OverlapsWith(address, endAddress));
}
return outputIndex;
}
/// <summary>
/// Gets all items on the list with the specified memory address.
/// </summary>
/// <param name="address">Address to find</param>
/// <param name="output">Output array where matches will be written. It is automatically resized to fit the results</param>
/// <returns>The number of matches found</returns>
public int FindOverlaps(ulong address, ref T[] output)
{
int index = BinarySearch(address);
int outputIndex = 0;
if (index >= 0)
{
while (index > 0 && _items[index - 1].Address == address)
{
index--;
}
while (index < Count)
{
ref RangeItem<T> overlap = ref _items[index++];
if (overlap.Address != address)
{
break;
}
if (outputIndex == output.Length)
{
Array.Resize(ref output, outputIndex + ArrayGrowthSize);
}
output[outputIndex++] = overlap.Value;
}
}
return outputIndex;
}
/// <summary>
/// Performs binary search on the internal list of items.
/// </summary>
/// <param name="address">Address to find</param>
/// <returns>List index of the item, or complement index of nearest item with lower value on the list</returns>
private int BinarySearch(ulong address)
{
int left = 0;
int right = Count - 1;
while (left <= right)
{
int range = right - left;
int middle = left + (range >> 1);
ref RangeItem<T> item = ref _items[middle];
if (item.Address == address)
{
return middle;
}
if (address < item.Address)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
return ~left;
}
/// <summary>
/// Performs binary search for items overlapping a given memory range.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="endAddress">End address of the range</param>
/// <returns>List index of the item, or complement index of nearest item with lower value on the list</returns>
private int BinarySearch(ulong address, ulong endAddress)
{
int left = 0;
int right = Count - 1;
while (left <= right)
{
int range = right - left;
int middle = left + (range >> 1);
ref RangeItem<T> item = ref _items[middle];
if (item.OverlapsWith(address, endAddress))
{
return middle;
}
if (address < item.Address)
{
right = middle - 1;
}
else
{
left = middle + 1;
}
}
return ~left;
}
public IEnumerator<T> GetEnumerator()
{
for (int i = 0; i < Count; i++)
{
yield return _items[i].Value;
}
}
IEnumerator IEnumerable.GetEnumerator()
{
for (int i = 0; i < Count; i++)
{
yield return _items[i].Value;
}
}
}
}
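A small usage sketch with a minimal IRange implementation (SimpleRange is hypothetical):

record SimpleRange(ulong Address, ulong Size) : IRange
{
    public ulong EndAddress => Address + Size;

    public bool OverlapsWith(ulong address, ulong size)
    {
        return Address < address + size && address < EndAddress;
    }
}

// Inside a method:
var ranges = new RangeList<SimpleRange>();
ranges.Add(new SimpleRange(0x1000, 0x1000));
ranges.Add(new SimpleRange(0x4000, 0x2000));

SimpleRange hit = ranges.FindFirstOverlap(0x4800, 0x100);   // the [0x4000, 0x6000) entry
var buffer = new SimpleRange[1];
int count = ranges.FindOverlaps(0x0, 0x10000, ref buffer);  // 2; the output array is grown as needed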

View file

@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net7.0</TargetFramework>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\Ryujinx.Common\Ryujinx.Common.csproj" />
</ItemGroup>
</Project>

View file

@ -0,0 +1,73 @@
using Ryujinx.Memory.Range;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A region of memory.
/// </summary>
abstract class AbstractRegion : INonOverlappingRange
{
/// <summary>
/// Base address.
/// </summary>
public ulong Address { get; }
/// <summary>
/// Size of the range in bytes.
/// </summary>
public ulong Size { get; protected set; }
/// <summary>
/// End address.
/// </summary>
public ulong EndAddress => Address + Size;
/// <summary>
/// Create a new region.
/// </summary>
/// <param name="address">Base address</param>
/// <param name="size">Size of the range</param>
protected AbstractRegion(ulong address, ulong size)
{
Address = address;
Size = size;
}
/// <summary>
/// Check if this range overlaps with another.
/// </summary>
/// <param name="address">Base address</param>
/// <param name="size">Size of the range</param>
/// <returns>True if overlapping, false otherwise</returns>
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
/// <summary>
/// Signals to the handles that a memory event has occurred, and unprotects the region. Assumes that the tracking lock has been obtained.
/// </summary>
/// <param name="address">Address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="exemptId">Optional ID of the handles that should not be signalled</param>
public abstract void Signal(ulong address, ulong size, bool write, int? exemptId);
/// <summary>
/// Signals to the handles that a precise memory event has occurred. Assumes that the tracking lock has been obtained.
/// </summary>
/// <param name="address">Address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="exemptId">Optional ID of the handles that should not be signalled</param>
public abstract void SignalPrecise(ulong address, ulong size, bool write, int? exemptId);
/// <summary>
/// Split this region into two, around the specified address.
/// This region is updated to end at the split address, and a new region is created to represent past that point.
/// </summary>
/// <param name="splitAddress">Address to split the region around</param>
/// <returns>The second part of the split region, with start address at the given split.</returns>
public abstract INonOverlappingRange Split(ulong splitAddress);
}
}

View file

@ -0,0 +1,199 @@
using System.Runtime.CompilerServices;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A bitmap that can check or set large ranges of true/false values at once.
/// </summary>
readonly struct BitMap
{
public const int IntSize = 64;
private const int IntShift = 6;
private const int IntMask = IntSize - 1;
/// <summary>
/// Masks representing the bitmap. Least significant bit first, 64-bits per mask.
/// </summary>
public readonly long[] Masks;
/// <summary>
/// Create a new bitmap.
/// </summary>
/// <param name="count">The number of bits to reserve</param>
public BitMap(int count)
{
Masks = new long[(count + IntMask) / IntSize];
}
/// <summary>
/// Check if any bit in the bitmap is set.
/// </summary>
/// <returns>True if any bits are set, false otherwise</returns>
public bool AnySet()
{
for (int i = 0; i < Masks.Length; i++)
{
if (Masks[i] != 0)
{
return true;
}
}
return false;
}
/// <summary>
/// Check if a bit in the bitmap is set.
/// </summary>
/// <param name="bit">The bit index to check</param>
/// <returns>True if the bit is set, false otherwise</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsSet(int bit)
{
int wordIndex = bit >> IntShift;
int wordBit = bit & IntMask;
long wordMask = 1L << wordBit;
return (Masks[wordIndex] & wordMask) != 0;
}
/// <summary>
/// Check if any bit in an inclusive range of bits in the bitmap is set.
/// </summary>
/// <param name="start">The first bit index to check</param>
/// <param name="end">The last bit index to check</param>
/// <returns>True if a bit is set, false otherwise</returns>
public bool IsSet(int start, int end)
{
if (start == end)
{
return IsSet(start);
}
int startIndex = start >> IntShift;
int startBit = start & IntMask;
long startMask = -1L << startBit;
int endIndex = end >> IntShift;
int endBit = end & IntMask;
long endMask = (long)(ulong.MaxValue >> (IntMask - endBit));
if (startIndex == endIndex)
{
return (Masks[startIndex] & startMask & endMask) != 0;
}
if ((Masks[startIndex] & startMask) != 0)
{
return true;
}
for (int i = startIndex + 1; i < endIndex; i++)
{
if (Masks[i] != 0)
{
return true;
}
}
if ((Masks[endIndex] & endMask) != 0)
{
return true;
}
return false;
}
/// <summary>
/// Set a bit at a specific index to 1.
/// </summary>
/// <param name="bit">The bit index to set</param>
/// <returns>True if the bit was newly set, false if it was already set</returns>
public bool Set(int bit)
{
int wordIndex = bit >> IntShift;
int wordBit = bit & IntMask;
long wordMask = 1L << wordBit;
if ((Masks[wordIndex] & wordMask) != 0)
{
return false;
}
Masks[wordIndex] |= wordMask;
return true;
}
/// <summary>
/// Set a range of bits in the bitmap to 1.
/// </summary>
/// <param name="start">The first bit index to set</param>
/// <param name="end">The last bit index to set</param>
public void SetRange(int start, int end)
{
if (start == end)
{
Set(start);
return;
}
int startIndex = start >> IntShift;
int startBit = start & IntMask;
long startMask = -1L << startBit;
int endIndex = end >> IntShift;
int endBit = end & IntMask;
long endMask = (long)(ulong.MaxValue >> (IntMask - endBit));
if (startIndex == endIndex)
{
Masks[startIndex] |= startMask & endMask;
}
else
{
Masks[startIndex] |= startMask;
for (int i = startIndex + 1; i < endIndex; i++)
{
Masks[i] |= -1;
}
Masks[endIndex] |= endMask;
}
}
/// <summary>
/// Clear a bit at a specific index to 0.
/// </summary>
/// <param name="bit">The bit index to clear</param>
/// <returns>True if the bit was set, false if it was not</returns>
public bool Clear(int bit)
{
int wordIndex = bit >> IntShift;
int wordBit = bit & IntMask;
long wordMask = 1L << wordBit;
bool wasSet = (Masks[wordIndex] & wordMask) != 0;
Masks[wordIndex] &= ~wordMask;
return wasSet;
}
/// <summary>
/// Clear the bitmap entirely, setting all bits to 0.
/// </summary>
public void Clear()
{
for (int i = 0; i < Masks.Length; i++)
{
Masks[i] = 0;
}
}
}
}
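A worked example of a range operation that crosses a 64-bit word boundary (bit indices are illustrative):

var map = new BitMap(128);     // two 64-bit masks

map.SetRange(60, 67);          // word 0 gets bits 60..63, word 1 gets bits 0..3
bool below = map.IsSet(0, 59); // false: nothing below bit 60 was touched
bool above = map.IsSet(64);    // true
map.Clear(64);                 // returns true, since the bit was set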

View file

@ -0,0 +1,152 @@
using System;
using System.Threading;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A bitmap that can be safely modified from multiple threads.
/// </summary>
internal class ConcurrentBitmap
{
public const int IntSize = 64;
public const int IntShift = 6;
public const int IntMask = IntSize - 1;
/// <summary>
/// Masks representing the bitmap. Least significant bit first, 64-bits per mask.
/// </summary>
public readonly long[] Masks;
/// <summary>
/// Create a new multithreaded bitmap.
/// </summary>
/// <param name="count">The number of bits to reserve</param>
/// <param name="set">Whether the bits should be initially set or not</param>
public ConcurrentBitmap(int count, bool set)
{
Masks = new long[(count + IntMask) / IntSize];
if (set)
{
Array.Fill(Masks, -1L);
}
}
/// <summary>
/// Check if any bit in the bitmap is set.
/// </summary>
/// <returns>True if any bits are set, false otherwise</returns>
public bool AnySet()
{
for (int i = 0; i < Masks.Length; i++)
{
if (Interlocked.Read(ref Masks[i]) != 0)
{
return true;
}
}
return false;
}
/// <summary>
/// Check if a bit in the bitmap is set.
/// </summary>
/// <param name="bit">The bit index to check</param>
/// <returns>True if the bit is set, false otherwise</returns>
public bool IsSet(int bit)
{
int wordIndex = bit >> IntShift;
int wordBit = bit & IntMask;
long wordMask = 1L << wordBit;
return (Interlocked.Read(ref Masks[wordIndex]) & wordMask) != 0;
}
/// <summary>
/// Check if any bit in an inclusive range of bits in the bitmap is set.
/// </summary>
/// <param name="start">The first bit index to check</param>
/// <param name="end">The last bit index to check</param>
/// <returns>True if a bit is set, false otherwise</returns>
public bool IsSet(int start, int end)
{
if (start == end)
{
return IsSet(start);
}
int startIndex = start >> IntShift;
int startBit = start & IntMask;
long startMask = -1L << startBit;
int endIndex = end >> IntShift;
int endBit = end & IntMask;
long endMask = (long)(ulong.MaxValue >> (IntMask - endBit));
long startValue = Interlocked.Read(ref Masks[startIndex]);
if (startIndex == endIndex)
{
return (startValue & startMask & endMask) != 0;
}
if ((startValue & startMask) != 0)
{
return true;
}
for (int i = startIndex + 1; i < endIndex; i++)
{
if (Interlocked.Read(ref Masks[i]) != 0)
{
return true;
}
}
long endValue = Interlocked.Read(ref Masks[endIndex]);
if ((endValue & endMask) != 0)
{
return true;
}
return false;
}
/// <summary>
/// Set a bit at a specific index to either true or false.
/// </summary>
/// <param name="bit">The bit index to set</param>
/// <param name="value">Whether the bit should be set or not</param>
public void Set(int bit, bool value)
{
int wordIndex = bit >> IntShift;
int wordBit = bit & IntMask;
long wordMask = 1L << wordBit;
if (value)
{
Interlocked.Or(ref Masks[wordIndex], wordMask);
}
else
{
Interlocked.And(ref Masks[wordIndex], ~wordMask);
}
}
/// <summary>
/// Clear the bitmap entirely, setting all bits to 0.
/// </summary>
public void Clear()
{
for (int i = 0; i < Masks.Length; i++)
{
Interlocked.Exchange(ref Masks[i], 0);
}
}
}
}
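A brief sketch of the lock-free per-bit updates (Interlocked.Or/And require .NET 5 or newer, which the net7.0 target above satisfies):

var dirty = new ConcurrentBitmap(256, set: false);

dirty.Set(42, true);             // atomically ORs the bit into its 64-bit word
bool pending = dirty.AnySet();   // true, even while other threads touch other bits
dirty.Set(42, false);            // atomically ANDs the bit back out
dirty.Clear();                   // Interlocked.Exchange resets every word to 0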

View file

@ -0,0 +1,55 @@
using System;
namespace Ryujinx.Memory.Tracking
{
public interface IMultiRegionHandle : IDisposable
{
/// <summary>
/// True if any write has occurred to the whole region since the last use of QueryModified (with no subregion specified).
/// </summary>
bool Dirty { get; }
/// <summary>
/// Force the range of handles to be dirty, without reprotecting.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size of the range</param>
public void ForceDirty(ulong address, ulong size);
/// <summary>
/// Check if any part of the region has been modified, and perform an action for each.
/// Contiguous modified regions are combined.
/// </summary>
/// <param name="modifiedAction">Action to perform for modified regions</param>
void QueryModified(Action<ulong, ulong> modifiedAction);
/// <summary>
/// Check if part of the region has been modified within a given range, and perform an action for each.
/// The range is aligned to the level of granularity of the contained handles.
/// Contiguous modified regions are combined.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="modifiedAction">Action to perform for modified regions</param>
void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction);
/// <summary>
/// Check if part of the region has been modified within a given range, and perform an action for each.
/// The sequence number provided is compared with each handle's saved sequence number.
/// If it is equal, then the handle's dirty flag is ignored. Otherwise, the sequence number is saved.
/// The range is aligned to the level of granularity of the contained handles.
/// Contiguous modified regions are combined.
/// </summary>
/// <param name="address">Start address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="modifiedAction">Action to perform for modified regions</param>
/// <param name="sequenceNumber">Current sequence number</param>
void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber);
/// <summary>
/// Signal that one of the subregions of this multi-region has been modified. This sets the overall dirty flag.
/// </summary>
void SignalWrite();
}
}
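A hedged sketch of the intended consumption pattern; `handle`, `bufferVa`, `bufferSize` and `UploadSlice` are hypothetical, with the handle assumed to come from MemoryTracking.BeginGranularTracking:

// Re-upload only the dirty slices of a tracked buffer; contiguous dirty regions arrive combined.
handle.QueryModified(bufferVa, bufferSize, (address, size) =>
{
    UploadSlice(address, size);   // hypothetical helper
});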

View file

@ -0,0 +1,18 @@
using System;
namespace Ryujinx.Memory.Tracking
{
public interface IRegionHandle : IDisposable
{
bool Dirty { get; }
ulong Address { get; }
ulong Size { get; }
ulong EndAddress { get; }
void ForceDirty();
void Reprotect(bool asDirty = false);
void RegisterAction(RegionSignal action);
void RegisterPreciseAction(PreciseRegionSignal action);
}
}

View file

@ -0,0 +1,306 @@
using Ryujinx.Common.Pools;
using Ryujinx.Memory.Range;
using System.Collections.Generic;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// Manages memory tracking for a given virtual/physical memory block.
/// </summary>
public class MemoryTracking
{
private readonly IVirtualMemoryManager _memoryManager;
private readonly InvalidAccessHandler _invalidAccessHandler;
// Only use these from within the lock.
private readonly NonOverlappingRangeList<VirtualRegion> _virtualRegions;
private readonly int _pageSize;
/// <summary>
/// This lock must be obtained when traversing or updating the region-handle hierarchy.
/// It is not required when reading dirty flags.
/// </summary>
internal object TrackingLock = new object();
/// <summary>
/// Create a new tracking structure for the given "physical" memory block,
/// with a given "virtual" memory manager that will provide mappings and virtual memory protection.
/// </summary>
/// <param name="memoryManager">Virtual memory manager</param>
/// <param name="block">Physical memory block</param>
/// <param name="pageSize">Page size of the virtual memory space</param>
public MemoryTracking(IVirtualMemoryManager memoryManager, int pageSize, InvalidAccessHandler invalidAccessHandler = null)
{
_memoryManager = memoryManager;
_pageSize = pageSize;
_invalidAccessHandler = invalidAccessHandler;
_virtualRegions = new NonOverlappingRangeList<VirtualRegion>();
}
private (ulong address, ulong size) PageAlign(ulong address, ulong size)
{
ulong pageMask = (ulong)_pageSize - 1;
ulong rA = address & ~pageMask;
ulong rS = ((address + size + pageMask) & ~pageMask) - rA;
return (rA, rS);
}
/// <summary>
/// Indicate that a virtual region has been mapped, and which physical region it has been mapped to.
/// Should be called after the mapping is complete.
/// </summary>
/// <param name="va">Virtual memory address</param>
/// <param name="size">Size to be mapped</param>
public void Map(ulong va, ulong size)
{
// A mapping may mean we need to re-evaluate each VirtualRegion's affected area.
// Find all handles that overlap with the range, we need to recalculate their physical regions
lock (TrackingLock)
{
ref var overlaps = ref ThreadStaticArray<VirtualRegion>.Get();
int count = _virtualRegions.FindOverlapsNonOverlapping(va, size, ref overlaps);
for (int i = 0; i < count; i++)
{
VirtualRegion region = overlaps[i];
// If the region has been fully remapped, signal that it has been mapped again.
bool remapped = _memoryManager.IsRangeMapped(region.Address, region.Size);
if (remapped)
{
region.SignalMappingChanged(true);
}
region.UpdateProtection();
}
}
}
/// <summary>
/// Indicate that a virtual region has been unmapped.
/// Should be called before the unmapping is complete.
/// </summary>
/// <param name="va">Virtual memory address</param>
/// <param name="size">Size to be unmapped</param>
public void Unmap(ulong va, ulong size)
{
// An unmapping may mean we need to re-evaluate each VirtualRegion's affected area.
// Find all handles that overlap with the range, we need to notify them that the region was unmapped.
lock (TrackingLock)
{
ref var overlaps = ref ThreadStaticArray<VirtualRegion>.Get();
int count = _virtualRegions.FindOverlapsNonOverlapping(va, size, ref overlaps);
for (int i = 0; i < count; i++)
{
VirtualRegion region = overlaps[i];
region.SignalMappingChanged(false);
}
}
}
/// <summary>
/// Get a list of virtual regions that a handle covers.
/// </summary>
/// <param name="va">Starting virtual memory address of the handle</param>
/// <param name="size">Size of the handle's memory region</param>
/// <returns>A list of virtual regions within the given range</returns>
internal List<VirtualRegion> GetVirtualRegionsForHandle(ulong va, ulong size)
{
List<VirtualRegion> result = new List<VirtualRegion>();
_virtualRegions.GetOrAddRegions(result, va, size, (va, size) => new VirtualRegion(this, va, size));
return result;
}
/// <summary>
/// Remove a virtual region from the range list. This assumes that the lock has been acquired.
/// </summary>
/// <param name="region">Region to remove</param>
internal void RemoveVirtual(VirtualRegion region)
{
_virtualRegions.Remove(region);
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="handles">Handles to inherit state from or reuse. When none are present, provide null</param>
/// <param name="granularity">Desired granularity of write tracking</param>
/// <param name="id">Handle ID</param>
/// <returns>The memory tracking handle</returns>
public MultiRegionHandle BeginGranularTracking(ulong address, ulong size, IEnumerable<IRegionHandle> handles, ulong granularity, int id)
{
return new MultiRegionHandle(this, address, size, handles, granularity, id);
}
/// <summary>
/// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="granularity">Desired granularity of write tracking</param>
/// <param name="id">Handle ID</param>
/// <returns>The memory tracking handle</returns>
public SmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity, int id)
{
(address, size) = PageAlign(address, size);
return new SmartMultiRegionHandle(this, address, size, granularity, id);
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="id">Handle ID</param>
/// <returns>The memory tracking handle</returns>
public RegionHandle BeginTracking(ulong address, ulong size, int id)
{
var (paAddress, paSize) = PageAlign(address, size);
lock (TrackingLock)
{
bool mapped = _memoryManager.IsRangeMapped(address, size);
RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, id, mapped);
return handle;
}
}
/// <summary>
/// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with.
/// </summary>
/// <param name="address">CPU virtual address of the region</param>
/// <param name="size">Size of the region</param>
/// <param name="bitmap">The bitmap owning the dirty flag for this handle</param>
/// <param name="bit">The bit of this handle within the dirty flag</param>
/// <param name="id">Handle ID</param>
/// <returns>The memory tracking handle</returns>
internal RegionHandle BeginTrackingBitmap(ulong address, ulong size, ConcurrentBitmap bitmap, int bit, int id)
{
var (paAddress, paSize) = PageAlign(address, size);
lock (TrackingLock)
{
bool mapped = _memoryManager.IsRangeMapped(address, size);
RegionHandle handle = new RegionHandle(this, paAddress, paSize, address, size, bitmap, bit, id, mapped);
return handle;
}
}
/// <summary>
/// Signal that a virtual memory event happened at the given location.
/// </summary>
/// <param name="address">Virtual address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <returns>True if the event triggered any tracking regions, false otherwise</returns>
public bool VirtualMemoryEvent(ulong address, ulong size, bool write)
{
return VirtualMemoryEvent(address, size, write, precise: false, null);
}
/// <summary>
/// Signal that a virtual memory event happened at the given location.
/// This can be flagged as a precise event, which will avoid reprotection and call special handlers if possible.
/// A precise event has an exact address and size, rather than triggering on page granularity.
/// </summary>
/// <param name="address">Virtual address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="precise">True if the access is precise, false otherwise</param>
/// <param name="exemptId">Optional ID that of the handles that should not be signalled</param>
/// <returns>True if the event triggered any tracking regions, false otherwise</returns>
public bool VirtualMemoryEvent(ulong address, ulong size, bool write, bool precise, int? exemptId = null)
{
// Look up the virtual region using the region list.
// Signal up the chain to relevant handles.
bool shouldThrow = false;
lock (TrackingLock)
{
ref var overlaps = ref ThreadStaticArray<VirtualRegion>.Get();
int count = _virtualRegions.FindOverlapsNonOverlapping(address, size, ref overlaps);
if (count == 0 && !precise)
{
if (_memoryManager.IsRangeMapped(address, size))
{
// TODO: There is currently the possibility that a page can be protected after its virtual region is removed.
// This code handles that case when it happens, but it would be better to find out how this happens.
_memoryManager.TrackingReprotect(address & ~(ulong)(_pageSize - 1), (ulong)_pageSize, MemoryPermission.ReadAndWrite);
return true; // This memory _should_ be mapped, so we need to try again.
}
else
{
shouldThrow = true;
}
}
else
{
for (int i = 0; i < count; i++)
{
VirtualRegion region = overlaps[i];
if (precise)
{
region.SignalPrecise(address, size, write, exemptId);
}
else
{
region.Signal(address, size, write, exemptId);
}
}
}
}
if (shouldThrow)
{
_invalidAccessHandler?.Invoke(address);
// We can't continue - it's impossible to remove protection from the page.
// Even if the access handler wants us to continue, we wouldn't be able to.
throw new InvalidMemoryRegionException();
}
return true;
}
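// Editor's illustrative sketch (not part of the original source): a host access-violation
// handler that intercepted a guest write could forward it here before retrying the access.
// "faultVa" and "accessSize" are hypothetical values taken from the fault information.
//
//     tracking.VirtualMemoryEvent(faultVa, accessSize, write: true);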
/// <summary>
/// Reprotect a given virtual region. The virtual memory manager will handle this.
/// </summary>
/// <param name="region">Region to reprotect</param>
/// <param name="permission">Memory permission to protect with</param>
internal void ProtectVirtualRegion(VirtualRegion region, MemoryPermission permission)
{
_memoryManager.TrackingReprotect(region.Address, region.Size, permission);
}
/// <summary>
/// Returns the number of virtual regions currently being tracked.
/// Useful for tests and metrics.
/// </summary>
/// <returns>The number of virtual regions</returns>
public int GetRegionCount()
{
lock (TrackingLock)
{
return _virtualRegions.Count;
}
}
}
}

View file

@ -0,0 +1,415 @@
using System;
using System.Collections.Generic;
using System.Numerics;
using System.Runtime.CompilerServices;
using System.Threading;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A region handle that tracks a large region using many smaller handles, to provide
/// granular tracking that can be used to track partial updates. Backed by a bitmap
/// to improve performance when scanning large regions.
/// </summary>
public class MultiRegionHandle : IMultiRegionHandle
{
/// <summary>
/// A list of region handles for each granularity sized chunk of the whole region.
/// </summary>
private readonly RegionHandle[] _handles;
private readonly ulong Address;
private readonly ulong Granularity;
private readonly ulong Size;
private ConcurrentBitmap _dirtyBitmap;
private int _sequenceNumber;
private BitMap _sequenceNumberBitmap;
private BitMap _dirtyCheckedBitmap;
private int _uncheckedHandles;
public bool Dirty { get; private set; } = true;
internal MultiRegionHandle(
MemoryTracking tracking,
ulong address,
ulong size,
IEnumerable<IRegionHandle> handles,
ulong granularity,
int id)
{
_handles = new RegionHandle[(size + granularity - 1) / granularity];
Granularity = granularity;
_dirtyBitmap = new ConcurrentBitmap(_handles.Length, true);
_sequenceNumberBitmap = new BitMap(_handles.Length);
_dirtyCheckedBitmap = new BitMap(_handles.Length);
int i = 0;
if (handles != null)
{
// Inherit from the handles we were given. Any gaps must be filled with new handles,
// and old handles larger than our granularity must copy their state onto new granular handles and be disposed.
// It is assumed that the provided handles do not overlap, are in order, lie on page boundaries,
// and do not extend past the requested range.
foreach (RegionHandle handle in handles)
{
int startIndex = (int)((handle.RealAddress - address) / granularity);
// Fill any gap left before this handle.
while (i < startIndex)
{
RegionHandle fillHandle = tracking.BeginTrackingBitmap(address + (ulong)i * granularity, granularity, _dirtyBitmap, i, id);
fillHandle.Parent = this;
_handles[i++] = fillHandle;
}
lock (tracking.TrackingLock)
{
if (handle is RegionHandle bitHandle && handle.Size == granularity)
{
handle.Parent = this;
bitHandle.ReplaceBitmap(_dirtyBitmap, i);
_handles[i++] = bitHandle;
}
else
{
int endIndex = (int)((handle.RealEndAddress - address) / granularity);
while (i < endIndex)
{
RegionHandle splitHandle = tracking.BeginTrackingBitmap(address + (ulong)i * granularity, granularity, _dirtyBitmap, i, id);
splitHandle.Parent = this;
splitHandle.Reprotect(handle.Dirty);
RegionSignal signal = handle.PreAction;
if (signal != null)
{
splitHandle.RegisterAction(signal);
}
_handles[i++] = splitHandle;
}
handle.Dispose();
}
}
}
}
// Fill any remaining space with new handles.
while (i < _handles.Length)
{
RegionHandle handle = tracking.BeginTrackingBitmap(address + (ulong)i * granularity, granularity, _dirtyBitmap, i, id);
handle.Parent = this;
_handles[i++] = handle;
}
_uncheckedHandles = _handles.Length;
Address = address;
Size = size;
}
public void SignalWrite()
{
Dirty = true;
}
public IEnumerable<RegionHandle> GetHandles()
{
return _handles;
}
public void ForceDirty(ulong address, ulong size)
{
Dirty = true;
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
for (int i = startHandle; i <= lastHandle; i++)
{
if (_sequenceNumberBitmap.Clear(i))
{
_uncheckedHandles++;
}
_handles[i].ForceDirty();
}
}
public void QueryModified(Action<ulong, ulong> modifiedAction)
{
if (!Dirty)
{
return;
}
Dirty = false;
QueryModified(Address, Size, modifiedAction);
}
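// Editor's illustrative sketch (not part of the original source): consuming the dirty
// sub-regions of a tracked resource, where "UploadRange" is a hypothetical callback that
// re-uploads only the granularity-sized chunks that were actually written.
//
//     multiHandle.QueryModified((address, size) => UploadRange(address, size));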
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ParseDirtyBits(long dirtyBits, ref int baseBit, ref int prevHandle, ref ulong rgStart, ref ulong rgSize, Action<ulong, ulong> modifiedAction)
{
while (dirtyBits != 0)
{
int bit = BitOperations.TrailingZeroCount(dirtyBits);
dirtyBits &= ~(1L << bit);
int handleIndex = baseBit + bit;
RegionHandle handle = _handles[handleIndex];
if (handleIndex != prevHandle + 1)
{
// A gap was found; submit the dirty range accumulated so far
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
rgStart = handle.RealAddress;
}
if (handle.Dirty)
{
rgSize += handle.RealSize;
handle.Reprotect();
}
prevHandle = handleIndex;
}
baseBit += ConcurrentBitmap.IntSize;
}
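// Editor's note (worked example, not from the original source): the loop extracts one set bit
// per iteration using TrailingZeroCount, so with baseBit = 64 and dirtyBits = 0b1010 it visits
// bits 1 and 3, i.e. handles 65 and 67, merging contiguous dirty handles into one range.
//
//     long dirtyBits = 0b1010;
//     while (dirtyBits != 0)
//     {
//         int bit = BitOperations.TrailingZeroCount(dirtyBits); // 1 on the first pass, then 3
//         dirtyBits &= ~(1L << bit);                            // clear the bit just handled
//     }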
public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction)
{
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
ulong rgStart = Address + (ulong)startHandle * Granularity;
if (startHandle == lastHandle)
{
RegionHandle handle = _handles[startHandle];
if (handle.Dirty)
{
handle.Reprotect();
modifiedAction(rgStart, handle.RealSize);
}
return;
}
ulong rgSize = 0;
long[] masks = _dirtyBitmap.Masks;
int startIndex = startHandle >> ConcurrentBitmap.IntShift;
int startBit = startHandle & ConcurrentBitmap.IntMask;
long startMask = -1L << startBit;
int endIndex = lastHandle >> ConcurrentBitmap.IntShift;
int endBit = lastHandle & ConcurrentBitmap.IntMask;
long endMask = (long)(ulong.MaxValue >> (ConcurrentBitmap.IntMask - endBit));
long startValue = Volatile.Read(ref masks[startIndex]);
int baseBit = startIndex << ConcurrentBitmap.IntShift;
int prevHandle = startHandle - 1;
if (startIndex == endIndex)
{
ParseDirtyBits(startValue & startMask & endMask, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
else
{
ParseDirtyBits(startValue & startMask, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
for (int i = startIndex + 1; i < endIndex; i++)
{
ParseDirtyBits(Volatile.Read(ref masks[i]), ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
long endValue = Volatile.Read(ref masks[endIndex]);
ParseDirtyBits(endValue & endMask, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ParseDirtyBits(long dirtyBits, long mask, int index, long[] seqMasks, long[] checkMasks, ref int baseBit, ref int prevHandle, ref ulong rgStart, ref ulong rgSize, Action<ulong, ulong> modifiedAction)
{
long seqMask = mask & ~seqMasks[index];
long checkMask = (~dirtyBits) & seqMask;
dirtyBits &= seqMask;
while (dirtyBits != 0)
{
int bit = BitOperations.TrailingZeroCount(dirtyBits);
long bitValue = 1L << bit;
dirtyBits &= ~bitValue;
int handleIndex = baseBit + bit;
RegionHandle handle = _handles[handleIndex];
if (handleIndex != prevHandle + 1)
{
// A gap was found; submit the dirty range accumulated so far
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
rgStart = handle.RealAddress;
}
rgSize += handle.RealSize;
handle.Reprotect(false, (checkMasks[index] & bitValue) == 0);
checkMasks[index] &= ~bitValue;
prevHandle = handleIndex;
}
checkMasks[index] |= checkMask;
seqMasks[index] |= mask;
_uncheckedHandles -= BitOperations.PopCount((ulong)seqMask);
baseBit += ConcurrentBitmap.IntSize;
}
public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber)
{
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
ulong rgStart = Address + (ulong)startHandle * Granularity;
if (sequenceNumber != _sequenceNumber)
{
if (_uncheckedHandles != _handles.Length)
{
_sequenceNumberBitmap.Clear();
_uncheckedHandles = _handles.Length;
}
_sequenceNumber = sequenceNumber;
}
if (startHandle == lastHandle)
{
var handle = _handles[startHandle];
if (_sequenceNumberBitmap.Set(startHandle))
{
_uncheckedHandles--;
if (handle.DirtyOrVolatile())
{
handle.Reprotect();
modifiedAction(rgStart, handle.RealSize);
}
}
return;
}
if (_uncheckedHandles == 0)
{
return;
}
ulong rgSize = 0;
long[] seqMasks = _sequenceNumberBitmap.Masks;
long[] checkedMasks = _dirtyCheckedBitmap.Masks;
long[] masks = _dirtyBitmap.Masks;
int startIndex = startHandle >> ConcurrentBitmap.IntShift;
int startBit = startHandle & ConcurrentBitmap.IntMask;
long startMask = -1L << startBit;
int endIndex = lastHandle >> ConcurrentBitmap.IntShift;
int endBit = lastHandle & ConcurrentBitmap.IntMask;
long endMask = (long)(ulong.MaxValue >> (ConcurrentBitmap.IntMask - endBit));
long startValue = Volatile.Read(ref masks[startIndex]);
int baseBit = startIndex << ConcurrentBitmap.IntShift;
int prevHandle = startHandle - 1;
if (startIndex == endIndex)
{
ParseDirtyBits(startValue, startMask & endMask, startIndex, seqMasks, checkedMasks, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
else
{
ParseDirtyBits(startValue, startMask, startIndex, seqMasks, checkedMasks, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
for (int i = startIndex + 1; i < endIndex; i++)
{
ParseDirtyBits(Volatile.Read(ref masks[i]), -1L, i, seqMasks, checkedMasks, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
long endValue = Volatile.Read(ref masks[endIndex]);
ParseDirtyBits(endValue, endMask, endIndex, seqMasks, checkedMasks, ref baseBit, ref prevHandle, ref rgStart, ref rgSize, modifiedAction);
}
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
}
}
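// Editor's illustrative sketch (not part of the original source): passing a per-frame sequence
// number so that repeated queries within the same frame skip handles that were already checked.
// "_frameNumber" and "UploadRange" are hypothetical names.
//
//     multiHandle.QueryModified(address, size, (addr, sz) => UploadRange(addr, sz), _frameNumber);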
public void RegisterAction(ulong address, ulong size, RegionSignal action)
{
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
for (int i = startHandle; i <= lastHandle; i++)
{
_handles[i].RegisterAction(action);
}
}
public void RegisterPreciseAction(ulong address, ulong size, PreciseRegionSignal action)
{
int startHandle = (int)((address - Address) / Granularity);
int lastHandle = (int)((address + (size - 1) - Address) / Granularity);
for (int i = startHandle; i <= lastHandle; i++)
{
_handles[i].RegisterPreciseAction(action);
}
}
public void Dispose()
{
foreach (var handle in _handles)
{
handle.Dispose();
}
}
}
}

View file

@ -0,0 +1,4 @@
namespace Ryujinx.Memory.Tracking
{
public delegate bool PreciseRegionSignal(ulong address, ulong size, bool write);
}

View file

@ -0,0 +1,464 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A tracking handle for a given region of virtual memory. The Dirty flag is updated whenever any changes are made,
/// and an action can be performed when the region is read from or written to.
/// </summary>
public class RegionHandle : IRegionHandle
{
/// <summary>
/// If more than this number of checks have been performed on a dirty flag since its last reprotect,
/// then it is dirtied infrequently.
/// </summary>
private const int CheckCountForInfrequent = 3;
/// <summary>
/// Number of consecutive frequent dirty/consume cycles needed to make this handle volatile.
/// </summary>
private const int VolatileThreshold = 5;
public bool Dirty
{
get
{
return Bitmap.IsSet(DirtyBit);
}
protected set
{
Bitmap.Set(DirtyBit, value);
}
}
internal int SequenceNumber { get; set; }
internal int Id { get; }
public bool Unmapped { get; private set; }
public ulong Address { get; }
public ulong Size { get; }
public ulong EndAddress { get; }
public ulong RealAddress { get; }
public ulong RealSize { get; }
public ulong RealEndAddress { get; }
internal IMultiRegionHandle Parent { get; set; }
private event Action _onDirty;
private object _preActionLock = new object();
private RegionSignal _preAction; // Action to perform before a read or write. This will block the memory access.
private PreciseRegionSignal _preciseAction; // Action to perform on a precise read or write.
private readonly List<VirtualRegion> _regions;
private readonly MemoryTracking _tracking;
private bool _disposed;
private int _checkCount = 0;
private int _volatileCount = 0;
private bool _volatile;
internal MemoryPermission RequiredPermission
{
get
{
// If this is unmapped, allow reprotecting as RW as it can't be dirtied.
// This is required for the partial unmap cases where part of the data is still being accessed.
if (Unmapped)
{
return MemoryPermission.ReadAndWrite;
}
if (_preAction != null)
{
return MemoryPermission.None;
}
return Dirty ? MemoryPermission.ReadAndWrite : MemoryPermission.Read;
}
}
internal RegionSignal PreAction => _preAction;
internal ConcurrentBitmap Bitmap;
internal int DirtyBit;
/// <summary>
/// Create a new bitmap backed region handle. The handle is registered with the given tracking object,
/// and will be notified of any changes to the specified region.
/// </summary>
/// <param name="tracking">Tracking object for the target memory block</param>
/// <param name="address">Virtual address of the region to track</param>
/// <param name="size">Size of the region to track</param>
/// <param name="realAddress">The real, unaligned address of the handle</param>
/// <param name="realSize">The real, unaligned size of the handle</param>
/// <param name="bitmap">The bitmap the dirty flag for this handle is stored in</param>
/// <param name="bit">The bit index representing the dirty flag for this handle</param>
/// <param name="id">Handle ID</param>
/// <param name="mapped">True if the region handle starts mapped</param>
internal RegionHandle(
MemoryTracking tracking,
ulong address,
ulong size,
ulong realAddress,
ulong realSize,
ConcurrentBitmap bitmap,
int bit,
int id,
bool mapped = true)
{
Bitmap = bitmap;
DirtyBit = bit;
Dirty = mapped;
Id = id;
Unmapped = !mapped;
Address = address;
Size = size;
EndAddress = address + size;
RealAddress = realAddress;
RealSize = realSize;
RealEndAddress = realAddress + realSize;
_tracking = tracking;
_regions = tracking.GetVirtualRegionsForHandle(address, size);
foreach (var region in _regions)
{
region.Handles.Add(this);
}
}
/// <summary>
/// Create a new region handle. The handle is registered with the given tracking object,
/// and will be notified of any changes to the specified region.
/// </summary>
/// <param name="tracking">Tracking object for the target memory block</param>
/// <param name="address">Virtual address of the region to track</param>
/// <param name="size">Size of the region to track</param>
/// <param name="realAddress">The real, unaligned address of the handle</param>
/// <param name="realSize">The real, unaligned size of the handle</param>
/// <param name="id">Handle ID</param>
/// <param name="mapped">True if the region handle starts mapped</param>
internal RegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong realAddress, ulong realSize, int id, bool mapped = true)
{
Bitmap = new ConcurrentBitmap(1, mapped);
Id = id;
Unmapped = !mapped;
Address = address;
Size = size;
EndAddress = address + size;
RealAddress = realAddress;
RealSize = realSize;
RealEndAddress = realAddress + realSize;
_tracking = tracking;
_regions = tracking.GetVirtualRegionsForHandle(address, size);
foreach (var region in _regions)
{
region.Handles.Add(this);
}
}
/// <summary>
/// Replace the bitmap and bit index used to track dirty state.
/// </summary>
/// <remarks>
/// The tracking lock should be held when this is called, to ensure neither bitmap is modified.
/// </remarks>
/// <param name="bitmap">The bitmap the dirty flag for this handle is stored in</param>
/// <param name="bit">The bit index representing the dirty flag for this handle</param>
internal void ReplaceBitmap(ConcurrentBitmap bitmap, int bit)
{
// Assumes the tracking lock is held, so nothing else can signal right now.
var oldBitmap = Bitmap;
var oldBit = DirtyBit;
bitmap.Set(bit, Dirty);
Bitmap = bitmap;
DirtyBit = bit;
Dirty |= oldBitmap.IsSet(oldBit);
}
/// <summary>
/// Clear the volatile state of this handle.
/// </summary>
private void ClearVolatile()
{
_volatileCount = 0;
_volatile = false;
}
/// <summary>
/// Check if this handle is dirty, or if it is volatile (meaning it changes very often).
/// </summary>
/// <returns>True if the handle is dirty or volatile, false otherwise</returns>
public bool DirtyOrVolatile()
{
_checkCount++;
return _volatile || Dirty;
}
/// <summary>
/// Signal that a memory action occurred within this handle's virtual regions.
/// </summary>
/// <param name="address">Address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="handleIterable">Reference to the handles being iterated, in case the list needs to be copied</param>
internal void Signal(ulong address, ulong size, bool write, ref IList<RegionHandle> handleIterable)
{
// If this handle was already unmapped (even if just partially),
// then we have nothing to do until it is mapped again.
// The pre-action should still be consumed to avoid flushing on remap.
if (Unmapped)
{
Interlocked.Exchange(ref _preAction, null);
return;
}
if (_preAction != null)
{
// Limit the range to within this handle.
ulong maxAddress = Math.Max(address, RealAddress);
ulong minEndAddress = Math.Min(address + size, RealAddress + RealSize);
// Copy the handles list in case it changes when we're out of the lock.
if (handleIterable is List<RegionHandle>)
{
handleIterable = handleIterable.ToArray();
}
// Temporarily release the tracking lock while we're running the action.
Monitor.Exit(_tracking.TrackingLock);
try
{
lock (_preActionLock)
{
_preAction?.Invoke(maxAddress, minEndAddress - maxAddress);
// The action is removed after it returns, to ensure that the null check above succeeds when
// it's still in progress rather than continuing and possibly missing a required data flush.
Interlocked.Exchange(ref _preAction, null);
}
}
finally
{
Monitor.Enter(_tracking.TrackingLock);
}
}
if (write)
{
bool oldDirty = Dirty;
Dirty = true;
if (!oldDirty)
{
_onDirty?.Invoke();
}
Parent?.SignalWrite();
}
}
/// <summary>
/// Signal that a precise memory action occurred within this handle's virtual regions.
/// If there is no precise action, or the action returns false, the normal signal handler will be called.
/// </summary>
/// <param name="address">Address accessed</param>
/// <param name="size">Size of the region affected in bytes</param>
/// <param name="write">Whether the region was written to or read</param>
/// <param name="handleIterable">Reference to the handles being iterated, in case the list needs to be copied</param>
/// <returns>True if a precise action was performed and returned true, false otherwise</returns>
internal bool SignalPrecise(ulong address, ulong size, bool write, ref IList<RegionHandle> handleIterable)
{
if (!Unmapped && _preciseAction != null && _preciseAction(address, size, write))
{
return true;
}
Signal(address, size, write, ref handleIterable);
return false;
}
/// <summary>
/// Force this handle to be dirty, without reprotecting.
/// </summary>
public void ForceDirty()
{
Dirty = true;
}
/// <summary>
/// Consume the dirty flag for this handle, and reprotect so it can be set on the next write.
/// </summary>
/// <param name="asDirty">True if the handle should be reprotected as dirty, rather than have it cleared</param>
/// <param name="consecutiveCheck">True if this reprotect is the result of consecutive dirty checks</param>
public void Reprotect(bool asDirty, bool consecutiveCheck = false)
{
if (_volatile) return;
Dirty = asDirty;
bool protectionChanged = false;
lock (_tracking.TrackingLock)
{
foreach (VirtualRegion region in _regions)
{
protectionChanged |= region.UpdateProtection();
}
}
if (!protectionChanged)
{
// Counteract the check count being incremented when this handle was forced dirty.
// It doesn't count for protected write tracking.
_checkCount--;
}
else if (!asDirty)
{
if (consecutiveCheck || (_checkCount > 0 && _checkCount < CheckCountForInfrequent))
{
if (++_volatileCount >= VolatileThreshold && _preAction == null)
{
_volatile = true;
return;
}
}
else
{
_volatileCount = 0;
}
_checkCount = 0;
}
}
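// Editor's note (worked example, not from the original source): if a handle is dirtied and
// consumed on five consecutive checks (each with _checkCount below CheckCountForInfrequent),
// _volatileCount reaches VolatileThreshold and the handle turns volatile, so later Reprotect
// calls return immediately and the region stays writable until ClearVolatile is invoked.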
/// <summary>
/// Consume the dirty flag for this handle, and reprotect so it can be set on the next write.
/// </summary>
/// <param name="asDirty">True if the handle should be reprotected as dirty, rather than have it cleared</param>
public void Reprotect(bool asDirty = false)
{
Reprotect(asDirty, false);
}
/// <summary>
/// Register an action to perform when the tracked region is read or written.
/// The action is automatically removed after it runs.
/// </summary>
/// <param name="action">Action to call on read or write</param>
public void RegisterAction(RegionSignal action)
{
ClearVolatile();
lock (_preActionLock)
{
RegionSignal lastAction = Interlocked.Exchange(ref _preAction, action);
if (lastAction == null && action != lastAction)
{
lock (_tracking.TrackingLock)
{
foreach (VirtualRegion region in _regions)
{
region.UpdateProtection();
}
}
}
}
}
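// Editor's illustrative sketch (not part of the original source): registering a one-shot
// action that runs before the guest next reads or writes the region. "FlushTextureData" is a
// hypothetical callback.
//
//     handle.RegisterAction((address, size) => FlushTextureData(address, size));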
/// <summary>
/// Register an action to perform when a precise access occurs (one with exact address and size).
/// If the action returns true, read/write tracking is skipped.
/// </summary>
/// <param name="action">Action to call on read or write</param>
public void RegisterPreciseAction(PreciseRegionSignal action)
{
_preciseAction = action;
}
/// <summary>
/// Register an action to perform when the region is written to.
/// This action will not be removed when it is called - it is called each time the dirty flag is set.
/// </summary>
/// <param name="action">Action to call on dirty</param>
public void RegisterDirtyEvent(Action action)
{
_onDirty += action;
}
/// <summary>
/// Add a child virtual region to this handle.
/// </summary>
/// <param name="region">Virtual region to add as a child</param>
internal void AddChild(VirtualRegion region)
{
_regions.Add(region);
}
/// <summary>
/// Signal that this handle has been mapped or unmapped.
/// </summary>
/// <param name="mapped">True if the handle has been mapped, false if unmapped</param>
internal void SignalMappingChanged(bool mapped)
{
if (Unmapped == mapped)
{
Unmapped = !mapped;
if (Unmapped)
{
ClearVolatile();
Dirty = false;
}
}
}
/// <summary>
/// Check if this region overlaps with another.
/// </summary>
/// <param name="address">Base address</param>
/// <param name="size">Size of the region</param>
/// <returns>True if overlapping, false otherwise</returns>
public bool OverlapsWith(ulong address, ulong size)
{
return Address < address + size && address < EndAddress;
}
/// <summary>
/// Dispose the handle. Within the tracking lock, this removes references from virtual regions.
/// </summary>
public void Dispose()
{
ObjectDisposedException.ThrowIf(_disposed, this);
_disposed = true;
lock (_tracking.TrackingLock)
{
foreach (VirtualRegion region in _regions)
{
region.RemoveHandle(this);
}
}
}
}
}

View file

@ -0,0 +1,4 @@
namespace Ryujinx.Memory.Tracking
{
public delegate void RegionSignal(ulong address, ulong size);
}

View file

@ -0,0 +1,280 @@
using System;
using System.Runtime.CompilerServices;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A MultiRegionHandle that attempts to segment a region's handles into the regions requested
/// to avoid iterating over granular chunks for canonically large regions.
/// If minimum granularity is to be expected, use MultiRegionHandle.
/// </summary>
public class SmartMultiRegionHandle : IMultiRegionHandle
{
/// <summary>
/// A list of region handles starting at each granularity size increment.
/// </summary>
private readonly RegionHandle[] _handles;
private readonly ulong _address;
private readonly ulong _granularity;
private readonly ulong _size;
private MemoryTracking _tracking;
private readonly int _id;
public bool Dirty { get; private set; } = true;
internal SmartMultiRegionHandle(MemoryTracking tracking, ulong address, ulong size, ulong granularity, int id)
{
// For this multi-region handle, the handle list starts empty.
// As regions are queried, they are added to the _handles array at their start index.
// When a region being added overlaps another, the existing region is split.
// A query can therefore scan multiple regions, though with no overlaps they can cover a large area.
_tracking = tracking;
_handles = new RegionHandle[size / granularity];
_granularity = granularity;
_address = address;
_size = size;
_id = id;
}
public void SignalWrite()
{
Dirty = true;
}
public void ForceDirty(ulong address, ulong size)
{
foreach (var handle in _handles)
{
if (handle != null && handle.OverlapsWith(address, size))
{
handle.ForceDirty();
}
}
}
public void RegisterAction(RegionSignal action)
{
foreach (var handle in _handles)
{
if (handle != null)
{
handle.RegisterAction((address, size) => action(handle.Address, handle.Size));
}
}
}
public void RegisterPreciseAction(PreciseRegionSignal action)
{
foreach (var handle in _handles)
{
if (handle != null)
{
handle.RegisterPreciseAction((address, size, write) => action(handle.Address, handle.Size, write));
}
}
}
public void QueryModified(Action<ulong, ulong> modifiedAction)
{
if (!Dirty)
{
return;
}
Dirty = false;
QueryModified(_address, _size, modifiedAction);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private ulong HandlesToBytes(int handles)
{
return (ulong)handles * _granularity;
}
private void SplitHandle(int handleIndex, int splitIndex)
{
RegionHandle handle = _handles[handleIndex];
ulong address = _address + HandlesToBytes(handleIndex);
ulong size = HandlesToBytes(splitIndex - handleIndex);
// First, the target handle must be removed. Its data can still be used to determine the new handles.
RegionSignal signal = handle.PreAction;
handle.Dispose();
RegionHandle splitLow = _tracking.BeginTracking(address, size, _id);
splitLow.Parent = this;
if (signal != null)
{
splitLow.RegisterAction(signal);
}
_handles[handleIndex] = splitLow;
RegionHandle splitHigh = _tracking.BeginTracking(address + size, handle.Size - size, _id);
splitHigh.Parent = this;
if (signal != null)
{
splitHigh.RegisterAction(signal);
}
_handles[splitIndex] = splitHigh;
}
private void CreateHandle(int startHandle, int lastHandle)
{
ulong startAddress = _address + HandlesToBytes(startHandle);
// Scan for the first handle before us. If it's overlapping us, it must be split.
for (int i = startHandle - 1; i >= 0; i--)
{
RegionHandle handle = _handles[i];
if (handle != null)
{
if (handle.EndAddress > startAddress)
{
SplitHandle(i, startHandle);
return; // The remainder of this handle should be filled in later on.
}
break;
}
}
// Scan for handles after us. We should create a handle that goes up to this handle's start point, if present.
for (int i = startHandle + 1; i <= lastHandle; i++)
{
RegionHandle handle = _handles[i];
if (handle != null)
{
// Fill up to the found handle.
handle = _tracking.BeginTracking(startAddress, HandlesToBytes(i - startHandle), _id);
handle.Parent = this;
_handles[startHandle] = handle;
return;
}
}
// Can fill the whole range.
_handles[startHandle] = _tracking.BeginTracking(startAddress, HandlesToBytes(1 + lastHandle - startHandle), _id);
_handles[startHandle].Parent = this;
}
public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction)
{
int startHandle = (int)((address - _address) / _granularity);
int lastHandle = (int)((address + (size - 1) - _address) / _granularity);
ulong rgStart = _address + (ulong)startHandle * _granularity;
ulong rgSize = 0;
ulong endAddress = _address + ((ulong)lastHandle + 1) * _granularity;
int i = startHandle;
while (i <= lastHandle)
{
RegionHandle handle = _handles[i];
if (handle == null)
{
// Missing handle. A new handle must be created.
CreateHandle(i, lastHandle);
handle = _handles[i];
}
if (handle.EndAddress > endAddress)
{
// End address of handle is beyond the end of the search. Force a split.
SplitHandle(i, lastHandle + 1);
handle = _handles[i];
}
if (handle.Dirty)
{
rgSize += handle.Size;
handle.Reprotect();
}
else
{
// Submit the region scanned so far as dirty
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
rgStart = handle.EndAddress;
}
i += (int)(handle.Size / _granularity);
}
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
}
}
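// Editor's illustrative sketch (not part of the original source): querying only the sub-range
// that an operation actually touches; handles are created and split lazily to match the
// queried ranges. "drawBaseAddress", "drawSize" and "UploadRange" are hypothetical names.
//
//     smartHandle.QueryModified(drawBaseAddress, drawSize, (addr, sz) => UploadRange(addr, sz));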
public void QueryModified(ulong address, ulong size, Action<ulong, ulong> modifiedAction, int sequenceNumber)
{
int startHandle = (int)((address - _address) / _granularity);
int lastHandle = (int)((address + (size - 1) - _address) / _granularity);
ulong rgStart = _address + (ulong)startHandle * _granularity;
ulong rgSize = 0;
ulong endAddress = _address + ((ulong)lastHandle + 1) * _granularity;
int i = startHandle;
while (i <= lastHandle)
{
RegionHandle handle = _handles[i];
if (handle == null)
{
// Missing handle. A new handle must be created.
CreateHandle(i, lastHandle);
handle = _handles[i];
}
if (handle.EndAddress > endAddress)
{
// End address of handle is beyond the end of the search. Force a split.
SplitHandle(i, lastHandle + 1);
handle = _handles[i];
}
if (handle.Dirty && sequenceNumber != handle.SequenceNumber)
{
rgSize += handle.Size;
handle.Reprotect();
}
else
{
// Submit the region scanned so far as dirty
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
rgSize = 0;
}
rgStart = handle.EndAddress;
}
handle.SequenceNumber = sequenceNumber;
i += (int)(handle.Size / _granularity);
}
if (rgSize != 0)
{
modifiedAction(rgStart, rgSize);
}
}
public void Dispose()
{
foreach (var handle in _handles)
{
handle?.Dispose();
}
}
}
}

View file

@ -0,0 +1,144 @@
using Ryujinx.Memory.Range;
using System.Collections.Generic;
namespace Ryujinx.Memory.Tracking
{
/// <summary>
/// A region of virtual memory.
/// </summary>
class VirtualRegion : AbstractRegion
{
public List<RegionHandle> Handles = new List<RegionHandle>();
private readonly MemoryTracking _tracking;
private MemoryPermission _lastPermission;
public VirtualRegion(MemoryTracking tracking, ulong address, ulong size, MemoryPermission lastPermission = MemoryPermission.Invalid) : base(address, size)
{
_lastPermission = lastPermission;
_tracking = tracking;
}
/// <inheritdoc/>
public override void Signal(ulong address, ulong size, bool write, int? exemptId)
{
IList<RegionHandle> handles = Handles;
for (int i = 0; i < handles.Count; i++)
{
if (exemptId == null || handles[i].Id != exemptId.Value)
{
handles[i].Signal(address, size, write, ref handles);
}
}
UpdateProtection();
}
/// <inheritdoc/>
public override void SignalPrecise(ulong address, ulong size, bool write, int? exemptId)
{
IList<RegionHandle> handles = Handles;
bool allPrecise = true;
for (int i = 0; i < handles.Count; i++)
{
if (exemptId == null || handles[i].Id != exemptId.Value)
{
allPrecise &= handles[i].SignalPrecise(address, size, write, ref handles);
}
}
// Only update protection if a regular signal handler was called.
// This allows precise actions to skip reprotection costs if they want (they can still do it manually).
if (!allPrecise)
{
UpdateProtection();
}
}
/// <summary>
/// Signal that this region has been mapped or unmapped.
/// </summary>
/// <param name="mapped">True if the region has been mapped, false if unmapped</param>
public void SignalMappingChanged(bool mapped)
{
_lastPermission = MemoryPermission.Invalid;
foreach (RegionHandle handle in Handles)
{
handle.SignalMappingChanged(mapped);
}
}
/// <summary>
/// Gets the strictest permission that the child handles demand. Assumes that the tracking lock has been obtained.
/// </summary>
/// <returns>Protection level that this region demands</returns>
public MemoryPermission GetRequiredPermission()
{
// Start with Read/Write; each handle can strip off permissions as necessary.
// Assumes the tracking lock has already been obtained.
MemoryPermission result = MemoryPermission.ReadAndWrite;
foreach (var handle in Handles)
{
result &= handle.RequiredPermission;
if (result == 0) return result;
}
return result;
}
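// Editor's note (worked example, not from the original source): permissions combine by bitwise
// AND, so one clean handle requiring Read and another with a pending pre-action requiring None
// yield MemoryPermission.None, protecting the whole virtual region against reads and writes.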
/// <summary>
/// Updates the protection for this virtual region.
/// </summary>
public bool UpdateProtection()
{
MemoryPermission permission = GetRequiredPermission();
if (_lastPermission != permission)
{
_tracking.ProtectVirtualRegion(this, permission);
_lastPermission = permission;
return true;
}
return false;
}
/// <summary>
/// Removes a handle from this virtual region. If there are no handles left, this virtual region is removed.
/// </summary>
/// <param name="handle">Handle to remove</param>
public void RemoveHandle(RegionHandle handle)
{
lock (_tracking.TrackingLock)
{
Handles.Remove(handle);
UpdateProtection();
if (Handles.Count == 0)
{
_tracking.RemoveVirtual(this);
}
}
}
public override INonOverlappingRange Split(ulong splitAddress)
{
VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress, _lastPermission);
Size = splitAddress - Address;
// The new region inherits all of our parent handles.
newRegion.Handles = new List<RegionHandle>(Handles);
foreach (var parent in Handles)
{
parent.AddChild(newRegion);
}
return newRegion;
}
}
}

View file

@ -0,0 +1,87 @@
using Ryujinx.Common.Collections;
using System;
namespace Ryujinx.Memory.WindowsShared
{
/// <summary>
/// An intrusive Red-Black Tree that also supports getting nodes overlapping a given range.
/// </summary>
/// <typeparam name="T">Type of the value stored on the node</typeparam>
class MappingTree<T> : IntrusiveRedBlackTree<RangeNode<T>>
{
private const int ArrayGrowthSize = 16;
public int GetNodes(ulong start, ulong end, ref RangeNode<T>[] overlaps, int overlapCount = 0)
{
RangeNode<T> node = this.GetNodeByKey(start);
for (; node != null; node = node.Successor)
{
if (overlaps.Length <= overlapCount)
{
Array.Resize(ref overlaps, overlapCount + ArrayGrowthSize);
}
overlaps[overlapCount++] = node;
if (node.End >= end)
{
break;
}
}
return overlapCount;
}
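// Editor's illustrative sketch (not part of the original source): collecting every node that
// overlaps [va, va + size) into a reusable array. "mappings", "va" and "size" are hypothetical.
//
//     var overlaps = new RangeNode<ulong>[16];
//     int count = mappings.GetNodes(va, va + size, ref overlaps);
//     for (int i = 0; i < count; i++)
//     {
//         // overlaps[i].Start, overlaps[i].End and overlaps[i].Value describe one mapping.
//     }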
}
class RangeNode<T> : IntrusiveRedBlackTreeNode<RangeNode<T>>, IComparable<RangeNode<T>>, IComparable<ulong>
{
public ulong Start { get; }
public ulong End { get; private set; }
public T Value { get; }
public RangeNode(ulong start, ulong end, T value)
{
Start = start;
End = end;
Value = value;
}
public void Extend(ulong sizeDelta)
{
End += sizeDelta;
}
public int CompareTo(RangeNode<T> other)
{
if (Start < other.Start)
{
return -1;
}
else if (Start <= other.End - 1UL)
{
return 0;
}
else
{
return 1;
}
}
public int CompareTo(ulong address)
{
if (address < Start)
{
return 1;
}
else if (address <= End - 1UL)
{
return 0;
}
else
{
return -1;
}
}
}
}

View file

@ -0,0 +1,736 @@
using Ryujinx.Common.Collections;
using Ryujinx.Common.Memory.PartialUnmaps;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.Versioning;
using System.Threading;
namespace Ryujinx.Memory.WindowsShared
{
/// <summary>
/// Windows memory placeholder manager.
/// </summary>
[SupportedOSPlatform("windows")]
class PlaceholderManager
{
private const int InitialOverlapsSize = 10;
private readonly MappingTree<ulong> _mappings;
private readonly MappingTree<MemoryPermission> _protections;
private readonly IntPtr _partialUnmapStatePtr;
private readonly Thread _partialUnmapTrimThread;
/// <summary>
/// Creates a new instance of the Windows memory placeholder manager.
/// </summary>
public PlaceholderManager()
{
_mappings = new MappingTree<ulong>();
_protections = new MappingTree<MemoryPermission>();
_partialUnmapStatePtr = PartialUnmapState.GlobalState;
_partialUnmapTrimThread = new Thread(TrimThreadLocalMapLoop);
_partialUnmapTrimThread.Name = "CPU.PartialUnmapTrimThread";
_partialUnmapTrimThread.IsBackground = true;
_partialUnmapTrimThread.Start();
}
/// <summary>
/// Gets a reference to the partial unmap state struct.
/// </summary>
/// <returns>A reference to the partial unmap state struct</returns>
private unsafe ref PartialUnmapState GetPartialUnmapState()
{
return ref Unsafe.AsRef<PartialUnmapState>((void*)_partialUnmapStatePtr);
}
/// <summary>
/// Trims inactive threads from the partial unmap state's thread mapping every few seconds.
/// Should be run on a background thread so that it doesn't stop the program from closing.
/// </summary>
private void TrimThreadLocalMapLoop()
{
while (true)
{
Thread.Sleep(2000);
GetPartialUnmapState().TrimThreads();
}
}
/// <summary>
/// Reserves a range of the address space to be later mapped as shared memory views.
/// </summary>
/// <param name="address">Start address of the region to reserve</param>
/// <param name="size">Size in bytes of the region to reserve</param>
public void ReserveRange(ulong address, ulong size)
{
lock (_mappings)
{
_mappings.Add(new RangeNode<ulong>(address, address + size, ulong.MaxValue));
}
lock (_protections)
{
_protections.Add(new RangeNode<MemoryPermission>(address, address + size, MemoryPermission.None));
}
}
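// Editor's illustrative sketch (not part of the original source): the expected call sequence
// for a placeholder-backed mapping. "manager", "sharedMemoryHandle", "baseAddress",
// "viewAddress", "viewSize" and "owner" are hypothetical names.
//
//     manager.ReserveRange(baseAddress, reservationSize);
//     manager.MapView(sharedMemoryHandle, 0, (IntPtr)viewAddress, (IntPtr)viewSize, owner);
//     // ... use the view ...
//     manager.UnmapView(sharedMemoryHandle, (IntPtr)viewAddress, (IntPtr)viewSize, owner);
//     manager.UnreserveRange(baseAddress, reservationSize);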
/// <summary>
/// Unreserves a range of memory that has been previously reserved with <see cref="ReserveRange"/>.
/// </summary>
/// <param name="address">Start address of the region to unreserve</param>
/// <param name="size">Size in bytes of the region to unreserve</param>
/// <exception cref="WindowsApiException">Thrown when the Windows API returns an error unreserving the memory</exception>
public void UnreserveRange(ulong address, ulong size)
{
ulong endAddress = address + size;
lock (_mappings)
{
RangeNode<ulong> node = _mappings.GetNodeByKey(address);
RangeNode<ulong> successorNode;
for (; node != null; node = successorNode)
{
successorNode = node.Successor;
if (IsMapped(node.Value))
{
if (!WindowsApi.UnmapViewOfFile2(WindowsApi.CurrentProcessHandle, (IntPtr)node.Start, 2))
{
throw new WindowsApiException("UnmapViewOfFile2");
}
}
_mappings.Remove(node);
if (node.End >= endAddress)
{
break;
}
}
}
RemoveProtection(address, size);
}
/// <summary>
/// Maps a shared memory view on a previously reserved memory region.
/// </summary>
/// <param name="sharedMemory">Shared memory that will be the backing storage for the view</param>
/// <param name="srcOffset">Offset in the shared memory to map</param>
/// <param name="location">Address to map the view into</param>
/// <param name="size">Size of the view in bytes</param>
/// <param name="owner">Memory block that owns the mapping</param>
public void MapView(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size, MemoryBlock owner)
{
ref var partialUnmapLock = ref GetPartialUnmapState().PartialUnmapLock;
partialUnmapLock.AcquireReaderLock();
try
{
UnmapViewInternal(sharedMemory, location, size, owner, updateProtection: false);
MapViewInternal(sharedMemory, srcOffset, location, size, updateProtection: true);
}
finally
{
partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Maps a shared memory view on a previously reserved memory region.
/// </summary>
/// <param name="sharedMemory">Shared memory that will be the backing storage for the view</param>
/// <param name="srcOffset">Offset in the shared memory to map</param>
/// <param name="location">Address to map the view into</param>
/// <param name="size">Size of the view in bytes</param>
/// <param name="updateProtection">Indicates if the memory protections should be updated after the map</param>
/// <exception cref="WindowsApiException">Thrown when the Windows API returns an error mapping the memory</exception>
private void MapViewInternal(IntPtr sharedMemory, ulong srcOffset, IntPtr location, IntPtr size, bool updateProtection)
{
SplitForMap((ulong)location, (ulong)size, srcOffset);
var ptr = WindowsApi.MapViewOfFile3(
sharedMemory,
WindowsApi.CurrentProcessHandle,
location,
srcOffset,
size,
0x4000, // AllocationType.ReplacePlaceholder
MemoryProtection.ReadWrite,
IntPtr.Zero,
0);
if (ptr == IntPtr.Zero)
{
throw new WindowsApiException("MapViewOfFile3");
}
if (updateProtection)
{
UpdateProtection((ulong)location, (ulong)size, MemoryPermission.ReadAndWrite);
}
}
/// <summary>
/// Splits a larger placeholder, slicing at the start and end address, for a new memory mapping.
/// </summary>
/// <param name="address">Address to split</param>
/// <param name="size">Size of the new region</param>
/// <param name="backingOffset">Offset in the shared memory that will be mapped</param>
private void SplitForMap(ulong address, ulong size, ulong backingOffset)
{
ulong endAddress = address + size;
var overlaps = new RangeNode<ulong>[InitialOverlapsSize];
lock (_mappings)
{
int count = _mappings.GetNodes(address, endAddress, ref overlaps);
Debug.Assert(count == 1);
Debug.Assert(!IsMapped(overlaps[0].Value));
var overlap = overlaps[0];
ulong overlapStart = overlap.Start;
ulong overlapEnd = overlap.End;
ulong overlapValue = overlap.Value;
_mappings.Remove(overlap);
bool overlapStartsBefore = overlapStart < address;
bool overlapEndsAfter = overlapEnd > endAddress;
if (overlapStartsBefore && overlapEndsAfter)
{
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)size,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(new RangeNode<ulong>(overlapStart, address, overlapValue));
_mappings.Add(new RangeNode<ulong>(endAddress, overlapEnd, AddBackingOffset(overlapValue, endAddress - overlapStart)));
}
else if (overlapStartsBefore)
{
ulong overlappedSize = overlapEnd - address;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)overlappedSize,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(new RangeNode<ulong>(overlapStart, address, overlapValue));
}
else if (overlapEndsAfter)
{
ulong overlappedSize = endAddress - overlapStart;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)overlapStart,
(IntPtr)overlappedSize,
AllocationType.Release | AllocationType.PreservePlaceholder));
_mappings.Add(new RangeNode<ulong>(endAddress, overlapEnd, AddBackingOffset(overlapValue, overlappedSize)));
}
_mappings.Add(new RangeNode<ulong>(address, endAddress, backingOffset));
}
}
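// Editor's note (worked example, not from the original source): mapping 0x3000..0x5000 inside
// a free placeholder spanning 0x1000..0x8000 releases just the middle with PreservePlaceholder
// and leaves three nodes: [0x1000, 0x3000) unmapped, [0x3000, 0x5000) carrying the new backing
// offset, and [0x5000, 0x8000) unmapped.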
/// <summary>
/// Unmaps a view that has been previously mapped with <see cref="MapView"/>.
/// </summary>
/// <remarks>
/// For "partial unmaps" (when not the entire mapped range is being unmapped), it might be
/// necessary to unmap the whole range and then remap the sub-ranges that should remain mapped.
/// </remarks>
/// <param name="sharedMemory">Shared memory that the view being unmapped belongs to</param>
/// <param name="location">Address to unmap</param>
/// <param name="size">Size of the region to unmap in bytes</param>
/// <param name="owner">Memory block that owns the mapping</param>
public void UnmapView(IntPtr sharedMemory, IntPtr location, IntPtr size, MemoryBlock owner)
{
ref var partialUnmapLock = ref GetPartialUnmapState().PartialUnmapLock;
partialUnmapLock.AcquireReaderLock();
try
{
UnmapViewInternal(sharedMemory, location, size, owner, updateProtection: true);
}
finally
{
partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Unmaps a view that has been previously mapped with <see cref="MapView"/>.
/// </summary>
/// <remarks>
/// For "partial unmaps" (when not the entire mapped range is being unmapped), it might be
/// necessary to unmap the whole range and then remap the sub-ranges that should remain mapped.
/// </remarks>
/// <param name="sharedMemory">Shared memory that the view being unmapped belongs to</param>
/// <param name="location">Address to unmap</param>
/// <param name="size">Size of the region to unmap in bytes</param>
/// <param name="owner">Memory block that owns the mapping</param>
/// <param name="updateProtection">Indicates if the memory protections should be updated after the unmap</param>
/// <exception cref="WindowsApiException">Thrown when the Windows API returns an error unmapping or remapping the memory</exception>
private void UnmapViewInternal(IntPtr sharedMemory, IntPtr location, IntPtr size, MemoryBlock owner, bool updateProtection)
{
ulong startAddress = (ulong)location;
ulong unmapSize = (ulong)size;
ulong endAddress = startAddress + unmapSize;
var overlaps = new RangeNode<ulong>[InitialOverlapsSize];
int count;
lock (_mappings)
{
count = _mappings.GetNodes(startAddress, endAddress, ref overlaps);
}
for (int index = 0; index < count; index++)
{
var overlap = overlaps[index];
if (IsMapped(overlap.Value))
{
lock (_mappings)
{
_mappings.Remove(overlap);
_mappings.Add(new RangeNode<ulong>(overlap.Start, overlap.End, ulong.MaxValue));
}
bool overlapStartsBefore = overlap.Start < startAddress;
bool overlapEndsAfter = overlap.End > endAddress;
if (overlapStartsBefore || overlapEndsAfter)
{
// If the overlap extends beyond the region we are unmapping,
// then we need to re-map the regions that are supposed to remain mapped.
// This is necessary because Windows does not support partial view unmaps.
// That is, you can only fully unmap a view that was previously mapped; you can't just unmap a chunk of it.
ref var partialUnmapState = ref GetPartialUnmapState();
ref var partialUnmapLock = ref partialUnmapState.PartialUnmapLock;
partialUnmapLock.UpgradeToWriterLock();
try
{
partialUnmapState.PartialUnmapsCount++;
if (!WindowsApi.UnmapViewOfFile2(WindowsApi.CurrentProcessHandle, (IntPtr)overlap.Start, 2))
{
throw new WindowsApiException("UnmapViewOfFile2");
}
if (overlapStartsBefore)
{
ulong remapSize = startAddress - overlap.Start;
MapViewInternal(sharedMemory, overlap.Value, (IntPtr)overlap.Start, (IntPtr)remapSize, updateProtection: false);
RestoreRangeProtection(overlap.Start, remapSize);
}
if (overlapEndsAfter)
{
ulong overlappedSize = endAddress - overlap.Start;
ulong remapBackingOffset = overlap.Value + overlappedSize;
ulong remapAddress = overlap.Start + overlappedSize;
ulong remapSize = overlap.End - endAddress;
MapViewInternal(sharedMemory, remapBackingOffset, (IntPtr)remapAddress, (IntPtr)remapSize, updateProtection: false);
RestoreRangeProtection(remapAddress, remapSize);
}
}
finally
{
partialUnmapLock.DowngradeFromWriterLock();
}
}
else if (!WindowsApi.UnmapViewOfFile2(WindowsApi.CurrentProcessHandle, (IntPtr)overlap.Start, 2))
{
throw new WindowsApiException("UnmapViewOfFile2");
}
}
}
CoalesceForUnmap(startAddress, unmapSize, owner);
if (updateProtection)
{
UpdateProtection(startAddress, unmapSize, MemoryPermission.None);
}
}
/// <summary>
/// Coalesces adjacent placeholders after unmap.
/// </summary>
/// <param name="address">Address of the region that was unmapped</param>
/// <param name="size">Size of the region that was unmapped in bytes</param>
/// <param name="owner">Memory block that owns the mapping</param>
private void CoalesceForUnmap(ulong address, ulong size, MemoryBlock owner)
{
ulong endAddress = address + size;
ulong blockAddress = (ulong)owner.Pointer;
ulong blockEnd = blockAddress + owner.Size;
int unmappedCount = 0;
lock (_mappings)
{
RangeNode<ulong> node = _mappings.GetNodeByKey(address);
if (node == null)
{
// Nothing to coalesce if we have no overlaps.
return;
}
RangeNode<ulong> predecessor = node.Predecessor;
RangeNode<ulong> successor = null;
for (; node != null; node = successor)
{
successor = node.Successor;
var overlap = node;
if (!IsMapped(overlap.Value))
{
address = Math.Min(address, overlap.Start);
endAddress = Math.Max(endAddress, overlap.End);
_mappings.Remove(overlap);
unmappedCount++;
}
if (node.End >= endAddress)
{
break;
}
}
if (predecessor != null && !IsMapped(predecessor.Value) && predecessor.Start >= blockAddress)
{
address = Math.Min(address, predecessor.Start);
_mappings.Remove(predecessor);
unmappedCount++;
}
if (successor != null && !IsMapped(successor.Value) && successor.End <= blockEnd)
{
endAddress = Math.Max(endAddress, successor.End);
_mappings.Remove(successor);
unmappedCount++;
}
_mappings.Add(new RangeNode<ulong>(address, endAddress, ulong.MaxValue));
}
if (unmappedCount > 1)
{
size = endAddress - address;
CheckFreeResult(WindowsApi.VirtualFree(
(IntPtr)address,
(IntPtr)size,
AllocationType.Release | AllocationType.CoalescePlaceholders));
}
}
/// <summary>
/// Reprotects a region of memory that has been mapped.
/// </summary>
/// <param name="address">Address of the region to reprotect</param>
/// <param name="size">Size of the region to reprotect in bytes</param>
/// <param name="permission">New permissions</param>
/// <returns>True if the reprotection was successful, false otherwise</returns>
public bool ReprotectView(IntPtr address, IntPtr size, MemoryPermission permission)
{
ref var partialUnmapLock = ref GetPartialUnmapState().PartialUnmapLock;
partialUnmapLock.AcquireReaderLock();
try
{
return ReprotectViewInternal(address, size, permission, false);
}
finally
{
partialUnmapLock.ReleaseReaderLock();
}
}
/// <summary>
/// Reprotects a region of memory that has been mapped.
/// </summary>
/// <param name="address">Address of the region to reprotect</param>
/// <param name="size">Size of the region to reprotect in bytes</param>
/// <param name="permission">New permissions</param>
/// <param name="throwOnError">Throw an exception instead of returning an error if the operation fails</param>
/// <returns>True if the reprotection was successful or if <paramref name="throwOnError"/> is true, false otherwise</returns>
/// <exception cref="WindowsApiException">If <paramref name="throwOnError"/> is true, it is thrown when the Windows API returns an error reprotecting the memory</exception>
private bool ReprotectViewInternal(IntPtr address, IntPtr size, MemoryPermission permission, bool throwOnError)
{
ulong reprotectAddress = (ulong)address;
ulong reprotectSize = (ulong)size;
ulong endAddress = reprotectAddress + reprotectSize;
bool success = true;
lock (_mappings)
{
RangeNode<ulong> node = _mappings.GetNodeByKey(reprotectAddress);
RangeNode<ulong> successorNode;
for (; node != null; node = successorNode)
{
successorNode = node.Successor;
var overlap = node;
ulong mappedAddress = overlap.Start;
ulong mappedSize = overlap.End - overlap.Start;
if (mappedAddress < reprotectAddress)
{
ulong delta = reprotectAddress - mappedAddress;
mappedAddress = reprotectAddress;
mappedSize -= delta;
}
ulong mappedEndAddress = mappedAddress + mappedSize;
if (mappedEndAddress > endAddress)
{
ulong delta = mappedEndAddress - endAddress;
mappedSize -= delta;
}
if (!WindowsApi.VirtualProtect((IntPtr)mappedAddress, (IntPtr)mappedSize, WindowsApi.GetProtection(permission), out _))
{
if (throwOnError)
{
throw new WindowsApiException("VirtualProtect");
}
success = false;
}
if (node.End >= endAddress)
{
break;
}
}
}
UpdateProtection(reprotectAddress, reprotectSize, permission);
return success;
}
/// <summary>
/// Checks the result of a VirtualFree operation, throwing if needed.
/// </summary>
/// <param name="success">Operation result</param>
/// <exception cref="WindowsApiException">Thrown if <paramref name="success"/> is false</exception>
private static void CheckFreeResult(bool success)
{
if (!success)
{
throw new WindowsApiException("VirtualFree");
}
}
/// <summary>
/// Adds an offset to a backing offset. This will do nothing if the backing offset is the special "unmapped" value.
/// </summary>
/// <param name="backingOffset">Backing offset</param>
/// <param name="offset">Offset to be added</param>
/// <returns>Added offset or just <paramref name="backingOffset"/> if the region is unmapped</returns>
private static ulong AddBackingOffset(ulong backingOffset, ulong offset)
{
if (backingOffset == ulong.MaxValue)
{
return backingOffset;
}
return backingOffset + offset;
}
/// <summary>
/// Checks if a backing offset refers to a mapped region.
/// </summary>
/// <param name="backingOffset">Backing offset to check</param>
/// <returns>True if the backing offset refers to a mapped region, false if it is the special "unmapped" value</returns>
private static bool IsMapped(ulong backingOffset)
{
return backingOffset != ulong.MaxValue;
}
/// <summary>
/// Adds a protection to the list of protections, merging or splitting existing entries as needed.
/// </summary>
/// <param name="address">Address of the protected region</param>
/// <param name="size">Size of the protected region in bytes</param>
/// <param name="permission">Memory permissions of the region</param>
private void UpdateProtection(ulong address, ulong size, MemoryPermission permission)
{
ulong endAddress = address + size;
lock (_protections)
{
RangeNode<MemoryPermission> node = _protections.GetNodeByKey(address);
if (node != null &&
node.Start <= address &&
node.End >= endAddress &&
node.Value == permission)
{
return;
}
RangeNode<MemoryPermission> successorNode;
ulong startAddress = address;
for (; node != null; node = successorNode)
{
successorNode = node.Successor;
var protection = node;
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
MemoryPermission protPermission = protection.Value;
_protections.Remove(protection);
if (protPermission == permission)
{
if (startAddress > protAddress)
{
startAddress = protAddress;
}
if (endAddress < protEndAddress)
{
endAddress = protEndAddress;
}
}
else
{
if (startAddress > protAddress)
{
_protections.Add(new RangeNode<MemoryPermission>(protAddress, startAddress, protPermission));
}
if (endAddress < protEndAddress)
{
_protections.Add(new RangeNode<MemoryPermission>(endAddress, protEndAddress, protPermission));
}
}
if (node.End >= endAddress)
{
break;
}
}
_protections.Add(new RangeNode<MemoryPermission>(startAddress, endAddress, permission));
}
}
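// Editor's note (worked example, not from the original source): if [0x0, 0x3000) is currently
// recorded as Read and [0x1000, 0x2000) is reprotected to None, the record is split into
// [0x0, 0x1000) Read, [0x1000, 0x2000) None and [0x2000, 0x3000) Read; re-applying Read to a
// sub-range that is already Read returns early without modifying the tree.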
/// <summary>
/// Removes protection from the list of protections.
/// </summary>
/// <param name="address">Address of the protected region</param>
/// <param name="size">Size of the protected region in bytes</param>
private void RemoveProtection(ulong address, ulong size)
{
ulong endAddress = address + size;
lock (_protections)
{
RangeNode<MemoryPermission> node = _protections.GetNodeByKey(address);
RangeNode<MemoryPermission> successorNode;
for (; node != null; node = successorNode)
{
successorNode = node.Successor;
var protection = node;
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
MemoryPermission protPermission = protection.Value;
_protections.Remove(protection);
if (address > protAddress)
{
_protections.Add(new RangeNode<MemoryPermission>(protAddress, address, protPermission));
}
if (endAddress < protEndAddress)
{
_protections.Add(new RangeNode<MemoryPermission>(endAddress, protEndAddress, protPermission));
}
if (node.End >= endAddress)
{
break;
}
}
}
}
/// <summary>
/// Restores the protection of a given memory region that was remapped, using the protections list.
/// </summary>
/// <param name="address">Address of the remapped region</param>
/// <param name="size">Size of the remapped region in bytes</param>
private void RestoreRangeProtection(ulong address, ulong size)
{
ulong endAddress = address + size;
var overlaps = new RangeNode<MemoryPermission>[InitialOverlapsSize];
int count;
lock (_protections)
{
count = _protections.GetNodes(address, endAddress, ref overlaps);
}
ulong startAddress = address;
for (int index = 0; index < count; index++)
{
var protection = overlaps[index];
// If protection is R/W we don't need to reprotect as views are initially mapped as R/W.
if (protection.Value == MemoryPermission.ReadAndWrite)
{
continue;
}
ulong protAddress = protection.Start;
ulong protEndAddress = protection.End;
if (protAddress < address)
{
protAddress = address;
}
if (protEndAddress > endAddress)
{
protEndAddress = endAddress;
}
ReprotectViewInternal((IntPtr)protAddress, (IntPtr)(protEndAddress - protAddress), protection.Value, true);
}
}
}
}

View file

@ -0,0 +1,101 @@
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.Memory.WindowsShared
{
static partial class WindowsApi
{
public static readonly IntPtr InvalidHandleValue = new IntPtr(-1);
public static readonly IntPtr CurrentProcessHandle = new IntPtr(-1);
[LibraryImport("kernel32.dll", SetLastError = true)]
public static partial IntPtr VirtualAlloc(
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect);
[LibraryImport("KernelBase.dll", SetLastError = true)]
public static partial IntPtr VirtualAlloc2(
IntPtr process,
IntPtr lpAddress,
IntPtr dwSize,
AllocationType flAllocationType,
MemoryProtection flProtect,
IntPtr extendedParameters,
ulong parameterCount);
[LibraryImport("kernel32.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
public static partial bool VirtualProtect(
IntPtr lpAddress,
IntPtr dwSize,
MemoryProtection flNewProtect,
out MemoryProtection lpflOldProtect);
[LibraryImport("kernel32.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
public static partial bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType);
[LibraryImport("kernel32.dll", SetLastError = true, EntryPoint = "CreateFileMappingW")]
public static partial IntPtr CreateFileMapping(
IntPtr hFile,
IntPtr lpFileMappingAttributes,
FileMapProtection flProtect,
uint dwMaximumSizeHigh,
uint dwMaximumSizeLow,
[MarshalAs(UnmanagedType.LPWStr)] string lpName);
[LibraryImport("kernel32.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
public static partial bool CloseHandle(IntPtr hObject);
[LibraryImport("kernel32.dll", SetLastError = true)]
public static partial IntPtr MapViewOfFile(
IntPtr hFileMappingObject,
uint dwDesiredAccess,
uint dwFileOffsetHigh,
uint dwFileOffsetLow,
IntPtr dwNumberOfBytesToMap);
[LibraryImport("KernelBase.dll", SetLastError = true)]
public static partial IntPtr MapViewOfFile3(
IntPtr hFileMappingObject,
IntPtr process,
IntPtr baseAddress,
ulong offset,
IntPtr dwNumberOfBytesToMap,
ulong allocationType,
MemoryProtection dwDesiredAccess,
IntPtr extendedParameters,
ulong parameterCount);
[LibraryImport("kernel32.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
public static partial bool UnmapViewOfFile(IntPtr lpBaseAddress);
[LibraryImport("KernelBase.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
public static partial bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags);
[LibraryImport("kernel32.dll")]
public static partial uint GetLastError();
[LibraryImport("kernel32.dll")]
public static partial int GetCurrentThreadId();
public static MemoryProtection GetProtection(MemoryPermission permission)
{
return permission switch
{
MemoryPermission.None => MemoryProtection.NoAccess,
MemoryPermission.Read => MemoryProtection.ReadOnly,
MemoryPermission.ReadAndWrite => MemoryProtection.ReadWrite,
MemoryPermission.ReadAndExecute => MemoryProtection.ExecuteRead,
MemoryPermission.ReadWriteExecute => MemoryProtection.ExecuteReadWrite,
MemoryPermission.Execute => MemoryProtection.Execute,
_ => throw new MemoryProtectionException(permission)
};
}
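        // Illustrative sketch, not part of the original source: a caller would typically
        // translate a MemoryPermission before reprotecting, along these lines ("address" and
        // "size" are hypothetical locals; WindowsApiException lives elsewhere in this folder):
        //
        //   if (!WindowsApi.VirtualProtect(address, (IntPtr)size,
        //           WindowsApi.GetProtection(MemoryPermission.Read), out _))
        //   {
        //       throw new WindowsApiException("VirtualProtect");
        //   }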
}
}

View file

@ -0,0 +1,24 @@
using System;
namespace Ryujinx.Memory.WindowsShared
{
class WindowsApiException : Exception
{
public WindowsApiException()
{
}
public WindowsApiException(string functionName) : base(CreateMessage(functionName))
{
}
public WindowsApiException(string functionName, Exception inner) : base(CreateMessage(functionName), inner)
{
}
private static string CreateMessage(string functionName)
{
return $"{functionName} returned error code 0x{WindowsApi.GetLastError():X}.";
}
}
}

View file

@ -0,0 +1,52 @@
using System;
namespace Ryujinx.Memory.WindowsShared
{
[Flags]
enum AllocationType : uint
{
CoalescePlaceholders = 0x1,
PreservePlaceholder = 0x2,
Commit = 0x1000,
Reserve = 0x2000,
Decommit = 0x4000,
ReplacePlaceholder = 0x4000,
Release = 0x8000,
ReservePlaceholder = 0x40000,
Reset = 0x80000,
Physical = 0x400000,
TopDown = 0x100000,
WriteWatch = 0x200000,
LargePages = 0x20000000
}
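    // Illustrative note, not part of the original source: these values mirror the Win32 MEM_*
    // constants, so a placeholder-based mapper would typically reserve an address range with
    // something like "AllocationType.Reserve | AllocationType.ReservePlaceholder" and later
    // split and replace placeholders when mapping views into that range.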
[Flags]
enum MemoryProtection : uint
{
NoAccess = 0x01,
ReadOnly = 0x02,
ReadWrite = 0x04,
WriteCopy = 0x08,
Execute = 0x10,
ExecuteRead = 0x20,
ExecuteReadWrite = 0x40,
ExecuteWriteCopy = 0x80,
GuardModifierflag = 0x100,
NoCacheModifierflag = 0x200,
WriteCombineModifierflag = 0x400
}
[Flags]
enum FileMapProtection : uint
{
PageReadonly = 0x02,
PageReadWrite = 0x04,
PageWriteCopy = 0x08,
PageExecuteRead = 0x20,
PageExecuteReadWrite = 0x40,
SectionCommit = 0x8000000,
SectionImage = 0x1000000,
SectionNoCache = 0x10000000,
SectionReserve = 0x4000000
}
}

View file

@ -0,0 +1,38 @@
using System;
namespace Ryujinx.Memory
{
public sealed class WritableRegion : IDisposable
{
private readonly IWritableBlock _block;
private readonly ulong _va;
private readonly bool _tracked;
private bool NeedsWriteback => _block != null;
public Memory<byte> Memory { get; }
public WritableRegion(IWritableBlock block, ulong va, Memory<byte> memory, bool tracked = false)
{
_block = block;
_va = va;
_tracked = tracked;
Memory = memory;
}
public void Dispose()
{
if (NeedsWriteback)
{
if (_tracked)
{
_block.Write(_va, Memory.Span);
}
else
{
_block.WriteUntracked(_va, Memory.Span);
}
}
}
}
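    // Illustrative sketch, not part of the original source: a caller usually obtains a region
    // from an IVirtualMemoryManager, mutates the span, and lets Dispose write the data back
    // when the underlying pages are not contiguous ("manager", "va" and "size" are
    // hypothetical locals):
    //
    //   using (WritableRegion region = manager.GetWritableRegion(va, size))
    //   {
    //       region.Memory.Span.Fill(0);
    //   } // Dispose() calls Write/WriteUntracked on the backing IWritableBlock when needed.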
}