Compare commits

...

4 Commits

Author SHA1 Message Date
lizzie
0c751bd96d fix
Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-10-05 00:04:23 +02:00
lizzie
3d08714206 [core] use memcpy instead of hand rolling aligned cases
Hand-rolling memcpy like this is always frowned upon because the compiler has more insight into what's going on (plus the hand-rolled code compiles down to a worse version of itself in assembly). This removes some branches that are just straight up redundant, and may save time especially on systems without fastmem enabled.
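
As a minimal sketch of the pattern the commit moves to (names here are illustrative, not the actual Eden helpers), assuming T is trivially copyable and ptr points at sizeof(T) readable bytes:

    #include <cstdint>
    #include <cstring>
    #include <type_traits>

    // One memcpy replaces the per-size aligned fast paths; the compiler
    // lowers it to a single plain load when it can prove the access is
    // aligned, and to a safe unaligned sequence otherwise.
    template <typename T>
    T ReadValue(const std::uint8_t* ptr) {
        static_assert(std::is_trivially_copyable_v<T>);
        T result{};
        std::memcpy(&result, ptr, sizeof(T));
        return result;
    }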

Signed-off-by: lizzie <lizzie@eden-emu.dev>
2025-10-05 00:04:23 +02:00
crueter
1a13e79c3d [cmake] fix video_core and tests comp errors on Windows (#2631)
The tests target did not link to video_core, so the GPUOpen
target was not propagated and vk_mem_alloc could not be found.

also msvc sucks
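
A minimal sketch of the transitive propagation at play (the GPUOpen::VMA target name is an assumption, not necessarily the exact Eden target):

    # VMA's headers come in through video_core's PUBLIC dependency, so any
    # consumer of video_core inherits the include path for vk_mem_alloc.h:
    target_link_libraries(video_core PUBLIC GPUOpen::VMA)
    # The fix: tests must link video_core to inherit that usage requirement.
    target_link_libraries(tests PRIVATE video_core)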

Signed-off-by: crueter <crueter@eden-emu.dev>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/2631
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
2025-10-05 00:00:52 +02:00
Ribbit
268918aece [vk] Implement Shader Read Barrier (#2671)
Adding the shader read barrier keeps every render/compute/transfer write visible before the image is sampled, preventing "read-before-writes-finish" hazards. Without it you can get random stale frames, flickering post-process passes, partially updated HUD textures, and corrupted depth-to-color conversions, especially in scenes that render into an offscreen image and immediately feed that image to a shader (reflections, bloom, dynamic resolution, depth visualizers, etc.). This fix makes those render-to-texture chains deterministic again across all Vulkan drivers.
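
In Vulkan terms the hazard and its fix look roughly like this (a condensed sketch; `cmd` and `offscreen_image` are hypothetical handles, and the real barrier this PR records is in the diff below):

    // Pass 1 writes `offscreen_image` as a color attachment; pass 2 samples
    // it in a fragment shader. Without a barrier the GPU may begin the
    // sample before the attachment write is visible.
    const VkImageMemoryBarrier barrier{
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
        .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_GENERAL,
        .newLayout = VK_IMAGE_LAYOUT_GENERAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = offscreen_image,
        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    vkCmdPipelineBarrier(cmd,
        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // producer stage
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,         // consumer stage
        0, 0, nullptr, 0, nullptr, 1, &barrier);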

Co-authored-by: Ribbit <ribbit@placeholder.com>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/2671
Reviewed-by: MaranBr <maranbr@eden-emu.dev>
Reviewed-by: crueter <crueter@eden-emu.dev>
Co-authored-by: Ribbit <ribbit@eden-emu.dev>
Co-committed-by: Ribbit <ribbit@eden-emu.dev>
2025-10-04 23:58:08 +02:00
7 changed files with 143 additions and 188 deletions

View File

@@ -10,6 +10,7 @@
#include <mutex>
#include <span>
#include <thread>
#include <type_traits>
#include <vector>
#include "common/assert.h"
@@ -681,22 +682,17 @@ struct Memory::Impl {
}
}
[[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
template<typename F, typename G>
[[nodiscard]] u8* GetPointerImpl(u64 vaddr, F&& on_unmapped, G&& on_rasterizer) const {
// AARCH64 masks the upper 16 bits of all memory accesses
vaddr = vaddr & 0xffffffffffffULL;
if (!AddressSpaceContains(*current_page_table, vaddr, 1)) [[unlikely]] {
on_unmapped();
return nullptr;
} else {
vaddr &= 0xffffffffffffULL;
if (AddressSpaceContains(*current_page_table, vaddr, 1)) [[likely]] {
// Avoid adding any extra logic to this fast-path block
const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
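// PageInfo packs a host pointer and a PageType into a single word; a
// non-zero extracted pointer means the page is directly mapped (fast path).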
if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) [[likely]] {
return reinterpret_cast<u8*>(pointer + vaddr);
} else {
switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
case Common::PageType::Unmapped:
on_unmapped();
return nullptr;
case Common::PageType::Memory:
ASSERT_MSG(false, "Mapped memory page without a pointer @ 0x{:016X}", vaddr);
return nullptr;
@@ -707,11 +703,18 @@ struct Memory::Impl {
on_rasterizer();
return host_ptr;
}
case Common::PageType::Unmapped: [[unlikely]] {
on_unmapped();
return nullptr;
}
default:
UNREACHABLE();
}
return nullptr;
}
} else {
on_unmapped();
return nullptr;
}
}
@@ -729,172 +732,38 @@ struct Memory::Impl {
GetInteger(vaddr), []() {}, []() {});
}
/**
* Reads a particular data type out of memory at the given virtual address.
*
* @param vaddr The virtual address to read the data type from.
*
* @tparam T The data type to read out of memory. This type *must* be
* trivially copyable, otherwise the behavior of this function
* is undefined.
*
* @returns The instance of T read from the specified virtual address.
*/
/// @brief Reads a particular data type out of memory at the given virtual address.
/// @param vaddr The virtual address to read the data type from.
/// @tparam T The data type to read out of memory.
/// @returns The instance of T read from the specified virtual address.
template <typename T>
T Read(Common::ProcessAddress vaddr) {
// Fast path for aligned reads of common sizes
inline T Read(Common::ProcessAddress vaddr) noexcept requires(std::is_trivially_copyable_v<T>) {
const u64 addr = GetInteger(vaddr);
if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
// 8-bit reads are always aligned
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read8 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*ptr);
}
return 0;
} else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
// Check alignment for 16-bit reads
if ((addr & 1) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read16 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u16*>(ptr));
}
}
} else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
// Check alignment for 32-bit reads
if ((addr & 3) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read32 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u32*>(ptr));
}
}
} else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
// Check alignment for 64-bit reads
if ((addr & 7) == 0) {
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read64 @ 0x{:016X}", addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
return static_cast<T>(*reinterpret_cast<const u64*>(ptr));
}
}
}
// Fall back to the general case for other types or unaligned access
T result = 0;
const u8* const ptr = GetPointerImpl(
addr,
[addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
},
[&]() { HandleRasterizerDownload(addr, sizeof(T)); });
if (ptr) {
if (auto const ptr = GetPointerImpl(addr, [addr]() {
LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, addr);
}, [&]() {
HandleRasterizerDownload(addr, sizeof(T));
}); ptr) [[likely]] {
// It may be tempting to rewrite this particular section to use "reinterpret_cast";
// after all, it's trivially copyable so surely it can be copied ov- Alignment.
// Remember, alignment. memcpy() will deal with all the alignment extremely fast.
T result{};
std::memcpy(&result, ptr, sizeof(T));
return result;
}
return result;
return T{};
}
/**
* Writes a particular data type to memory at the given virtual address.
*
* @param vaddr The virtual address to write the data type to.
*
* @tparam T The data type to write to memory. This type *must* be
* trivially copyable, otherwise the behavior of this function
* is undefined.
*/
/// @brief Writes a particular data type to memory at the given virtual address.
/// @param vaddr The virtual address to write the data type to.
/// @tparam T The data type to write to memory.
template <typename T>
void Write(Common::ProcessAddress vaddr, const T data) {
// Fast path for aligned writes of common sizes
inline void Write(Common::ProcessAddress vaddr, const T data) noexcept requires(std::is_trivially_copyable_v<T>) {
const u64 addr = GetInteger(vaddr);
if constexpr (std::is_same_v<T, u8> || std::is_same_v<T, s8>) {
// 8-bit writes are always aligned
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write8 @ 0x{:016X} = 0x{:02X}", addr,
static_cast<u8>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*ptr = static_cast<u8>(data);
}
return;
} else if constexpr (std::is_same_v<T, u16_le> || std::is_same_v<T, s16_le>) {
// Check alignment for 16-bit writes
if ((addr & 1) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write16 @ 0x{:016X} = 0x{:04X}", addr,
static_cast<u16>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u16*>(ptr) = static_cast<u16>(data);
return;
}
}
} else if constexpr (std::is_same_v<T, u32_le> || std::is_same_v<T, s32_le>) {
// Check alignment for 32-bit writes
if ((addr & 3) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write32 @ 0x{:016X} = 0x{:08X}", addr,
static_cast<u32>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u32*>(ptr) = static_cast<u32>(data);
return;
}
}
} else if constexpr (std::is_same_v<T, u64_le> || std::is_same_v<T, s64_le>) {
// Check alignment for 64-bit writes
if ((addr & 7) == 0) {
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write64 @ 0x{:016X} = 0x{:016X}", addr,
static_cast<u64>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
*reinterpret_cast<u64*>(ptr) = static_cast<u64>(data);
return;
}
}
}
// Fall back to the general case for other types or unaligned access
u8* const ptr = GetPointerImpl(
addr,
[addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
addr, static_cast<u64>(data));
},
[&]() { HandleRasterizerWrite(addr, sizeof(T)); });
if (ptr) {
if (auto const ptr = GetPointerImpl(addr, [addr, data]() {
LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8, addr, u64(data));
}, [&]() { HandleRasterizerWrite(addr, sizeof(T)); }); ptr) [[likely]]
std::memcpy(ptr, &data, sizeof(T));
}
}
template <typename T>

View File

@@ -1,3 +1,6 @@
# SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
# SPDX-License-Identifier: GPL-3.0-or-later
# SPDX-FileCopyrightText: 2018 yuzu Emulator Project
# SPDX-License-Identifier: GPL-2.0-or-later
@@ -21,7 +24,7 @@ add_executable(tests
create_target_directory_groups(tests)
target_link_libraries(tests PRIVATE common core input_common)
target_link_libraries(tests PRIVATE common core input_common video_core)
target_link_libraries(tests PRIVATE ${PLATFORM_LIBRARIES} Catch2::Catch2WithMain Threads::Threads)
add_test(NAME tests COMMAND tests)

View File

@@ -156,6 +156,8 @@ void MaxwellDMA::Launch() {
}
void MaxwellDMA::CopyBlockLinearToPitch() {
u32 bytes_per_pixel = 1;
DMA::ImageOperand src_operand;
src_operand.bytes_per_pixel = bytes_per_pixel;

View File

@@ -46,6 +46,38 @@ namespace Vulkan {
using VideoCommon::ImageViewType;
namespace {
[[nodiscard]] VkImageAspectFlags AspectMaskFromFormat(VideoCore::Surface::PixelFormat format) {
using VideoCore::Surface::SurfaceType;
switch (VideoCore::Surface::GetFormatType(format)) {
case SurfaceType::ColorTexture:
return VK_IMAGE_ASPECT_COLOR_BIT;
case SurfaceType::Depth:
return VK_IMAGE_ASPECT_DEPTH_BIT;
case SurfaceType::Stencil:
return VK_IMAGE_ASPECT_STENCIL_BIT;
case SurfaceType::DepthStencil:
return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
default:
return VK_IMAGE_ASPECT_COLOR_BIT;
}
}
[[nodiscard]] VkImageSubresourceRange SubresourceRangeFromView(const ImageView& image_view) {
auto range = image_view.range;
if ((image_view.flags & VideoCommon::ImageViewFlagBits::Slice) != VideoCommon::ImageViewFlagBits{}) {
range.base.layer = 0;
range.extent.layers = 1;
}
return VkImageSubresourceRange{
.aspectMask = AspectMaskFromFormat(image_view.format),
.baseMipLevel = static_cast<u32>(range.base.level),
.levelCount = static_cast<u32>(range.extent.levels),
.baseArrayLayer = static_cast<u32>(range.base.layer),
.layerCount = static_cast<u32>(range.extent.layers),
};
}
struct PushConstants {
std::array<float, 2> tex_scale;
std::array<float, 2> tex_offset;
@@ -417,6 +449,40 @@ void TransitionImageLayout(vk::CommandBuffer& cmdbuf, VkImage image, VkImageLayo
0, barrier);
}
void RecordShaderReadBarrier(Scheduler& scheduler, const ImageView& image_view) {
const VkImage image = image_view.ImageHandle();
const VkImageSubresourceRange subresource_range = SubresourceRangeFromView(image_view);
scheduler.RequestOutsideRenderPassOperationContext();
scheduler.Record([image, subresource_range](vk::CommandBuffer cmdbuf) {
const VkImageMemoryBarrier barrier{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = nullptr,
.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
VK_ACCESS_SHADER_WRITE_BIT |
VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_GENERAL,
.newLayout = VK_IMAGE_LAYOUT_GENERAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange = subresource_range,
};
cmdbuf.PipelineBarrier(
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_TRANSFER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
0,
barrier);
});
}
void BeginRenderPass(vk::CommandBuffer& cmdbuf, const Framebuffer* framebuffer) {
const VkRenderPass render_pass = framebuffer->RenderPass();
const VkFramebuffer framebuffer_handle = framebuffer->Handle();
@@ -484,7 +550,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
BlitImageHelper::~BlitImageHelper() = default;
void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView src_view,
void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
const Region2D& dst_region, const Region2D& src_region,
Tegra::Engines::Fermi2D::Filter filter,
Tegra::Engines::Fermi2D::Operation operation) {
@@ -496,10 +562,12 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView
const VkPipelineLayout layout = *one_texture_pipeline_layout;
const VkSampler sampler = is_linear ? *linear_sampler : *nearest_sampler;
const VkPipeline pipeline = FindOrEmplaceColorPipeline(key);
const VkImageView src_view = src_image_view.Handle(Shader::TextureType::Color2D);
RecordShaderReadBarrier(scheduler, src_image_view);
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([this, dst_region, src_region, pipeline, layout, sampler,
src_view](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
@@ -538,7 +606,7 @@ void BlitImageHelper::BlitColor(const Framebuffer* dst_framebuffer, VkImageView
}
void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
VkImageView src_depth_view, VkImageView src_stencil_view,
ImageView& src_image_view,
const Region2D& dst_region, const Region2D& src_region,
Tegra::Engines::Fermi2D::Filter filter,
Tegra::Engines::Fermi2D::Operation operation) {
@@ -554,10 +622,13 @@ void BlitImageHelper::BlitDepthStencil(const Framebuffer* dst_framebuffer,
const VkPipelineLayout layout = *two_textures_pipeline_layout;
const VkSampler sampler = *nearest_sampler;
const VkPipeline pipeline = FindOrEmplaceDepthStencilPipeline(key);
const VkImageView src_depth_view = src_image_view.DepthView();
const VkImageView src_stencil_view = src_image_view.StencilView();
RecordShaderReadBarrier(scheduler, src_image_view);
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([dst_region, src_region, pipeline, layout, sampler, src_depth_view,
src_stencil_view, this](vk::CommandBuffer cmdbuf) {
// TODO: Barriers
const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
src_stencil_view);
@@ -692,6 +763,7 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
const VkSampler sampler = *nearest_sampler;
const VkExtent2D extent = GetConversionExtent(src_image_view);
RecordShaderReadBarrier(scheduler, src_image_view);
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([pipeline, layout, sampler, src_view, extent, this](vk::CommandBuffer cmdbuf) {
const VkOffset2D offset{
@@ -717,7 +789,6 @@ void BlitImageHelper::Convert(VkPipeline pipeline, const Framebuffer* dst_frameb
const VkDescriptorSet descriptor_set = one_texture_descriptor_allocator.Commit();
UpdateOneTextureDescriptorSet(device, descriptor_set, sampler, src_view);
// TODO: Barriers
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
nullptr);
@@ -737,6 +808,7 @@ void BlitImageHelper::ConvertDepthStencil(VkPipeline pipeline, const Framebuffer
const VkSampler sampler = *nearest_sampler;
const VkExtent2D extent = GetConversionExtent(src_image_view);
RecordShaderReadBarrier(scheduler, src_image_view);
scheduler.RequestRenderpass(dst_framebuffer);
scheduler.Record([pipeline, layout, sampler, src_depth_view, src_stencil_view, extent,
this](vk::CommandBuffer cmdbuf) {
@@ -763,7 +835,6 @@ void BlitImageHelper::ConvertDepthStencil(VkPipeline pipeline, const Framebuffer
const VkDescriptorSet descriptor_set = two_textures_descriptor_allocator.Commit();
UpdateTwoTexturesDescriptorSet(device, descriptor_set, sampler, src_depth_view,
src_stencil_view);
// TODO: Barriers
cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
nullptr);

View File

@@ -1,4 +1,7 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-FileCopyrightText: Copyright 2025 Eden Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -43,7 +46,7 @@ public:
StateTracker& state_tracker, DescriptorPool& descriptor_pool);
~BlitImageHelper();
void BlitColor(const Framebuffer* dst_framebuffer, VkImageView src_image_view,
void BlitColor(const Framebuffer* dst_framebuffer, const ImageView& src_image_view,
const Region2D& dst_region, const Region2D& src_region,
Tegra::Engines::Fermi2D::Filter filter,
Tegra::Engines::Fermi2D::Operation operation);
@@ -52,9 +55,9 @@ public:
VkImage src_image, VkSampler src_sampler, const Region2D& dst_region,
const Region2D& src_region, const Extent3D& src_size);
void BlitDepthStencil(const Framebuffer* dst_framebuffer, VkImageView src_depth_view,
VkImageView src_stencil_view, const Region2D& dst_region,
const Region2D& src_region, Tegra::Engines::Fermi2D::Filter filter,
void BlitDepthStencil(const Framebuffer* dst_framebuffer, ImageView& src_image_view,
const Region2D& dst_region, const Region2D& src_region,
Tegra::Engines::Fermi2D::Filter filter,
Tegra::Engines::Fermi2D::Operation operation);
void ConvertD32ToR32(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);

View File

@@ -1086,8 +1086,8 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
return;
}
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT && !is_src_msaa && !is_dst_msaa) {
blit_image_helper.BlitColor(dst_framebuffer, src.Handle(Shader::TextureType::Color2D),
dst_region, src_region, filter, operation);
blit_image_helper.BlitColor(dst_framebuffer, src, dst_region, src_region, filter,
operation);
return;
}
ASSERT(src.format == dst.format);
@@ -1106,8 +1106,8 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst
}();
if (!can_blit_depth_stencil) {
UNIMPLEMENTED_IF(is_src_msaa || is_dst_msaa);
blit_image_helper.BlitDepthStencil(dst_framebuffer, src.DepthView(), src.StencilView(),
dst_region, src_region, filter, operation);
blit_image_helper.BlitDepthStencil(dst_framebuffer, src, dst_region, src_region,
filter, operation);
return;
}
}
@@ -1968,18 +1968,17 @@ bool Image::BlitScaleHelper(bool scale_up) {
blit_framebuffer =
std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
}
const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
runtime->blit_image_helper.BlitColor(blit_framebuffer.get(), color_view, dst_region,
runtime->blit_image_helper.BlitColor(blit_framebuffer.get(), *blit_view, dst_region,
src_region, operation, BLIT_OPERATION);
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (!blit_framebuffer) {
blit_framebuffer =
std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
}
runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
blit_view->StencilView(), dst_region,
src_region, operation, BLIT_OPERATION);
runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), *blit_view,
dst_region, src_region, operation,
BLIT_OPERATION);
} else {
// TODO: Use helper blits where applicable
flags &= ~ImageFlagBits::Rescaled;

View File

@@ -10,4 +10,12 @@
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4189 )
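// C4189 ("local variable is initialized but not referenced") fires inside vk_mem_alloc.h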
#endif
#include "vk_mem_alloc.h"
#ifdef _MSC_VER
#pragma warning( pop )
#endif