diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index eb18a4bd66..6f6e0c23a8 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -785,18 +785,13 @@ void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
 }
 
 template <class P>
-void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index,
-                                                   bool needs_bind) {
+void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index, bool needs_bind) {
+    ++channel_state->uniform_cache_shots[0];
     const Binding& binding = channel_state->uniform_buffers[stage][index];
     const DAddr device_addr = binding.device_addr;
     const u32 size = (std::min)(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
     Buffer& buffer = slot_buffers[binding.buffer_id];
     TouchBuffer(buffer, binding.buffer_id);
-    const bool sync_buffer = SynchronizeBuffer(buffer, device_addr, size);
-    if (sync_buffer) {
-        ++channel_state->uniform_cache_hits[0];
-    }
-    ++channel_state->uniform_cache_shots[0];
     const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
                                  size <= channel_state->uniform_buffer_skip_cache_size &&
                                  !memory_tracker.IsRegionGpuModified(device_addr, size);
@@ -827,7 +822,10 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
         device_memory.ReadBlockUnsafe(device_addr, span.data(), size);
         return;
     }
-
+    // Classic cached path
+    if (SynchronizeBuffer(buffer, device_addr, size)) {
+        ++channel_state->uniform_cache_hits[0];
+    }
     // Skip binding if it's not needed and if the bound buffer is not the fast version
     // This exists to avoid instances where the fast buffer is bound and a GPU write happens
     needs_bind |= HasFastUniformBufferBound(stage, binding_index);