gpu: Rework texture caching

We want to cache textures in the compositing color state, not in their
original color state. However, the compositing color state may change
(think multi-monitor setups).

So we additionally keep a cache per color state.
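
Concretely, that amounts to one general table for textures in their
original color state, plus a lazily created table per default color state
(a condensed sketch; the names match the diff below):

    GHashTable *texture_cache;                              /* original color state */
    GHashTable *ccs_texture_caches[GDK_COLOR_STATE_N_IDS];  /* per default color state */

Only the well-known default color states get a dedicated table; for any
other color state the per-colorstate lookup simply reports a miss.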

That means texture lookup is now a 3-step process:

1. Look up in the compositing color state's cache

2. Look up in the general cache

3. Upload, as sketched below
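
A condensed sketch of the resulting flow, mirroring
gsk_gpu_node_processor_add_texture_node () in the diff below
(self->ccs is the compositing color state):

    /* 1. the compositing color state's cache */
    image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, self->ccs);
    if (image == NULL)
      {
        /* 2. the general cache; NULL means "original color state" */
        image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, NULL);

        /* 3. upload on a complete miss */
        if (image == NULL)
          image = gsk_gpu_frame_upload_texture (self->frame, FALSE, texture);
      }

An image found via steps 2 or 3 may still need fixups (wrong color state,
straight alpha); gsk_gpu_get_texture_node_as_image () converts such an
image and re-caches the result under the compositing color state, so that
step 1 hits on the next frame.
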
Benjamin Otte 2024-07-07 05:12:43 +02:00
parent cf12503fec
commit 881954dfca
5 changed files with 121 additions and 43 deletions


@@ -7,8 +7,9 @@
 #include "gskgpuimageprivate.h"
 #include "gskgpuuploadopprivate.h"
 
-#include "gdk/gdktextureprivate.h"
+#include "gdk/gdkcolorstateprivate.h"
 #include "gdk/gdkprofilerprivate.h"
+#include "gdk/gdktextureprivate.h"
 
 #include "gsk/gskdebugprivate.h"
 #include "gsk/gskprivate.h"
@@ -42,6 +43,7 @@ struct _GskGpuCache
   GskGpuCached *last_cached;
 
   GHashTable *texture_cache;
+  GHashTable *ccs_texture_caches[GDK_COLOR_STATE_N_IDS];
 
   GHashTable *glyph_cache;
   GskGpuCachedAtlas *current_atlas;
@@ -257,22 +259,49 @@ struct _GskGpuCachedTexture
   GdkTexture *texture;
   GskGpuImage *image;
+  GdkColorState *color_state; /* no ref because global. May be NULL */
 };
 
+static GHashTable *
+gsk_gpu_cache_get_texture_hash_table (GskGpuCache   *cache,
+                                      GdkColorState *color_state)
+{
+  if (color_state == NULL)
+    {
+      return cache->texture_cache;
+    }
+  else if (GDK_IS_DEFAULT_COLOR_STATE (color_state))
+    {
+      GdkColorStateId id = GDK_DEFAULT_COLOR_STATE_ID (color_state);
+
+      if (cache->ccs_texture_caches[id] == NULL)
+        cache->ccs_texture_caches[id] = g_hash_table_new (g_direct_hash,
+                                                          g_direct_equal);
+      return cache->ccs_texture_caches[id];
+    }
+  else
+    {
+      return NULL;
+    }
+}
+
 static void
 gsk_gpu_cached_texture_free (GskGpuCache  *cache,
                              GskGpuCached *cached)
 {
   GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
+  GHashTable *texture_cache;
   gpointer key, value;
 
   g_clear_object (&self->image);
 
-  if (g_hash_table_steal_extended (cache->texture_cache, self->texture, &key, &value))
+  texture_cache = gsk_gpu_cache_get_texture_hash_table (cache, self->color_state);
+  if (g_hash_table_steal_extended (texture_cache, self->texture, &key, &value))
     {
       /* If the texture has been reused already, we put the entry back */
       if ((GskGpuCached *) value != cached)
-        g_hash_table_insert (cache->texture_cache, key, value);
+        g_hash_table_insert (texture_cache, key, value);
     }
 
   /* If the cached item itself is still in use by the texture, we leave
@@ -330,11 +359,13 @@ gsk_gpu_cached_texture_destroy_cb (gpointer data)
 }
 
 static GskGpuCachedTexture *
-gsk_gpu_cached_texture_new (GskGpuCache *cache,
-                            GdkTexture  *texture,
-                            GskGpuImage *image)
+gsk_gpu_cached_texture_new (GskGpuCache   *cache,
+                            GdkTexture    *texture,
+                            GskGpuImage   *image,
+                            GdkColorState *color_state)
 {
   GskGpuCachedTexture *self;
+  GHashTable *texture_cache;
 
   /* First, move any existing renderdata */
   self = gdk_texture_get_render_data (texture, cache);
@@ -342,12 +373,15 @@ gsk_gpu_cached_texture_new (GskGpuCache *cache,
     {
       gdk_texture_steal_render_data (texture);
       g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
-      g_hash_table_insert (cache->texture_cache, texture, self);
+      texture_cache = gsk_gpu_cache_get_texture_hash_table (cache, self->color_state);
+      g_assert (texture_cache != NULL);
+      g_hash_table_insert (texture_cache, texture, self);
     }
 
   self = gsk_gpu_cached_new (cache, &GSK_GPU_CACHED_TEXTURE_CLASS, NULL);
   self->texture = texture;
   self->image = g_object_ref (image);
+  self->color_state = color_state;
   ((GskGpuCached *)self)->pixels = gsk_gpu_image_get_width (image) * gsk_gpu_image_get_height (image);
   self->dead_pixels_counter = &cache->dead_texture_pixels;
   self->use_count = 2;
@@ -356,7 +390,9 @@ gsk_gpu_cached_texture_new (GskGpuCache *cache,
     {
       g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
-      g_hash_table_insert (cache->texture_cache, texture, self);
+      texture_cache = gsk_gpu_cache_get_texture_hash_table (cache, self->color_state);
+      g_assert (texture_cache != NULL);
+      g_hash_table_insert (texture_cache, texture, self);
     }
 
   return self;
@@ -734,15 +770,23 @@ gsk_gpu_cache_add_atlas_image (GskGpuCache *self,
 }
 
 GskGpuImage *
-gsk_gpu_cache_lookup_texture_image (GskGpuCache *self,
-                                    GdkTexture  *texture,
-                                    gint64       timestamp)
+gsk_gpu_cache_lookup_texture_image (GskGpuCache   *self,
+                                    GdkTexture    *texture,
+                                    gint64         timestamp,
+                                    GdkColorState *color_state)
 {
   GskGpuCachedTexture *cache;
+  GHashTable *texture_cache;
 
+  texture_cache = gsk_gpu_cache_get_texture_hash_table (self, color_state);
+  if (texture_cache == NULL)
+    return NULL;
+
   cache = gdk_texture_get_render_data (texture, self);
+  /* color_state_equal() isn't necessary here, and if we used it,
+   * we would have to check for NULL first */
-  if (cache == NULL)
-    cache = g_hash_table_lookup (self->texture_cache, texture);
+  if (cache == NULL || color_state != cache->color_state)
+    cache = g_hash_table_lookup (texture_cache, texture);
 
   if (!cache || !cache->image || gsk_gpu_cached_texture_is_invalid (cache))
     return NULL;
@@ -753,14 +797,16 @@ gsk_gpu_cache_lookup_texture_image (GskGpuCache *self,
 }
 
 void
-gsk_gpu_cache_cache_texture_image (GskGpuCache *self,
-                                   GdkTexture  *texture,
-                                   gint64       timestamp,
-                                   GskGpuImage *image)
+gsk_gpu_cache_cache_texture_image (GskGpuCache   *self,
+                                   GdkTexture    *texture,
+                                   gint64         timestamp,
+                                   GskGpuImage   *image,
+                                   GdkColorState *color_state)
 {
   GskGpuCachedTexture *cache;
 
-  cache = gsk_gpu_cached_texture_new (self, texture, image);
+  cache = gsk_gpu_cached_texture_new (self, texture, image, color_state);
   g_return_if_fail (cache != NULL);
 
   gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
 }


@@ -32,11 +32,13 @@ GskGpuImage *           gsk_gpu_cache_get_atlas_image           (GskGpuC
 GskGpuImage *           gsk_gpu_cache_lookup_texture_image      (GskGpuCache *self,
                                                                  GdkTexture *texture,
-                                                                 gint64 timestamp);
+                                                                 gint64 timestamp,
+                                                                 GdkColorState *color_state);
 void                    gsk_gpu_cache_cache_texture_image       (GskGpuCache *self,
                                                                  GdkTexture *texture,
                                                                  gint64 timestamp,
-                                                                 GskGpuImage *image);
+                                                                 GskGpuImage *image,
+                                                                 GdkColorState *color_state);
 
 typedef enum
 {


@@ -150,7 +150,7 @@ gsk_gpu_download_op_vk_command (GskGpuOp *op,
   GskGpuCache *cache = gsk_gpu_device_get_cache (device);
   VkDevice vk_device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE (device));
 
-  gsk_gpu_cache_cache_texture_image (cache, self->texture, gsk_gpu_frame_get_timestamp (frame), self->image);
+  gsk_gpu_cache_cache_texture_image (cache, self->texture, gsk_gpu_frame_get_timestamp (frame), self->image, NULL);
 
   if (gsk_vulkan_device_has_feature (GSK_VULKAN_DEVICE (device), GDK_VULKAN_FEATURE_SEMAPHORE_EXPORT))
     {


@@ -412,7 +412,7 @@ gsk_gpu_frame_upload_texture (GskGpuFrame *self,
   image = GSK_GPU_FRAME_GET_CLASS (self)->upload_texture (self, with_mipmap, texture);
 
   if (image)
-    gsk_gpu_cache_cache_texture_image (gsk_gpu_device_get_cache (priv->device), texture, priv->timestamp, image);
+    gsk_gpu_cache_cache_texture_image (gsk_gpu_device_get_cache (priv->device), texture, priv->timestamp, image, NULL);
 
   return image;
 }
@@ -685,7 +685,7 @@ gsk_gpu_frame_download_texture (GskGpuFrame *self,
   GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
   GskGpuImage *image;
 
-  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (priv->device), texture, timestamp);
+  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (priv->device), texture, timestamp, NULL);
   if (image == NULL)
     image = gsk_gpu_frame_upload_texture (self, FALSE, texture);
   if (image == NULL)


@@ -1699,10 +1699,16 @@ gsk_gpu_node_processor_add_texture_node (GskGpuNodeProcessor *self,
   texture = gsk_texture_node_get_texture (node);
   timestamp = gsk_gpu_frame_get_timestamp (self->frame);
 
-  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
-  if (image == NULL)
+  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, self->ccs);
+  if (image)
     {
-      image = gsk_gpu_frame_upload_texture (self->frame, FALSE, texture);
+      image_cs = self->ccs;
+    }
+  else
+    {
+      image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, NULL);
+      if (image == NULL)
+        image = gsk_gpu_frame_upload_texture (self->frame, FALSE, texture);
       if (image == NULL)
         {
           GSK_DEBUG (FALLBACK, "Unsupported texture format %u for size %dx%d",
@@ -1712,10 +1718,13 @@ gsk_gpu_node_processor_add_texture_node (GskGpuNodeProcessor *self,
           gsk_gpu_node_processor_add_cairo_node (self, node);
           return;
         }
+
+      image_cs = gdk_texture_get_color_state (texture);
+      if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB)
+        {
+          image_cs = gdk_color_state_get_no_srgb_tf (image_cs);
+          g_assert (image_cs);
+        }
     }
 
-  image_cs = gdk_texture_get_color_state (texture);
-  if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB)
-    image_cs = gdk_color_state_get_no_srgb_tf (image_cs);
-
   if (texture_node_should_mipmap (node, self->frame, &self->scale))
     {
@@ -1760,32 +1769,44 @@ gsk_gpu_get_texture_node_as_image (GskGpuFrame *frame,
   GdkTexture *texture = gsk_texture_node_get_texture (node);
   GskGpuDevice *device = gsk_gpu_frame_get_device (frame);
   gint64 timestamp = gsk_gpu_frame_get_timestamp (frame);
-  GdkColorState *image_cs = gdk_texture_get_color_state (texture);
+  GdkColorState *image_cs;
   GskGpuImage *image;
 
-  if (!gdk_color_state_equal (ccs, image_cs))
-    return gsk_gpu_get_node_as_image_via_offscreen (frame, ccs, clip_bounds, scale, node, out_bounds);
-
   if (texture_node_should_mipmap (node, frame, scale))
     return gsk_gpu_get_node_as_image_via_offscreen (frame, ccs, clip_bounds, scale, node, out_bounds);
 
-  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (device), texture, timestamp);
+  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (device), texture, timestamp, ccs);
+  if (image)
+    {
+      *out_bounds = node->bounds;
+      return image;
+    }
+
+  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (device), texture, timestamp, NULL);
   if (image == NULL)
     image = gsk_gpu_frame_upload_texture (frame, FALSE, texture);
+  image_cs = gdk_texture_get_color_state (texture);
+  if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB)
+    {
+      image_cs = gdk_color_state_get_no_srgb_tf (image_cs);
+      g_assert (image_cs);
+    }
 
   /* Happens e.g. for oversized textures */
   if (image == NULL)
     return gsk_gpu_get_node_as_image_via_offscreen (frame, ccs, clip_bounds, scale, node, out_bounds);
 
-  if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA)
+  if (!gdk_color_state_equal (ccs, image_cs) ||
+      gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA)
     {
       image = gsk_gpu_copy_image (frame, ccs, image, image_cs, FALSE);
       image_cs = ccs;
 
       /* We fixed up a cached texture, cache the fixed up version instead */
       gsk_gpu_cache_cache_texture_image (gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (frame)),
                                          texture,
                                          gsk_gpu_frame_get_timestamp (frame),
-                                         image);
+                                         image,
+                                         ccs);
     }
 
   *out_bounds = node->bounds;
@@ -1848,10 +1869,16 @@ gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
   timestamp = gsk_gpu_frame_get_timestamp (self->frame);
   need_mipmap = scaling_filter == GSK_SCALING_FILTER_TRILINEAR;
 
-  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
-  if (image == NULL)
+  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, self->ccs);
+  if (image)
     {
-      image = gsk_gpu_frame_upload_texture (self->frame, need_mipmap, texture);
+      image_cs = self->ccs;
+    }
+  else
+    {
+      image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp, NULL);
+      if (image == NULL)
+        image = gsk_gpu_frame_upload_texture (self->frame, need_mipmap, texture);
       if (image == NULL)
         {
           GSK_DEBUG (FALLBACK, "Unsupported texture format %u for size %dx%d",
@@ -1861,10 +1888,13 @@
           gsk_gpu_node_processor_add_cairo_node (self, node);
           return;
         }
+
+      image_cs = gdk_texture_get_color_state (texture);
+      if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB)
+        {
+          image_cs = gdk_color_state_get_no_srgb_tf (image_cs);
+          g_assert (image_cs);
+        }
     }
 
-  image_cs = gdk_texture_get_color_state (texture);
-  if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_SRGB)
-    image_cs = gdk_color_state_get_no_srgb_tf (image_cs);
-
   if ((gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA) ||
       (need_mipmap && !(gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_CAN_MIPMAP)) ||