gpu: Split cache and device
This is for 3 reasons:

1. Separation of concerns
   The device is meant to manage the Vulkan/GL device and check stuff
   like image sizes. Caching is not part of that.

2. Refcounting
   Images etc. want to reference the device, but the cache wants to
   reference images. If the cache is the device, that's a refcycle.

3. Flexibility
   It's now easier to implement >1 cache, say one per depth or one per
   color state.
This commit is contained in:
parent dd33a2f280
commit 71161b6352
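The refcounting argument is easiest to see from the caller side. The sketch below is not part of the commit; it only strings together two functions this commit introduces (gsk_gpu_device_get_cache and gsk_gpu_cache_lookup_texture_image) to show the one-way ownership that results: the cache holds a ref on the device, cached items hold refs on images, and the device merely manages its cache without owning a ref back.

/* Sketch only, assuming the GTK-internal headers below: no ref cycle anywhere */
static GskGpuImage *
lookup_for_frame (GskGpuDevice *device,
                  GdkTexture   *texture,
                  gint64        timestamp)
{
  /* created lazily; the device manages but does not ref the cache */
  GskGpuCache *cache = gsk_gpu_device_get_cache (device);

  /* returns a new ref on the image, or NULL on a cache miss */
  return gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
}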
gsk/gpu/gskgpucache.c (new file, 857 lines)

@@ -0,0 +1,857 @@
#include "config.h"

#include "gskgpucacheprivate.h"

#include "gskgpudeviceprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuimageprivate.h"
#include "gskgpuuploadopprivate.h"

#include "gdk/gdktextureprivate.h"
#include "gdk/gdkprofilerprivate.h"

#include "gsk/gskdebugprivate.h"
#include "gsk/gskprivate.h"

#define MAX_SLICES_PER_ATLAS 64

#define ATLAS_SIZE 1024

#define MAX_ATLAS_ITEM_SIZE 256

#define MAX_DEAD_PIXELS (ATLAS_SIZE * ATLAS_SIZE / 2)

G_STATIC_ASSERT (MAX_ATLAS_ITEM_SIZE < ATLAS_SIZE);
G_STATIC_ASSERT (MAX_DEAD_PIXELS < ATLAS_SIZE * ATLAS_SIZE);

typedef struct _GskGpuCached GskGpuCached;
typedef struct _GskGpuCachedClass GskGpuCachedClass;
typedef struct _GskGpuCachedAtlas GskGpuCachedAtlas;
typedef struct _GskGpuCachedGlyph GskGpuCachedGlyph;
typedef struct _GskGpuCachedTexture GskGpuCachedTexture;

struct _GskGpuCache
{
  GObject parent_instance;

  GskGpuDevice *device;

  GskGpuCached *first_cached;
  GskGpuCached *last_cached;

  GHashTable *texture_cache;
  GHashTable *glyph_cache;

  GskGpuCachedAtlas *current_atlas;

  /* atomic */ gsize dead_texture_pixels;
};

G_DEFINE_TYPE (GskGpuCache, gsk_gpu_cache, G_TYPE_OBJECT)

/* {{{ Cached base class */

struct _GskGpuCachedClass
{
  gsize size;

  void     (* free)           (GskGpuCache  *cache,
                               GskGpuCached *cached);
  gboolean (* should_collect) (GskGpuCache  *cache,
                               GskGpuCached *cached,
                               gint64        cache_timeout,
                               gint64        timestamp);
};

struct _GskGpuCached
{
  const GskGpuCachedClass *class;

  GskGpuCachedAtlas *atlas;
  GskGpuCached *next;
  GskGpuCached *prev;

  gint64 timestamp;
  gboolean stale;
  guint pixels;   /* For glyphs and textures, pixels. For atlases, dead pixels */
};

static inline void
mark_as_stale (GskGpuCached *cached,
               gboolean      stale)
{
  if (cached->stale != stale)
    {
      cached->stale = stale;

      if (cached->atlas)
        {
          if (stale)
            ((GskGpuCached *) cached->atlas)->pixels += cached->pixels;
          else
            ((GskGpuCached *) cached->atlas)->pixels -= cached->pixels;
        }
    }
}

static void
gsk_gpu_cached_free (GskGpuCache  *self,
                     GskGpuCached *cached)
{
  if (cached->next)
    cached->next->prev = cached->prev;
  else
    self->last_cached = cached->prev;
  if (cached->prev)
    cached->prev->next = cached->next;
  else
    self->first_cached = cached->next;

  mark_as_stale (cached, TRUE);

  cached->class->free (self, cached);
}

static gboolean
gsk_gpu_cached_should_collect (GskGpuCache  *cache,
                               GskGpuCached *cached,
                               gint64        cache_timeout,
                               gint64        timestamp)
{
  return cached->class->should_collect (cache, cached, cache_timeout, timestamp);
}

static gpointer
gsk_gpu_cached_new (GskGpuCache             *cache,
                    const GskGpuCachedClass *class,
                    GskGpuCachedAtlas       *atlas)
{
  GskGpuCached *cached;

  cached = g_malloc0 (class->size);

  cached->class = class;
  cached->atlas = atlas;

  cached->prev = cache->last_cached;
  cache->last_cached = cached;
  if (cached->prev)
    cached->prev->next = cached;
  else
    cache->first_cached = cached;

  return cached;
}

static void
gsk_gpu_cached_use (GskGpuCache  *self,
                    GskGpuCached *cached,
                    gint64        timestamp)
{
  cached->timestamp = timestamp;
  mark_as_stale (cached, FALSE);
}

static inline gboolean
gsk_gpu_cached_is_old (GskGpuCache  *self,
                       GskGpuCached *cached,
                       gint64        cache_timeout,
                       gint64        timestamp)
{
  if (cache_timeout < 0)
    return FALSE;
  else
    return timestamp - cached->timestamp > cache_timeout;
}

/* }}} */
/* {{{ CachedAtlas */

struct _GskGpuCachedAtlas
{
  GskGpuCached parent;

  GskGpuImage *image;

  gsize n_slices;
  struct {
    gsize width;
    gsize height;
  } slices[MAX_SLICES_PER_ATLAS];
};

static void
gsk_gpu_cached_atlas_free (GskGpuCache  *cache,
                           GskGpuCached *cached)
{
  GskGpuCachedAtlas *self = (GskGpuCachedAtlas *) cached;
  GskGpuCached *c, *next;

  /* Free all remaining glyphs on this atlas */
  for (c = cache->first_cached; c != NULL; c = next)
    {
      next = c->next;
      if (c->atlas == self)
        gsk_gpu_cached_free (cache, c);
    }

  if (cache->current_atlas == self)
    cache->current_atlas = NULL;

  g_object_unref (self->image);

  g_free (self);
}

static gboolean
gsk_gpu_cached_atlas_should_collect (GskGpuCache  *cache,
                                     GskGpuCached *cached,
                                     gint64        cache_timeout,
                                     gint64        timestamp)
{
  return cached->pixels > MAX_DEAD_PIXELS;
}

static const GskGpuCachedClass GSK_GPU_CACHED_ATLAS_CLASS =
{
  sizeof (GskGpuCachedAtlas),
  gsk_gpu_cached_atlas_free,
  gsk_gpu_cached_atlas_should_collect
};

static GskGpuCachedAtlas *
gsk_gpu_cached_atlas_new (GskGpuCache *cache)
{
  GskGpuCachedAtlas *self;

  self = gsk_gpu_cached_new (cache, &GSK_GPU_CACHED_ATLAS_CLASS, NULL);
  self->image = gsk_gpu_device_create_atlas_image (cache->device, ATLAS_SIZE, ATLAS_SIZE);

  return self;
}

/* }}} */
/* {{{ CachedTexture */

struct _GskGpuCachedTexture
{
  GskGpuCached parent;

  /* atomic */ int use_count; /* We count the use by the cache (via the linked
                               * list) and by the texture (via render data or
                               * weak ref).
                               */

  gsize *dead_pixels_counter;

  GdkTexture *texture;
  GskGpuImage *image;
};

static void
gsk_gpu_cached_texture_free (GskGpuCache  *cache,
                             GskGpuCached *cached)
{
  GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
  gpointer key, value;

  g_clear_object (&self->image);

  if (g_hash_table_steal_extended (cache->texture_cache, self->texture, &key, &value))
    {
      /* If the texture has been reused already, we put the entry back */
      if ((GskGpuCached *) value != cached)
        g_hash_table_insert (cache->texture_cache, key, value);
    }

  /* If the cached item itself is still in use by the texture, we leave
   * it to the weak ref or render data to free it.
   */
  if (g_atomic_int_dec_and_test (&self->use_count))
    {
      g_free (self);
      return;
    }
}

static inline gboolean
gsk_gpu_cached_texture_is_invalid (GskGpuCachedTexture *self)
{
  /* If the use count is less than 2, the original texture has died,
   * and the memory may have been reused for a new texture, so we
   * can't hand out the image that is for the original texture.
   */
  return g_atomic_int_get (&self->use_count) < 2;
}

static gboolean
gsk_gpu_cached_texture_should_collect (GskGpuCache  *cache,
                                       GskGpuCached *cached,
                                       gint64        cache_timeout,
                                       gint64        timestamp)
{
  GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;

  return gsk_gpu_cached_is_old (cache, cached, cache_timeout, timestamp) ||
         gsk_gpu_cached_texture_is_invalid (self);
}

static const GskGpuCachedClass GSK_GPU_CACHED_TEXTURE_CLASS =
{
  sizeof (GskGpuCachedTexture),
  gsk_gpu_cached_texture_free,
  gsk_gpu_cached_texture_should_collect
};

/* Note: this function can run in an arbitrary thread, so it can
 * only access things atomically
 */
static void
gsk_gpu_cached_texture_destroy_cb (gpointer data)
{
  GskGpuCachedTexture *self = data;

  if (!gsk_gpu_cached_texture_is_invalid (self))
    g_atomic_pointer_add (self->dead_pixels_counter, ((GskGpuCached *) self)->pixels);

  if (g_atomic_int_dec_and_test (&self->use_count))
    g_free (self);
}

static GskGpuCachedTexture *
gsk_gpu_cached_texture_new (GskGpuCache *cache,
                            GdkTexture  *texture,
                            GskGpuImage *image)
{
  GskGpuCachedTexture *self;

  if (gdk_texture_get_render_data (texture, cache))
    gdk_texture_clear_render_data (texture);
  else if ((self = g_hash_table_lookup (cache->texture_cache, texture)))
    g_hash_table_remove (cache->texture_cache, texture);

  self = gsk_gpu_cached_new (cache, &GSK_GPU_CACHED_TEXTURE_CLASS, NULL);
  self->texture = texture;
  self->image = g_object_ref (image);
  ((GskGpuCached *) self)->pixels = gsk_gpu_image_get_width (image) * gsk_gpu_image_get_height (image);
  self->dead_pixels_counter = &cache->dead_texture_pixels;
  self->use_count = 2;

  if (!gdk_texture_set_render_data (texture, cache, self, gsk_gpu_cached_texture_destroy_cb))
    {
      g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);

      g_hash_table_insert (cache->texture_cache, texture, self);
    }

  return self;
}

/* }}} */
/* {{{ CachedGlyph */

struct _GskGpuCachedGlyph
{
  GskGpuCached parent;

  PangoFont *font;
  PangoGlyph glyph;
  GskGpuGlyphLookupFlags flags;
  float scale;

  GskGpuImage *image;
  graphene_rect_t bounds;
  graphene_point_t origin;
};

static void
gsk_gpu_cached_glyph_free (GskGpuCache  *cache,
                           GskGpuCached *cached)
{
  GskGpuCachedGlyph *self = (GskGpuCachedGlyph *) cached;

  g_hash_table_remove (cache->glyph_cache, self);

  g_object_unref (self->font);
  g_object_unref (self->image);

  g_free (self);
}

static gboolean
gsk_gpu_cached_glyph_should_collect (GskGpuCache  *cache,
                                     GskGpuCached *cached,
                                     gint64        cache_timeout,
                                     gint64        timestamp)
{
  if (gsk_gpu_cached_is_old (cache, cached, cache_timeout, timestamp))
    {
      if (cached->atlas)
        mark_as_stale (cached, TRUE);
      else
        return TRUE;
    }

  /* Glyphs are only collected when their atlas is freed */
  return FALSE;
}

static guint
gsk_gpu_cached_glyph_hash (gconstpointer data)
{
  const GskGpuCachedGlyph *glyph = data;

  return GPOINTER_TO_UINT (glyph->font) ^
         glyph->glyph ^
         (glyph->flags << 24) ^
         ((guint) glyph->scale * PANGO_SCALE);
}

static gboolean
gsk_gpu_cached_glyph_equal (gconstpointer v1,
                            gconstpointer v2)
{
  const GskGpuCachedGlyph *glyph1 = v1;
  const GskGpuCachedGlyph *glyph2 = v2;

  return glyph1->font == glyph2->font
      && glyph1->glyph == glyph2->glyph
      && glyph1->flags == glyph2->flags
      && glyph1->scale == glyph2->scale;
}

static const GskGpuCachedClass GSK_GPU_CACHED_GLYPH_CLASS =
{
  sizeof (GskGpuCachedGlyph),
  gsk_gpu_cached_glyph_free,
  gsk_gpu_cached_glyph_should_collect
};

/* }}} */
/* {{{ GskGpuCache */

static void
print_cache_stats (GskGpuCache *self)
{
  GskGpuCached *cached;
  guint glyphs = 0;
  guint stale_glyphs = 0;
  guint textures = 0;
  guint atlases = 0;
  GString *ratios = g_string_new ("");

  for (cached = self->first_cached; cached != NULL; cached = cached->next)
    {
      if (cached->class == &GSK_GPU_CACHED_GLYPH_CLASS)
        {
          glyphs++;
          if (cached->stale)
            stale_glyphs++;
        }
      else if (cached->class == &GSK_GPU_CACHED_TEXTURE_CLASS)
        {
          textures++;
        }
      else if (cached->class == &GSK_GPU_CACHED_ATLAS_CLASS)
        {
          double ratio;

          atlases++;

          ratio = (double) cached->pixels / (double) (ATLAS_SIZE * ATLAS_SIZE);

          if (ratios->len == 0)
            g_string_append (ratios, " (ratios ");
          else
            g_string_append (ratios, ", ");
          g_string_append_printf (ratios, "%.2f", ratio);
        }
    }

  if (ratios->len > 0)
    g_string_append (ratios, ")");

  gdk_debug_message ("Cached items\n"
                     "  glyphs:   %5u (%u stale)\n"
                     "  textures: %5u (%u in hash)\n"
                     "  atlases:  %5u%s",
                     glyphs, stale_glyphs,
                     textures, g_hash_table_size (self->texture_cache),
                     atlases, ratios->str);

  g_string_free (ratios, TRUE);
}

/* Returns TRUE if everything was GC'ed */
gboolean
gsk_gpu_cache_gc (GskGpuCache *self,
                  gint64       cache_timeout,
                  gint64       timestamp)
{
  GskGpuCached *cached, *prev;
  gint64 before G_GNUC_UNUSED = GDK_PROFILER_CURRENT_TIME;

  /* We walk the cache from the end so we don't end up with prev
   * being a leftover glyph on the atlas we are freeing
   */
  for (cached = self->last_cached; cached != NULL; cached = prev)
    {
      prev = cached->prev;
      if (gsk_gpu_cached_should_collect (self, cached, cache_timeout, timestamp))
        gsk_gpu_cached_free (self, cached);
    }

  g_atomic_pointer_set (&self->dead_texture_pixels, 0);

  if (GSK_DEBUG_CHECK (GLYPH_CACHE))
    print_cache_stats (self);

  gdk_profiler_end_mark (before, "Glyph cache GC", NULL);

  return self->last_cached == NULL;
}

gsize
gsk_gpu_cache_get_dead_texture_pixels (GskGpuCache *self)
{
  return GPOINTER_TO_SIZE (g_atomic_pointer_get (&self->dead_texture_pixels));
}

static void
gsk_gpu_cache_clear_cache (GskGpuCache *self)
{
  for (GskGpuCached *cached = self->first_cached; cached; cached = cached->next)
    {
      if (cached->prev == NULL)
        g_assert (self->first_cached == cached);
      else
        g_assert (cached->prev->next == cached);
      if (cached->next == NULL)
        g_assert (self->last_cached == cached);
      else
        g_assert (cached->next->prev == cached);
    }

  /* We clear the cache from the end so glyphs get freed before their atlas */
  while (self->last_cached)
    gsk_gpu_cached_free (self, self->last_cached);

  g_assert (self->last_cached == NULL);
}

static void
gsk_gpu_cache_dispose (GObject *object)
{
  GskGpuCache *self = GSK_GPU_CACHE (object);

  gsk_gpu_cache_clear_cache (self);
  g_hash_table_unref (self->glyph_cache);
  g_hash_table_unref (self->texture_cache);

  G_OBJECT_CLASS (gsk_gpu_cache_parent_class)->dispose (object);
}

static void
gsk_gpu_cache_finalize (GObject *object)
{
  GskGpuCache *self = GSK_GPU_CACHE (object);

  g_object_unref (self->device);

  G_OBJECT_CLASS (gsk_gpu_cache_parent_class)->finalize (object);
}

static void
gsk_gpu_cache_class_init (GskGpuCacheClass *klass)
{
  GObjectClass *object_class = G_OBJECT_CLASS (klass);

  object_class->dispose = gsk_gpu_cache_dispose;
  object_class->finalize = gsk_gpu_cache_finalize;
}

static void
gsk_gpu_cache_init (GskGpuCache *self)
{
  self->glyph_cache = g_hash_table_new (gsk_gpu_cached_glyph_hash,
                                        gsk_gpu_cached_glyph_equal);
  self->texture_cache = g_hash_table_new (g_direct_hash,
                                          g_direct_equal);
}

/* This rounds up to the next number that has <= 2 bits set:
 * 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, ...
 * That is roughly sqrt(2), so it should limit waste
 */
static gsize
round_up_atlas_size (gsize num)
{
  gsize storage = g_bit_storage (num);

  num = num + (((1 << storage) - 1) >> 2);
  num &= (((gsize) 7) << storage) >> 2;

  return num;
}

static gboolean
gsk_gpu_cached_atlas_allocate (GskGpuCachedAtlas *atlas,
                               gsize              width,
                               gsize              height,
                               gsize             *out_x,
                               gsize             *out_y)
{
  gsize i;
  gsize waste, slice_waste;
  gsize best_slice;
  gsize y, best_y;
  gboolean can_add_slice;

  best_y = 0;
  best_slice = G_MAXSIZE;
  can_add_slice = atlas->n_slices < MAX_SLICES_PER_ATLAS;
  if (can_add_slice)
    waste = height; /* Require less than 100% waste */
  else
    waste = G_MAXSIZE; /* Accept any slice, we can't make better ones */

  for (i = 0, y = 0; i < atlas->n_slices; y += atlas->slices[i].height, i++)
    {
      if (atlas->slices[i].height < height || ATLAS_SIZE - atlas->slices[i].width < width)
        continue;

      slice_waste = atlas->slices[i].height - height;
      if (slice_waste < waste)
        {
          waste = slice_waste;
          best_slice = i;
          best_y = y;
          if (waste == 0)
            break;
        }
    }

  if (best_slice >= i && i == atlas->n_slices)
    {
      gsize slice_height;

      if (!can_add_slice)
        return FALSE;

      slice_height = round_up_atlas_size (MAX (height, 4));
      if (slice_height > ATLAS_SIZE - y)
        return FALSE;

      atlas->n_slices++;
      if (atlas->n_slices == MAX_SLICES_PER_ATLAS)
        slice_height = ATLAS_SIZE - y;

      atlas->slices[i].width = 0;
      atlas->slices[i].height = slice_height;
      best_y = y;
      best_slice = i;
    }

  *out_x = atlas->slices[best_slice].width;
  *out_y = best_y;

  atlas->slices[best_slice].width += width;
  g_assert (atlas->slices[best_slice].width <= ATLAS_SIZE);

  return TRUE;
}

static void
gsk_gpu_cache_ensure_atlas (GskGpuCache *self,
                            gboolean     recreate)
{
  if (self->current_atlas && !recreate)
    return;

  self->current_atlas = gsk_gpu_cached_atlas_new (self);
}

GskGpuImage *
gsk_gpu_cache_get_atlas_image (GskGpuCache *self)
{
  gsk_gpu_cache_ensure_atlas (self, FALSE);

  return self->current_atlas->image;
}

static GskGpuImage *
gsk_gpu_cache_add_atlas_image (GskGpuCache *self,
                               gsize        width,
                               gsize        height,
                               gsize       *out_x,
                               gsize       *out_y)
{
  if (width > MAX_ATLAS_ITEM_SIZE || height > MAX_ATLAS_ITEM_SIZE)
    return NULL;

  gsk_gpu_cache_ensure_atlas (self, FALSE);

  if (gsk_gpu_cached_atlas_allocate (self->current_atlas, width, height, out_x, out_y))
    return self->current_atlas->image;

  gsk_gpu_cache_ensure_atlas (self, TRUE);

  if (gsk_gpu_cached_atlas_allocate (self->current_atlas, width, height, out_x, out_y))
    return self->current_atlas->image;

  return NULL;
}

GskGpuImage *
gsk_gpu_cache_lookup_texture_image (GskGpuCache *self,
                                    GdkTexture  *texture,
                                    gint64       timestamp)
{
  GskGpuCachedTexture *cache;

  cache = gdk_texture_get_render_data (texture, self);
  if (cache == NULL)
    cache = g_hash_table_lookup (self->texture_cache, texture);

  if (!cache || !cache->image || gsk_gpu_cached_texture_is_invalid (cache))
    return NULL;

  gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);

  return g_object_ref (cache->image);
}

void
gsk_gpu_cache_cache_texture_image (GskGpuCache *self,
                                   GdkTexture  *texture,
                                   gint64       timestamp,
                                   GskGpuImage *image)
{
  GskGpuCachedTexture *cache;

  cache = gsk_gpu_cached_texture_new (self, texture, image);

  gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
}

GskGpuImage *
gsk_gpu_cache_lookup_glyph_image (GskGpuCache            *self,
                                  GskGpuFrame            *frame,
                                  PangoFont              *font,
                                  PangoGlyph              glyph,
                                  GskGpuGlyphLookupFlags  flags,
                                  float                   scale,
                                  graphene_rect_t        *out_bounds,
                                  graphene_point_t       *out_origin)
{
  GskGpuCachedGlyph lookup = {
    .font = font,
    .glyph = glyph,
    .flags = flags,
    .scale = scale
  };
  GskGpuCachedGlyph *cache;
  PangoRectangle ink_rect;
  graphene_rect_t rect;
  graphene_point_t origin;
  GskGpuImage *image;
  gsize atlas_x, atlas_y, padding;
  float subpixel_x, subpixel_y;
  PangoFont *scaled_font;
  cairo_hint_metrics_t hint_metrics;

  cache = g_hash_table_lookup (self->glyph_cache, &lookup);
  if (cache)
    {
      gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));

      *out_bounds = cache->bounds;
      *out_origin = cache->origin;
      return cache->image;
    }

  /* The combination of hint-style != none and hint-metrics == off
   * leads to broken rendering with some fonts.
   */
  if (gsk_font_get_hint_style (font) != CAIRO_HINT_STYLE_NONE)
    hint_metrics = CAIRO_HINT_METRICS_ON;
  else
    hint_metrics = CAIRO_HINT_METRICS_DEFAULT;

  scaled_font = gsk_reload_font (font, scale, hint_metrics, CAIRO_HINT_STYLE_DEFAULT, CAIRO_ANTIALIAS_DEFAULT);

  subpixel_x = (flags & 3) / 4.f;
  subpixel_y = ((flags >> 2) & 3) / 4.f;
  pango_font_get_glyph_extents (scaled_font, glyph, &ink_rect, NULL);
  origin.x = floor (ink_rect.x * 1.0 / PANGO_SCALE + subpixel_x);
  origin.y = floor (ink_rect.y * 1.0 / PANGO_SCALE + subpixel_y);
  rect.size.width = ceil ((ink_rect.x + ink_rect.width) * 1.0 / PANGO_SCALE + subpixel_x) - origin.x;
  rect.size.height = ceil ((ink_rect.y + ink_rect.height) * 1.0 / PANGO_SCALE + subpixel_y) - origin.y;
  padding = 1;

  image = gsk_gpu_cache_add_atlas_image (self,
                                         rect.size.width + 2 * padding, rect.size.height + 2 * padding,
                                         &atlas_x, &atlas_y);
  if (image)
    {
      g_object_ref (image);
      rect.origin.x = atlas_x + padding;
      rect.origin.y = atlas_y + padding;
      cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, self->current_atlas);
    }
  else
    {
      image = gsk_gpu_device_create_upload_image (self->device, FALSE, GDK_MEMORY_DEFAULT, rect.size.width, rect.size.height);
      rect.origin.x = 0;
      rect.origin.y = 0;
      padding = 0;
      cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, NULL);
    }

  cache->font = g_object_ref (font);
  cache->glyph = glyph;
  cache->flags = flags;
  cache->scale = scale;
  cache->bounds = rect;
  cache->image = image;
  cache->origin = GRAPHENE_POINT_INIT (- origin.x + subpixel_x,
                                       - origin.y + subpixel_y);
  ((GskGpuCached *) cache)->pixels = (rect.size.width + 2 * padding) * (rect.size.height + 2 * padding);

  gsk_gpu_upload_glyph_op (frame,
                           cache->image,
                           scaled_font,
                           glyph,
                           &(cairo_rectangle_int_t) {
                               .x = rect.origin.x - padding,
                               .y = rect.origin.y - padding,
                               .width = rect.size.width + 2 * padding,
                               .height = rect.size.height + 2 * padding,
                           },
                           &GRAPHENE_POINT_INIT (cache->origin.x + padding,
                                                 cache->origin.y + padding));

  g_hash_table_insert (self->glyph_cache, cache, cache);
  gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));

  *out_bounds = cache->bounds;
  *out_origin = cache->origin;

  g_object_unref (scaled_font);

  return cache->image;
}

GskGpuCache *
gsk_gpu_cache_new (GskGpuDevice *device)
{
  GskGpuCache *self;

  self = g_object_new (GSK_TYPE_GPU_CACHE, NULL);
  self->device = g_object_ref (device);

  return self;
}

/* }}} */
/* vim:set foldmethod=marker expandtab: */
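The round_up_atlas_size() helper above drives the slice heights, and its "<= 2 bits set" claim is easy to sanity-check with concrete numbers. A standalone check (not part of the commit, requires only GLib):

#include <glib.h>

/* copied verbatim from gskgpucache.c above */
static gsize
round_up_atlas_size (gsize num)
{
  gsize storage = g_bit_storage (num);

  num = num + (((1 << storage) - 1) >> 2);
  num &= (((gsize) 7) << storage) >> 2;

  return num;
}

int
main (void)
{
  gsize in[] = { 5, 9, 13, 100 };

  /* prints 5 -> 6, 9 -> 12, 13 -> 16, 100 -> 128; every result has at
   * most 2 bits set, so consecutive slice heights grow by roughly sqrt(2) */
  for (guint i = 0; i < G_N_ELEMENTS (in); i++)
    g_print ("%" G_GSIZE_FORMAT " -> %" G_GSIZE_FORMAT "\n",
             in[i], round_up_atlas_size (in[i]));

  return 0;
}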
gsk/gpu/gskgpucacheprivate.h (new file, 63 lines)

@@ -0,0 +1,63 @@
#pragma once

#include "gskgputypesprivate.h"

#include <graphene.h>

G_BEGIN_DECLS

#define GSK_TYPE_GPU_CACHE (gsk_gpu_cache_get_type ())
#define GSK_GPU_CACHE(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), GSK_TYPE_GPU_CACHE, GskGpuCache))
#define GSK_GPU_CACHE_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSK_TYPE_GPU_CACHE, GskGpuCacheClass))
#define GSK_IS_GPU_CACHE(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), GSK_TYPE_GPU_CACHE))
#define GSK_IS_GPU_CACHE_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSK_TYPE_GPU_CACHE))
#define GSK_GPU_CACHE_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), GSK_TYPE_GPU_CACHE, GskGpuCacheClass))

typedef struct _GskGpuCacheClass GskGpuCacheClass;

struct _GskGpuCacheClass
{
  GObjectClass parent_class;
};

GType           gsk_gpu_cache_get_type                  (void) G_GNUC_CONST;

GskGpuCache *   gsk_gpu_cache_new                       (GskGpuDevice *device);

gboolean        gsk_gpu_cache_gc                        (GskGpuCache *self,
                                                         gint64       cache_timeout,
                                                         gint64       timestamp);
gsize           gsk_gpu_cache_get_dead_texture_pixels   (GskGpuCache *self);
GskGpuImage *   gsk_gpu_cache_get_atlas_image           (GskGpuCache *self);

GskGpuImage *   gsk_gpu_cache_lookup_texture_image      (GskGpuCache *self,
                                                         GdkTexture  *texture,
                                                         gint64       timestamp);
void            gsk_gpu_cache_cache_texture_image       (GskGpuCache *self,
                                                         GdkTexture  *texture,
                                                         gint64       timestamp,
                                                         GskGpuImage *image);

typedef enum
{
  GSK_GPU_GLYPH_X_OFFSET_1 = 0x1,
  GSK_GPU_GLYPH_X_OFFSET_2 = 0x2,
  GSK_GPU_GLYPH_X_OFFSET_3 = 0x3,
  GSK_GPU_GLYPH_Y_OFFSET_1 = 0x4,
  GSK_GPU_GLYPH_Y_OFFSET_2 = 0x8,
  GSK_GPU_GLYPH_Y_OFFSET_3 = 0xC
} GskGpuGlyphLookupFlags;

GskGpuImage *   gsk_gpu_cache_lookup_glyph_image        (GskGpuCache            *self,
                                                         GskGpuFrame            *frame,
                                                         PangoFont              *font,
                                                         PangoGlyph              glyph,
                                                         GskGpuGlyphLookupFlags  flags,
                                                         float                   scale,
                                                         graphene_rect_t        *out_bounds,
                                                         graphene_point_t       *out_origin);


G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuCache, g_object_unref)

G_END_DECLS
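The GskGpuGlyphLookupFlags values pack two 2-bit fields: bits 0 and 1 select a horizontal subpixel offset in quarter pixels, bits 2 and 3 the vertical one. That matches the decoding in gsk_gpu_cache_lookup_glyph_image() above; a small illustration (not in the commit):

/* decoding the packed flags, exactly as the lookup code does */
GskGpuGlyphLookupFlags flags = GSK_GPU_GLYPH_X_OFFSET_2 | GSK_GPU_GLYPH_Y_OFFSET_1; /* 0x2 | 0x4 = 0x6 */
float subpixel_x = (flags & 3) / 4.f;        /* 2 / 4 = 0.50 pixel */
float subpixel_y = ((flags >> 2) & 3) / 4.f; /* 1 / 4 = 0.25 pixel */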
@ -2,6 +2,7 @@
|
||||
|
||||
#include "gskgpudeviceprivate.h"
|
||||
|
||||
#include "gskgpucacheprivate.h"
|
||||
#include "gskgpuframeprivate.h"
|
||||
#include "gskgpuimageprivate.h"
|
||||
#include "gskgpuuploadopprivate.h"
|
||||
@ -13,24 +14,8 @@
|
||||
#include "gsk/gskdebugprivate.h"
|
||||
#include "gsk/gskprivate.h"
|
||||
|
||||
#define MAX_SLICES_PER_ATLAS 64
|
||||
|
||||
#define ATLAS_SIZE 1024
|
||||
|
||||
#define MAX_ATLAS_ITEM_SIZE 256
|
||||
|
||||
#define MAX_DEAD_PIXELS (ATLAS_SIZE * ATLAS_SIZE / 2)
|
||||
|
||||
#define CACHE_TIMEOUT 15 /* seconds */
|
||||
|
||||
G_STATIC_ASSERT (MAX_ATLAS_ITEM_SIZE < ATLAS_SIZE);
|
||||
G_STATIC_ASSERT (MAX_DEAD_PIXELS < ATLAS_SIZE * ATLAS_SIZE);
|
||||
|
||||
typedef struct _GskGpuCached GskGpuCached;
|
||||
typedef struct _GskGpuCachedClass GskGpuCachedClass;
|
||||
typedef struct _GskGpuCachedAtlas GskGpuCachedAtlas;
|
||||
typedef struct _GskGpuCachedGlyph GskGpuCachedGlyph;
|
||||
typedef struct _GskGpuCachedTexture GskGpuCachedTexture;
|
||||
typedef struct _GskGpuDevicePrivate GskGpuDevicePrivate;
|
||||
|
||||
struct _GskGpuDevicePrivate
|
||||
@ -38,484 +23,36 @@ struct _GskGpuDevicePrivate
|
||||
GdkDisplay *display;
|
||||
gsize max_image_size;
|
||||
|
||||
GskGpuCached *first_cached;
|
||||
GskGpuCached *last_cached;
|
||||
GskGpuCache *cache; /* we don't own a ref, but manage the cache */
|
||||
guint cache_gc_source;
|
||||
int cache_timeout; /* in seconds, or -1 to disable gc */
|
||||
|
||||
GHashTable *texture_cache;
|
||||
GHashTable *glyph_cache;
|
||||
|
||||
GskGpuCachedAtlas *current_atlas;
|
||||
|
||||
/* atomic */ gsize dead_texture_pixels;
|
||||
};
|
||||
|
||||
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDevice, gsk_gpu_device, G_TYPE_OBJECT)
|
||||
|
||||
/* {{{ Cached base class */
|
||||
|
||||
struct _GskGpuCachedClass
|
||||
{
|
||||
gsize size;
|
||||
|
||||
void (* free) (GskGpuDevice *device,
|
||||
GskGpuCached *cached);
|
||||
gboolean (* should_collect) (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp);
|
||||
};
|
||||
|
||||
struct _GskGpuCached
|
||||
{
|
||||
const GskGpuCachedClass *class;
|
||||
|
||||
GskGpuCachedAtlas *atlas;
|
||||
GskGpuCached *next;
|
||||
GskGpuCached *prev;
|
||||
|
||||
gint64 timestamp;
|
||||
gboolean stale;
|
||||
guint pixels; /* For glyphs and textures, pixels. For atlases, dead pixels */
|
||||
};
|
||||
|
||||
static inline void
|
||||
mark_as_stale (GskGpuCached *cached,
|
||||
gboolean stale)
|
||||
{
|
||||
if (cached->stale != stale)
|
||||
{
|
||||
cached->stale = stale;
|
||||
|
||||
if (cached->atlas)
|
||||
{
|
||||
if (stale)
|
||||
((GskGpuCached *) cached->atlas)->pixels += cached->pixels;
|
||||
else
|
||||
((GskGpuCached *) cached->atlas)->pixels -= cached->pixels;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_cached_free (GskGpuDevice *device,
|
||||
GskGpuCached *cached)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
|
||||
if (cached->next)
|
||||
cached->next->prev = cached->prev;
|
||||
else
|
||||
priv->last_cached = cached->prev;
|
||||
if (cached->prev)
|
||||
cached->prev->next = cached->next;
|
||||
else
|
||||
priv->first_cached = cached->next;
|
||||
|
||||
mark_as_stale (cached, TRUE);
|
||||
|
||||
cached->class->free (device, cached);
|
||||
}
|
||||
|
||||
/* Returns TRUE if everything was GC'ed */
|
||||
static gboolean
|
||||
gsk_gpu_cached_should_collect (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
return cached->class->should_collect (device, cached, timestamp);
|
||||
}
|
||||
|
||||
static gpointer
|
||||
gsk_gpu_cached_new (GskGpuDevice *device,
|
||||
const GskGpuCachedClass *class,
|
||||
GskGpuCachedAtlas *atlas)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
GskGpuCached *cached;
|
||||
|
||||
cached = g_malloc0 (class->size);
|
||||
|
||||
cached->class = class;
|
||||
cached->atlas = atlas;
|
||||
|
||||
cached->prev = priv->last_cached;
|
||||
priv->last_cached = cached;
|
||||
if (cached->prev)
|
||||
cached->prev->next = cached;
|
||||
else
|
||||
priv->first_cached = cached;
|
||||
|
||||
return cached;
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_cached_use (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
cached->timestamp = timestamp;
|
||||
mark_as_stale (cached, FALSE);
|
||||
}
|
||||
|
||||
static inline gboolean
|
||||
gsk_gpu_cached_is_old (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
|
||||
if (priv->cache_timeout < 0)
|
||||
return FALSE;
|
||||
else
|
||||
return timestamp - cached->timestamp > priv->cache_timeout * G_TIME_SPAN_SECOND;
|
||||
}
|
||||
|
||||
/* }}} */
|
||||
/* {{{ CachedAtlas */
|
||||
|
||||
struct _GskGpuCachedAtlas
|
||||
{
|
||||
GskGpuCached parent;
|
||||
|
||||
GskGpuImage *image;
|
||||
|
||||
gsize n_slices;
|
||||
struct {
|
||||
gsize width;
|
||||
gsize height;
|
||||
} slices[MAX_SLICES_PER_ATLAS];
|
||||
};
|
||||
|
||||
static void
|
||||
gsk_gpu_cached_atlas_free (GskGpuDevice *device,
|
||||
GskGpuCached *cached)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
GskGpuCachedAtlas *self = (GskGpuCachedAtlas *) cached;
|
||||
GskGpuCached *c, *next;
|
||||
|
||||
/* Free all remaining glyphs on this atlas */
|
||||
for (c = priv->first_cached; c != NULL; c = next)
|
||||
{
|
||||
next = c->next;
|
||||
if (c->atlas == self)
|
||||
gsk_gpu_cached_free (device, c);
|
||||
}
|
||||
|
||||
if (priv->current_atlas == self)
|
||||
priv->current_atlas = NULL;
|
||||
|
||||
g_object_unref (self->image);
|
||||
|
||||
g_free (self);
|
||||
}
|
||||
|
||||
static gboolean
|
||||
gsk_gpu_cached_atlas_should_collect (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
return cached->pixels > MAX_DEAD_PIXELS;
|
||||
}
|
||||
|
||||
static const GskGpuCachedClass GSK_GPU_CACHED_ATLAS_CLASS =
|
||||
{
|
||||
sizeof (GskGpuCachedAtlas),
|
||||
gsk_gpu_cached_atlas_free,
|
||||
gsk_gpu_cached_atlas_should_collect
|
||||
};
|
||||
|
||||
static GskGpuCachedAtlas *
|
||||
gsk_gpu_cached_atlas_new (GskGpuDevice *device)
|
||||
{
|
||||
GskGpuCachedAtlas *self;
|
||||
|
||||
self = gsk_gpu_cached_new (device, &GSK_GPU_CACHED_ATLAS_CLASS, NULL);
|
||||
self->image = GSK_GPU_DEVICE_GET_CLASS (device)->create_atlas_image (device, ATLAS_SIZE, ATLAS_SIZE);
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
/* }}} */
|
||||
/* {{{ CachedTexture */
|
||||
|
||||
struct _GskGpuCachedTexture
|
||||
{
|
||||
GskGpuCached parent;
|
||||
|
||||
/* atomic */ int use_count; /* We count the use by the device (via the linked
|
||||
* list) and by the texture (via render data or
|
||||
* weak ref.
|
||||
*/
|
||||
|
||||
gsize *dead_pixels_counter;
|
||||
|
||||
GdkTexture *texture;
|
||||
GskGpuImage *image;
|
||||
};
|
||||
|
||||
static void
|
||||
gsk_gpu_cached_texture_free (GskGpuDevice *device,
|
||||
GskGpuCached *cached)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
|
||||
gpointer key, value;
|
||||
|
||||
g_clear_object (&self->image);
|
||||
|
||||
if (g_hash_table_steal_extended (priv->texture_cache, self->texture, &key, &value))
|
||||
{
|
||||
/* If the texture has been reused already, we put the entry back */
|
||||
if ((GskGpuCached *) value != cached)
|
||||
g_hash_table_insert (priv->texture_cache, key, value);
|
||||
}
|
||||
|
||||
/* If the cached item itself is still in use by the texture, we leave
|
||||
* it to the weak ref or render data to free it.
|
||||
*/
|
||||
if (g_atomic_int_dec_and_test (&self->use_count))
|
||||
{
|
||||
g_free (self);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static inline gboolean
|
||||
gsk_gpu_cached_texture_is_invalid (GskGpuCachedTexture *self)
|
||||
{
|
||||
/* If the use count is less than 2, the orignal texture has died,
|
||||
* and the memory may have been reused for a new texture, so we
|
||||
* can't hand out the image that is for the original texture.
|
||||
*/
|
||||
return g_atomic_int_get (&self->use_count) < 2;
|
||||
}
|
||||
|
||||
static gboolean
|
||||
gsk_gpu_cached_texture_should_collect (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
GskGpuCachedTexture *self = (GskGpuCachedTexture *) cached;
|
||||
|
||||
return gsk_gpu_cached_is_old (device, cached, timestamp) ||
|
||||
gsk_gpu_cached_texture_is_invalid (self);
|
||||
}
|
||||
|
||||
static const GskGpuCachedClass GSK_GPU_CACHED_TEXTURE_CLASS =
|
||||
{
|
||||
sizeof (GskGpuCachedTexture),
|
||||
gsk_gpu_cached_texture_free,
|
||||
gsk_gpu_cached_texture_should_collect
|
||||
};
|
||||
|
||||
/* Note: this function can run in an arbitrary thread, so it can
|
||||
* only access things atomically
|
||||
*/
|
||||
static void
|
||||
gsk_gpu_cached_texture_destroy_cb (gpointer data)
|
||||
{
|
||||
GskGpuCachedTexture *self = data;
|
||||
|
||||
if (!gsk_gpu_cached_texture_is_invalid (self))
|
||||
g_atomic_pointer_add (self->dead_pixels_counter, ((GskGpuCached *) self)->pixels);
|
||||
|
||||
if (g_atomic_int_dec_and_test (&self->use_count))
|
||||
g_free (self);
|
||||
}
|
||||
|
||||
static GskGpuCachedTexture *
|
||||
gsk_gpu_cached_texture_new (GskGpuDevice *device,
|
||||
GdkTexture *texture,
|
||||
GskGpuImage *image)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
GskGpuCachedTexture *self;
|
||||
|
||||
if (gdk_texture_get_render_data (texture, device))
|
||||
gdk_texture_clear_render_data (texture);
|
||||
else if ((self = g_hash_table_lookup (priv->texture_cache, texture)))
|
||||
g_hash_table_remove (priv->texture_cache, texture);
|
||||
|
||||
self = gsk_gpu_cached_new (device, &GSK_GPU_CACHED_TEXTURE_CLASS, NULL);
|
||||
self->texture = texture;
|
||||
self->image = g_object_ref (image);
|
||||
((GskGpuCached *)self)->pixels = gsk_gpu_image_get_width (image) * gsk_gpu_image_get_height (image);
|
||||
self->dead_pixels_counter = &priv->dead_texture_pixels;
|
||||
self->use_count = 2;
|
||||
|
||||
if (!gdk_texture_set_render_data (texture, device, self, gsk_gpu_cached_texture_destroy_cb))
|
||||
{
|
||||
g_object_weak_ref (G_OBJECT (texture), (GWeakNotify) gsk_gpu_cached_texture_destroy_cb, self);
|
||||
|
||||
g_hash_table_insert (priv->texture_cache, texture, self);
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
/* }}} */
|
||||
/* {{{ CachedGlyph */
|
||||
|
||||
struct _GskGpuCachedGlyph
|
||||
{
|
||||
GskGpuCached parent;
|
||||
|
||||
PangoFont *font;
|
||||
PangoGlyph glyph;
|
||||
GskGpuGlyphLookupFlags flags;
|
||||
float scale;
|
||||
|
||||
GskGpuImage *image;
|
||||
graphene_rect_t bounds;
|
||||
graphene_point_t origin;
|
||||
};
|
||||
|
||||
static void
|
||||
gsk_gpu_cached_glyph_free (GskGpuDevice *device,
|
||||
GskGpuCached *cached)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (device);
|
||||
GskGpuCachedGlyph *self = (GskGpuCachedGlyph *) cached;
|
||||
|
||||
g_hash_table_remove (priv->glyph_cache, self);
|
||||
|
||||
g_object_unref (self->font);
|
||||
g_object_unref (self->image);
|
||||
|
||||
g_free (self);
|
||||
}
|
||||
|
||||
static gboolean
|
||||
gsk_gpu_cached_glyph_should_collect (GskGpuDevice *device,
|
||||
GskGpuCached *cached,
|
||||
gint64 timestamp)
|
||||
{
|
||||
if (gsk_gpu_cached_is_old (device, cached, timestamp))
|
||||
{
|
||||
if (cached->atlas)
|
||||
mark_as_stale (cached, TRUE);
|
||||
else
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
/* Glyphs are only collected when their atlas is freed */
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
static guint
|
||||
gsk_gpu_cached_glyph_hash (gconstpointer data)
|
||||
{
|
||||
const GskGpuCachedGlyph *glyph = data;
|
||||
|
||||
return GPOINTER_TO_UINT (glyph->font) ^
|
||||
glyph->glyph ^
|
||||
(glyph->flags << 24) ^
|
||||
((guint) glyph->scale * PANGO_SCALE);
|
||||
}
|
||||
|
||||
static gboolean
|
||||
gsk_gpu_cached_glyph_equal (gconstpointer v1,
|
||||
gconstpointer v2)
|
||||
{
|
||||
const GskGpuCachedGlyph *glyph1 = v1;
|
||||
const GskGpuCachedGlyph *glyph2 = v2;
|
||||
|
||||
return glyph1->font == glyph2->font
|
||||
&& glyph1->glyph == glyph2->glyph
|
||||
&& glyph1->flags == glyph2->flags
|
||||
&& glyph1->scale == glyph2->scale;
|
||||
}
|
||||
|
||||
static const GskGpuCachedClass GSK_GPU_CACHED_GLYPH_CLASS =
|
||||
{
|
||||
sizeof (GskGpuCachedGlyph),
|
||||
gsk_gpu_cached_glyph_free,
|
||||
gsk_gpu_cached_glyph_should_collect
|
||||
};
|
||||
|
||||
/* }}} */
|
||||
/* {{{ GskGpuDevice */
|
||||
|
||||
static void
|
||||
print_cache_stats (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
GskGpuCached *cached;
|
||||
guint glyphs = 0;
|
||||
guint stale_glyphs = 0;
|
||||
guint textures = 0;
|
||||
guint atlases = 0;
|
||||
GString *ratios = g_string_new ("");
|
||||
|
||||
for (cached = priv->first_cached; cached != NULL; cached = cached->next)
|
||||
{
|
||||
if (cached->class == &GSK_GPU_CACHED_GLYPH_CLASS)
|
||||
{
|
||||
glyphs++;
|
||||
if (cached->stale)
|
||||
stale_glyphs++;
|
||||
}
|
||||
else if (cached->class == &GSK_GPU_CACHED_TEXTURE_CLASS)
|
||||
{
|
||||
textures++;
|
||||
}
|
||||
else if (cached->class == &GSK_GPU_CACHED_ATLAS_CLASS)
|
||||
{
|
||||
double ratio;
|
||||
|
||||
atlases++;
|
||||
|
||||
ratio = (double) cached->pixels / (double) (ATLAS_SIZE * ATLAS_SIZE);
|
||||
|
||||
if (ratios->len == 0)
|
||||
g_string_append (ratios, " (ratios ");
|
||||
else
|
||||
g_string_append (ratios, ", ");
|
||||
g_string_append_printf (ratios, "%.2f", ratio);
|
||||
}
|
||||
}
|
||||
|
||||
if (ratios->len > 0)
|
||||
g_string_append (ratios, ")");
|
||||
|
||||
gdk_debug_message ("Cached items\n"
|
||||
" glyphs: %5u (%u stale)\n"
|
||||
" textures: %5u (%u in hash)\n"
|
||||
" atlases: %5u%s",
|
||||
glyphs, stale_glyphs,
|
||||
textures, g_hash_table_size (priv->texture_cache),
|
||||
atlases, ratios->str);
|
||||
|
||||
g_string_free (ratios, TRUE);
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_device_gc (GskGpuDevice *self,
|
||||
gint64 timestamp)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
GskGpuCached *cached, *prev;
|
||||
gint64 before G_GNUC_UNUSED = GDK_PROFILER_CURRENT_TIME;
|
||||
gboolean result;
|
||||
|
||||
if (priv->cache == NULL)
|
||||
return TRUE;
|
||||
|
||||
gsk_gpu_device_make_current (self);
|
||||
|
||||
/* We walk the cache from the end so we don't end up with prev
|
||||
* being a leftover glyph on the atlas we are freeing
|
||||
*/
|
||||
for (cached = priv->last_cached; cached != NULL; cached = prev)
|
||||
{
|
||||
prev = cached->prev;
|
||||
if (gsk_gpu_cached_should_collect (self, cached, timestamp))
|
||||
gsk_gpu_cached_free (self, cached);
|
||||
}
|
||||
|
||||
g_atomic_pointer_set (&priv->dead_texture_pixels, 0);
|
||||
|
||||
if (GSK_DEBUG_CHECK (GLYPH_CACHE))
|
||||
print_cache_stats (self);
|
||||
result = gsk_gpu_cache_gc (priv->cache,
|
||||
priv->cache_timeout >= 0 ? priv->cache_timeout * G_TIME_SPAN_SECOND : -1,
|
||||
timestamp);
|
||||
if (result)
|
||||
g_clear_object (&priv->cache);
|
||||
|
||||
gdk_profiler_end_mark (before, "Glyph cache GC", NULL);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static gboolean
|
||||
@ -542,7 +79,10 @@ gsk_gpu_device_maybe_gc (GskGpuDevice *self)
|
||||
if (priv->cache_timeout < 0)
|
||||
return;
|
||||
|
||||
dead_texture_pixels = GPOINTER_TO_SIZE (g_atomic_pointer_get (&priv->dead_texture_pixels));
|
||||
if (priv->cache == NULL)
|
||||
return;
|
||||
|
||||
dead_texture_pixels = gsk_gpu_cache_get_dead_texture_pixels (priv->cache);
|
||||
|
||||
if (priv->cache_timeout == 0 || dead_texture_pixels > 1000000)
|
||||
{
|
||||
@ -560,39 +100,12 @@ gsk_gpu_device_queue_gc (GskGpuDevice *self)
|
||||
priv->cache_gc_source = g_timeout_add_seconds (priv->cache_timeout, cache_gc_cb, self);
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_device_clear_cache (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
for (GskGpuCached *cached = priv->first_cached; cached; cached = cached->next)
|
||||
{
|
||||
if (cached->prev == NULL)
|
||||
g_assert (priv->first_cached == cached);
|
||||
else
|
||||
g_assert (cached->prev->next == cached);
|
||||
if (cached->next == NULL)
|
||||
g_assert (priv->last_cached == cached);
|
||||
else
|
||||
g_assert (cached->next->prev == cached);
|
||||
}
|
||||
|
||||
/* We clear the cache from the end so glyphs get freed before their atlas */
|
||||
while (priv->last_cached)
|
||||
gsk_gpu_cached_free (self, priv->last_cached);
|
||||
|
||||
g_assert (priv->last_cached == NULL);
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_device_dispose (GObject *object)
|
||||
{
|
||||
GskGpuDevice *self = GSK_GPU_DEVICE (object);
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
gsk_gpu_device_clear_cache (self);
|
||||
g_hash_table_unref (priv->glyph_cache);
|
||||
g_hash_table_unref (priv->texture_cache);
|
||||
g_clear_handle_id (&priv->cache_gc_source, g_source_remove);
|
||||
|
||||
G_OBJECT_CLASS (gsk_gpu_device_parent_class)->dispose (object);
|
||||
@ -621,14 +134,7 @@ gsk_gpu_device_class_init (GskGpuDeviceClass *klass)
|
||||
static void
|
||||
gsk_gpu_device_init (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
priv->glyph_cache = g_hash_table_new (gsk_gpu_cached_glyph_hash,
|
||||
gsk_gpu_cached_glyph_equal);
|
||||
priv->texture_cache = g_hash_table_new (g_direct_hash,
|
||||
g_direct_equal);
|
||||
}
|
||||
|
||||
void
|
||||
gsk_gpu_device_setup (GskGpuDevice *self,
|
||||
GdkDisplay *display,
|
||||
@ -677,6 +183,19 @@ gsk_gpu_device_get_display (GskGpuDevice *self)
|
||||
return priv->display;
|
||||
}
|
||||
|
||||
GskGpuCache *
|
||||
gsk_gpu_device_get_cache (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
if (G_LIKELY (priv->cache))
|
||||
return priv->cache;
|
||||
|
||||
priv->cache = gsk_gpu_cache_new (self);
|
||||
|
||||
return priv->cache;
|
||||
}
|
||||
|
||||
gsize
|
||||
gsk_gpu_device_get_max_image_size (GskGpuDevice *self)
|
||||
{
|
||||
@ -695,6 +214,14 @@ gsk_gpu_device_create_offscreen_image (GskGpuDevice *self,
|
||||
return GSK_GPU_DEVICE_GET_CLASS (self)->create_offscreen_image (self, with_mipmap, depth, width, height);
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_create_atlas_image (GskGpuDevice *self,
|
||||
gsize width,
|
||||
gsize height)
|
||||
{
|
||||
return GSK_GPU_DEVICE_GET_CLASS (self)->create_atlas_image (self, width, height);
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_create_upload_image (GskGpuDevice *self,
|
||||
gboolean with_mipmap,
|
||||
@ -705,12 +232,6 @@ gsk_gpu_device_create_upload_image (GskGpuDevice *self,
|
||||
return GSK_GPU_DEVICE_GET_CLASS (self)->create_upload_image (self, with_mipmap, format, width, height);
|
||||
}
|
||||
|
||||
void
|
||||
gsk_gpu_device_make_current (GskGpuDevice *self)
|
||||
{
|
||||
GSK_GPU_DEVICE_GET_CLASS (self)->make_current (self);
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_create_download_image (GskGpuDevice *self,
|
||||
GdkMemoryDepth depth,
|
||||
@ -720,275 +241,10 @@ gsk_gpu_device_create_download_image (GskGpuDevice *self,
|
||||
return GSK_GPU_DEVICE_GET_CLASS (self)->create_download_image (self, depth, width, height);
|
||||
}
|
||||
|
||||
/* This rounds up to the next number that has <= 2 bits set:
|
||||
* 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, ...
|
||||
* That is roughly sqrt(2), so it should limit waste
|
||||
*/
|
||||
static gsize
|
||||
round_up_atlas_size (gsize num)
|
||||
{
|
||||
gsize storage = g_bit_storage (num);
|
||||
|
||||
num = num + (((1 << storage) - 1) >> 2);
|
||||
num &= (((gsize) 7) << storage) >> 2;
|
||||
|
||||
return num;
|
||||
}
|
||||
|
||||
static gboolean
|
||||
gsk_gpu_cached_atlas_allocate (GskGpuCachedAtlas *atlas,
|
||||
gsize width,
|
||||
gsize height,
|
||||
gsize *out_x,
|
||||
gsize *out_y)
|
||||
{
|
||||
gsize i;
|
||||
gsize waste, slice_waste;
|
||||
gsize best_slice;
|
||||
gsize y, best_y;
|
||||
gboolean can_add_slice;
|
||||
|
||||
best_y = 0;
|
||||
best_slice = G_MAXSIZE;
|
||||
can_add_slice = atlas->n_slices < MAX_SLICES_PER_ATLAS;
|
||||
if (can_add_slice)
|
||||
waste = height; /* Require less than 100% waste */
|
||||
else
|
||||
waste = G_MAXSIZE; /* Accept any slice, we can't make better ones */
|
||||
|
||||
for (i = 0, y = 0; i < atlas->n_slices; y += atlas->slices[i].height, i++)
|
||||
{
|
||||
if (atlas->slices[i].height < height || ATLAS_SIZE - atlas->slices[i].width < width)
|
||||
continue;
|
||||
|
||||
slice_waste = atlas->slices[i].height - height;
|
||||
if (slice_waste < waste)
|
||||
{
|
||||
waste = slice_waste;
|
||||
best_slice = i;
|
||||
best_y = y;
|
||||
if (waste == 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (best_slice >= i && i == atlas->n_slices)
|
||||
{
|
||||
gsize slice_height;
|
||||
|
||||
if (!can_add_slice)
|
||||
return FALSE;
|
||||
|
||||
slice_height = round_up_atlas_size (MAX (height, 4));
|
||||
if (slice_height > ATLAS_SIZE - y)
|
||||
return FALSE;
|
||||
|
||||
atlas->n_slices++;
|
||||
if (atlas->n_slices == MAX_SLICES_PER_ATLAS)
|
||||
slice_height = ATLAS_SIZE - y;
|
||||
|
||||
atlas->slices[i].width = 0;
|
||||
atlas->slices[i].height = slice_height;
|
||||
best_y = y;
|
||||
best_slice = i;
|
||||
}
|
||||
|
||||
*out_x = atlas->slices[best_slice].width;
|
||||
*out_y = best_y;
|
||||
|
||||
atlas->slices[best_slice].width += width;
|
||||
g_assert (atlas->slices[best_slice].width <= ATLAS_SIZE);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void
|
||||
gsk_gpu_device_ensure_atlas (GskGpuDevice *self,
|
||||
gboolean recreate)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
if (priv->current_atlas && !recreate)
|
||||
return;
|
||||
|
||||
priv->current_atlas = gsk_gpu_cached_atlas_new (self);
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_get_atlas_image (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
gsk_gpu_device_ensure_atlas (self, FALSE);
|
||||
|
||||
return priv->current_atlas->image;
|
||||
}
|
||||
|
||||
static GskGpuImage *
|
||||
gsk_gpu_device_add_atlas_image (GskGpuDevice *self,
|
||||
gsize width,
|
||||
gsize height,
|
||||
gsize *out_x,
|
||||
gsize *out_y)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
|
||||
if (width > MAX_ATLAS_ITEM_SIZE || height > MAX_ATLAS_ITEM_SIZE)
|
||||
return NULL;
|
||||
|
||||
gsk_gpu_device_ensure_atlas (self, FALSE);
|
||||
|
||||
if (gsk_gpu_cached_atlas_allocate (priv->current_atlas, width, height, out_x, out_y))
|
||||
return priv->current_atlas->image;
|
||||
|
||||
gsk_gpu_device_ensure_atlas (self, TRUE);
|
||||
|
||||
if (gsk_gpu_cached_atlas_allocate (priv->current_atlas, width, height, out_x, out_y))
|
||||
return priv->current_atlas->image;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_lookup_texture_image (GskGpuDevice *self,
|
||||
GdkTexture *texture,
|
||||
gint64 timestamp)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
GskGpuCachedTexture *cache;
|
||||
|
||||
cache = gdk_texture_get_render_data (texture, self);
|
||||
if (cache == NULL)
|
||||
cache = g_hash_table_lookup (priv->texture_cache, texture);
|
||||
|
||||
if (!cache || !cache->image || gsk_gpu_cached_texture_is_invalid (cache))
|
||||
return NULL;
|
||||
|
||||
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
|
||||
|
||||
return g_object_ref (cache->image);
|
||||
}
|
||||
|
||||
void
|
||||
gsk_gpu_device_cache_texture_image (GskGpuDevice *self,
|
||||
GdkTexture *texture,
|
||||
gint64 timestamp,
|
||||
GskGpuImage *image)
|
||||
gsk_gpu_device_make_current (GskGpuDevice *self)
|
||||
{
|
||||
GskGpuCachedTexture *cache;
|
||||
|
||||
cache = gsk_gpu_cached_texture_new (self, texture, image);
|
||||
|
||||
gsk_gpu_cached_use (self, (GskGpuCached *) cache, timestamp);
|
||||
}
|
||||
|
||||
GskGpuImage *
|
||||
gsk_gpu_device_lookup_glyph_image (GskGpuDevice *self,
|
||||
GskGpuFrame *frame,
|
||||
PangoFont *font,
|
||||
PangoGlyph glyph,
|
||||
GskGpuGlyphLookupFlags flags,
|
||||
float scale,
|
||||
graphene_rect_t *out_bounds,
|
||||
graphene_point_t *out_origin)
|
||||
{
|
||||
GskGpuDevicePrivate *priv = gsk_gpu_device_get_instance_private (self);
|
||||
GskGpuCachedGlyph lookup = {
|
||||
.font = font,
|
||||
.glyph = glyph,
|
||||
.flags = flags,
|
      .scale = scale
  };
  GskGpuCachedGlyph *cache;
  PangoRectangle ink_rect;
  graphene_rect_t rect;
  graphene_point_t origin;
  GskGpuImage *image;
  gsize atlas_x, atlas_y, padding;
  float subpixel_x, subpixel_y;
  PangoFont *scaled_font;
  cairo_hint_metrics_t hint_metrics;

  cache = g_hash_table_lookup (priv->glyph_cache, &lookup);
  if (cache)
    {
      gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));

      *out_bounds = cache->bounds;
      *out_origin = cache->origin;
      return cache->image;
    }

  /* The combination of hint-style != none and hint-metrics == off
   * leads to broken rendering with some fonts.
   */
  if (gsk_font_get_hint_style (font) != CAIRO_HINT_STYLE_NONE)
    hint_metrics = CAIRO_HINT_METRICS_ON;
  else
    hint_metrics = CAIRO_HINT_METRICS_DEFAULT;

  scaled_font = gsk_reload_font (font, scale, hint_metrics, CAIRO_HINT_STYLE_DEFAULT, CAIRO_ANTIALIAS_DEFAULT);

  subpixel_x = (flags & 3) / 4.f;
  subpixel_y = ((flags >> 2) & 3) / 4.f;
  pango_font_get_glyph_extents (scaled_font, glyph, &ink_rect, NULL);
  origin.x = floor (ink_rect.x * 1.0 / PANGO_SCALE + subpixel_x);
  origin.y = floor (ink_rect.y * 1.0 / PANGO_SCALE + subpixel_y);
  rect.size.width = ceil ((ink_rect.x + ink_rect.width) * 1.0 / PANGO_SCALE + subpixel_x) - origin.x;
  rect.size.height = ceil ((ink_rect.y + ink_rect.height) * 1.0 / PANGO_SCALE + subpixel_y) - origin.y;
  padding = 1;

  image = gsk_gpu_device_add_atlas_image (self,
                                          rect.size.width + 2 * padding, rect.size.height + 2 * padding,
                                          &atlas_x, &atlas_y);
  if (image)
    {
      g_object_ref (image);
      rect.origin.x = atlas_x + padding;
      rect.origin.y = atlas_y + padding;
      cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, priv->current_atlas);
    }
  else
    {
      image = gsk_gpu_device_create_upload_image (self, FALSE, GDK_MEMORY_DEFAULT, rect.size.width, rect.size.height);
      rect.origin.x = 0;
      rect.origin.y = 0;
      padding = 0;
      cache = gsk_gpu_cached_new (self, &GSK_GPU_CACHED_GLYPH_CLASS, NULL);
    }

  cache->font = g_object_ref (font);
  cache->glyph = glyph;
  cache->flags = flags;
  cache->scale = scale;
  cache->bounds = rect;
  cache->image = image;
  cache->origin = GRAPHENE_POINT_INIT (- origin.x + subpixel_x,
                                       - origin.y + subpixel_y);
  ((GskGpuCached *) cache)->pixels = (rect.size.width + 2 * padding) * (rect.size.height + 2 * padding);

  gsk_gpu_upload_glyph_op (frame,
                           cache->image,
                           scaled_font,
                           glyph,
                           &(cairo_rectangle_int_t) {
                               .x = rect.origin.x - padding,
                               .y = rect.origin.y - padding,
                               .width = rect.size.width + 2 * padding,
                               .height = rect.size.height + 2 * padding,
                           },
                           &GRAPHENE_POINT_INIT (cache->origin.x + padding,
                                                 cache->origin.y + padding));

  g_hash_table_insert (priv->glyph_cache, cache, cache);
  gsk_gpu_cached_use (self, (GskGpuCached *) cache, gsk_gpu_frame_get_timestamp (frame));

  *out_bounds = cache->bounds;
  *out_origin = cache->origin;

  g_object_unref (scaled_font);

  return cache->image;
}

void
gsk_gpu_device_make_current (GskGpuDevice *self)
{
  GSK_GPU_DEVICE_GET_CLASS (self)->make_current (self);
}

/* }}} */
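The two 2-bit fields in flags decode to quarter-pixel offsets, as the subpixel_x/subpixel_y lines above show. A worked example of that decode, using enum values from the header hunk below (values chosen purely for illustration):

  /* flags = GSK_GPU_GLYPH_X_OFFSET_2 | GSK_GPU_GLYPH_Y_OFFSET_1 = 0x2 | 0x4 = 0x6 */
  subpixel_x = (0x6 & 3) / 4.f;        /* low 2 bits:  2 / 4 = 0.5  */
  subpixel_y = ((0x6 >> 2) & 3) / 4.f; /* next 2 bits: 1 / 4 = 0.25 */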
@@ -53,14 +53,17 @@ void gsk_gpu_device_setup (GskGpuD
void                    gsk_gpu_device_maybe_gc                         (GskGpuDevice           *self);
void                    gsk_gpu_device_queue_gc                         (GskGpuDevice           *self);
GdkDisplay *            gsk_gpu_device_get_display                      (GskGpuDevice           *self);
GskGpuCache *           gsk_gpu_device_get_cache                        (GskGpuDevice           *self);
gsize                   gsk_gpu_device_get_max_image_size               (GskGpuDevice           *self);
GskGpuImage *           gsk_gpu_device_get_atlas_image                  (GskGpuDevice           *self);

GskGpuImage *           gsk_gpu_device_create_offscreen_image           (GskGpuDevice           *self,
                                                                         gboolean                with_mipmap,
                                                                         GdkMemoryDepth          depth,
                                                                         gsize                   width,
                                                                         gsize                   height);
GskGpuImage *           gsk_gpu_device_create_atlas_image               (GskGpuDevice           *self,
                                                                         gsize                   width,
                                                                         gsize                   height);
GskGpuImage *           gsk_gpu_device_create_upload_image              (GskGpuDevice           *self,
                                                                         gboolean                with_mipmap,
                                                                         GdkMemoryFormat         format,
@@ -71,33 +74,6 @@ GskGpuImage * gsk_gpu_device_create_download_image (GskGpuD
                                                                         gsize                   width,
                                                                         gsize                   height);
void                    gsk_gpu_device_make_current                     (GskGpuDevice           *self);
GskGpuImage *           gsk_gpu_device_lookup_texture_image             (GskGpuDevice           *self,
                                                                         GdkTexture             *texture,
                                                                         gint64                  timestamp);
void                    gsk_gpu_device_cache_texture_image              (GskGpuDevice           *self,
                                                                         GdkTexture             *texture,
                                                                         gint64                  timestamp,
                                                                         GskGpuImage            *image);

typedef enum
{
  GSK_GPU_GLYPH_X_OFFSET_1 = 0x1,
  GSK_GPU_GLYPH_X_OFFSET_2 = 0x2,
  GSK_GPU_GLYPH_X_OFFSET_3 = 0x3,
  GSK_GPU_GLYPH_Y_OFFSET_1 = 0x4,
  GSK_GPU_GLYPH_Y_OFFSET_2 = 0x8,
  GSK_GPU_GLYPH_Y_OFFSET_3 = 0xC
} GskGpuGlyphLookupFlags;

GskGpuImage *           gsk_gpu_device_lookup_glyph_image               (GskGpuDevice           *self,
                                                                         GskGpuFrame            *frame,
                                                                         PangoFont              *font,
                                                                         PangoGlyph              glyph,
                                                                         GskGpuGlyphLookupFlags  flags,
                                                                         float                   scale,
                                                                         graphene_rect_t        *out_bounds,
                                                                         graphene_point_t       *out_origin);


G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDevice, g_object_unref)
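The GskGpuGlyphLookupFlags enum above is the encode side of the quarter-pixel decode seen in the glyph lookup earlier. A minimal sketch of how a caller could quantize a fractional glyph position into these flags (the helper is hypothetical, not part of this commit):

  /* Hypothetical helper: round a fractional pixel position to quarter
   * pixels and pack it so that (flags & 3) / 4.f recovers the x offset
   * and ((flags >> 2) & 3) / 4.f the y offset. Fractions near 1.0 wrap
   * to 0, i.e. they round to the next whole pixel. */
  static GskGpuGlyphLookupFlags
  glyph_lookup_flags_for_offset (float frac_x, float frac_y)
  {
    guint qx = ((guint) (frac_x * 4 + 0.5)) & 3;
    guint qy = ((guint) (frac_y * 4 + 0.5)) & 3;

    return (GskGpuGlyphLookupFlags) (qx | (qy << 2));
  }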
@@ -7,6 +7,7 @@
#include "gskgpuimageprivate.h"
#include "gskgpuprintprivate.h"
#ifdef GDK_RENDERING_VULKAN
#include "gskgpucacheprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
@@ -143,12 +144,13 @@ gsk_gpu_download_op_vk_command (GskGpuOp *op,
  self->texture = gsk_vulkan_image_to_dmabuf_texture (GSK_VULKAN_IMAGE (self->image));
  if (self->texture)
    {
      GskVulkanDevice *device = GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame));
      VkDevice vk_device = gsk_vulkan_device_get_vk_device (device);
      GskGpuDevice *device = gsk_gpu_frame_get_device (frame);
      GskGpuCache *cache = gsk_gpu_device_get_cache (device);
      VkDevice vk_device = gsk_vulkan_device_get_vk_device (GSK_VULKAN_DEVICE (device));

      gsk_gpu_device_cache_texture_image (GSK_GPU_DEVICE (device), self->texture, gsk_gpu_frame_get_timestamp (frame), self->image);
      gsk_gpu_cache_cache_texture_image (cache, self->texture, gsk_gpu_frame_get_timestamp (frame), self->image);

      if (gsk_vulkan_device_has_feature (device, GDK_VULKAN_FEATURE_SEMAPHORE_EXPORT))
      if (gsk_vulkan_device_has_feature (GSK_VULKAN_DEVICE (device), GDK_VULKAN_FEATURE_SEMAPHORE_EXPORT))
        {
          GSK_VK_CHECK (vkCreateSemaphore, vk_device,
                        &(VkSemaphoreCreateInfo) {
@@ -3,6 +3,7 @@
#include "gskgpuframeprivate.h"

#include "gskgpubufferprivate.h"
#include "gskgpucacheprivate.h"
#include "gskgpudeviceprivate.h"
#include "gskgpudownloadopprivate.h"
#include "gskgpuimageprivate.h"
@@ -412,7 +413,7 @@ gsk_gpu_frame_upload_texture (GskGpuFrame *self,
  image = GSK_GPU_FRAME_GET_CLASS (self)->upload_texture (self, with_mipmap, texture);

  if (image)
    gsk_gpu_device_cache_texture_image (priv->device, texture, priv->timestamp, image);
    gsk_gpu_cache_cache_texture_image (gsk_gpu_device_get_cache (priv->device), texture, priv->timestamp, image);

  return image;
}
@@ -703,7 +704,7 @@ gsk_gpu_frame_download_texture (GskGpuFrame *self,
  GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
  GskGpuImage *image;

  image = gsk_gpu_device_lookup_texture_image (priv->device, texture, timestamp);
  image = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (priv->device), texture, timestamp);
  if (image == NULL)
    image = gsk_gpu_frame_upload_texture (self, FALSE, texture);
  if (image == NULL)
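The two hunks above establish the pattern every texture consumer follows after the split: fetch the cache from the device, look the texture up, and fall back to an upload on a miss. A condensed sketch of that flow (the wrapper function is hypothetical; every call in it appears in this commit):

  static GskGpuImage *
  lookup_or_upload (GskGpuFrame *frame,
                    GdkTexture  *texture)
  {
    GskGpuCache *cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (frame));
    GskGpuImage *image;

    /* cache hit: an image for this texture is already on the GPU */
    image = gsk_gpu_cache_lookup_texture_image (cache, texture, gsk_gpu_frame_get_timestamp (frame));
    if (image == NULL)
      /* miss: upload through the frame, which also populates the cache */
      image = gsk_gpu_frame_upload_texture (frame, FALSE, texture);

    return image;
  }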
@@ -8,6 +8,7 @@
#include "gskgpublendopprivate.h"
#include "gskgpublitopprivate.h"
#include "gskgpubluropprivate.h"
#include "gskgpucacheprivate.h"
#include "gskgpuclearopprivate.h"
#include "gskgpuclipprivate.h"
#include "gskgpucolorizeopprivate.h"
@@ -801,7 +802,7 @@ gsk_gpu_get_node_as_image (GskGpuFrame *frame,
      GdkTexture *texture = gsk_texture_node_get_texture (node);
      GskGpuDevice *device = gsk_gpu_frame_get_device (frame);
      gint64 timestamp = gsk_gpu_frame_get_timestamp (frame);
      result = gsk_gpu_device_lookup_texture_image (device, texture, timestamp);
      result = gsk_gpu_cache_lookup_texture_image (gsk_gpu_device_get_cache (device), texture, timestamp);
      if (result == NULL)
        result = gsk_gpu_frame_upload_texture (frame, FALSE, texture);

@@ -982,10 +983,10 @@ gsk_gpu_node_processor_get_node_as_image (GskGpuNodeProcessor *self,
  if (ensure != image && disallowed_flags &&
      gsk_render_node_get_node_type (node) == GSK_TEXTURE_NODE)
    {
      gsk_gpu_device_cache_texture_image (gsk_gpu_frame_get_device (self->frame),
                                          gsk_texture_node_get_texture (node),
                                          gsk_gpu_frame_get_timestamp (self->frame),
                                          ensure);
      gsk_gpu_cache_cache_texture_image (gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame)),
                                         gsk_texture_node_get_texture (node),
                                         gsk_gpu_frame_get_timestamp (self->frame),
                                         ensure);
    }

  return ensure;
@@ -1923,16 +1924,16 @@ static void
gsk_gpu_node_processor_add_texture_node (GskGpuNodeProcessor *self,
                                         GskRenderNode       *node)
{
  GskGpuDevice *device;
  GskGpuCache *cache;
  GskGpuImage *image;
  GdkTexture *texture;
  gint64 timestamp;

  device = gsk_gpu_frame_get_device (self->frame);
  cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame));
  texture = gsk_texture_node_get_texture (node);
  timestamp = gsk_gpu_frame_get_timestamp (self->frame);

  image = gsk_gpu_device_lookup_texture_image (device, texture, timestamp);
  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
  if (image == NULL)
    {
      image = gsk_gpu_frame_upload_texture (self->frame, FALSE, texture);
@@ -1995,18 +1996,18 @@ static gboolean
gsk_gpu_node_processor_create_texture_pattern (GskGpuPatternWriter *self,
                                               GskRenderNode       *node)
{
  GskGpuDevice *device;
  GskGpuCache *cache;
  GdkTexture *texture;
  gint64 timestamp;
  guint32 descriptor;
  GskGpuImage *image;
  GskGpuSampler sampler;

  device = gsk_gpu_frame_get_device (self->frame);
  cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame));
  texture = gsk_texture_node_get_texture (node);
  timestamp = gsk_gpu_frame_get_timestamp (self->frame);

  image = gsk_gpu_device_lookup_texture_image (device, texture, timestamp);
  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
  if (image == NULL)
    {
      image = gsk_gpu_frame_upload_texture (self->frame, FALSE, texture);
@@ -2051,7 +2052,7 @@ static void
gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
                                               GskRenderNode       *node)
{
  GskGpuDevice *device;
  GskGpuCache *cache;
  GskGpuImage *image;
  GdkTexture *texture;
  GskScalingFilter scaling_filter;
@@ -2095,13 +2096,13 @@ gsk_gpu_node_processor_add_texture_scale_node (GskGpuNodeProcessor *self,
      return;
    }

  device = gsk_gpu_frame_get_device (self->frame);
  cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame));
  texture = gsk_texture_scale_node_get_texture (node);
  scaling_filter = gsk_texture_scale_node_get_filter (node);
  timestamp = gsk_gpu_frame_get_timestamp (self->frame);
  need_mipmap = scaling_filter == GSK_SCALING_FILTER_TRILINEAR;

  image = gsk_gpu_device_lookup_texture_image (device, texture, timestamp);
  image = gsk_gpu_cache_lookup_texture_image (cache, texture, timestamp);
  if (image == NULL)
    {
      image = gsk_gpu_frame_upload_texture (self->frame, need_mipmap, texture);
@@ -2995,7 +2996,7 @@ static void
gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
                                       GskRenderNode       *node)
{
  GskGpuDevice *device;
  GskGpuCache *cache;
  const PangoGlyphInfo *glyphs;
  PangoFont *font;
  graphene_point_t offset;
@@ -3016,7 +3017,7 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
      return;
    }

  device = gsk_gpu_frame_get_device (self->frame);
  cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame));

  color = *gsk_text_node_get_color (node);
  color.alpha *= self->opacity;
@@ -3063,7 +3064,7 @@ gsk_gpu_node_processor_add_glyph_node (GskGpuNodeProcessor *self,
      glyph_origin.x *= inv_align_scale_x;
      glyph_origin.y *= inv_align_scale_y;

      image = gsk_gpu_device_lookup_glyph_image (device,
      image = gsk_gpu_cache_lookup_glyph_image (cache,
                                                self->frame,
                                                font,
                                                glyphs[i].glyph,
@@ -3115,7 +3116,7 @@ static gboolean
gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
                                             GskRenderNode       *node)
{
  GskGpuDevice *device;
  GskGpuCache *cache;
  const PangoGlyphInfo *glyphs;
  PangoFont *font;
  guint num_glyphs;
@@ -3132,7 +3133,7 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
  if (gsk_text_node_has_color_glyphs (node))
    return FALSE;

  device = gsk_gpu_frame_get_device (self->frame);
  cache = gsk_gpu_device_get_cache (gsk_gpu_frame_get_device (self->frame));
  num_glyphs = gsk_text_node_get_num_glyphs (node);
  glyphs = gsk_text_node_get_glyphs (node, NULL);
  font = gsk_text_node_get_font (node);
@@ -3179,14 +3180,14 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
      glyph_origin.x *= inv_align_scale_x;
      glyph_origin.y *= inv_align_scale_y;

      image = gsk_gpu_device_lookup_glyph_image (device,
                                                 self->frame,
                                                 font,
                                                 glyphs[i].glyph,
                                                 flags,
                                                 scale,
                                                 &glyph_bounds,
                                                 &glyph_offset);
      image = gsk_gpu_cache_lookup_glyph_image (cache,
                                                self->frame,
                                                font,
                                                glyphs[i].glyph,
                                                flags,
                                                scale,
                                                &glyph_bounds,
                                                &glyph_offset);

      if (image != last_image)
        {
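The if (image != last_image) check closing the hunk above is what batches consecutive glyphs that share an atlas image into one draw. A schematic of that loop shape (locals elided from the diff are stand-ins here):

  last_image = NULL;
  for (i = 0; i < num_glyphs; i++)
    {
      image = gsk_gpu_cache_lookup_glyph_image (cache, self->frame, font,
                                                glyphs[i].glyph, flags, scale,
                                                &glyph_bounds, &glyph_offset);
      if (image != last_image)
        {
          /* the glyph lives in a different atlas image: start a new batch */
          last_image = image;
        }
      /* ... emit this glyph's geometry ... */
    }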
@@ -9,6 +9,7 @@

typedef struct _GskGLDescriptors GskGLDescriptors;
typedef struct _GskGpuBuffer GskGpuBuffer;
typedef struct _GskGpuCache GskGpuCache;
typedef struct _GskGpuDescriptors GskGpuDescriptors;
typedef struct _GskGpuDevice GskGpuDevice;
typedef struct _GskGpuFrame GskGpuFrame;
@@ -2,6 +2,7 @@

#include "gskvulkanrealdescriptorsprivate.h"

#include "gskgpucacheprivate.h"
#include "gskvulkanbufferprivate.h"
#include "gskvulkanframeprivate.h"
#include "gskvulkanimageprivate.h"
@@ -245,7 +246,7 @@ gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
  guint32 ignored;

  if (!gsk_gpu_descriptors_add_image (GSK_GPU_DESCRIPTORS (self),
                                      gsk_gpu_device_get_atlas_image (GSK_GPU_DEVICE (device)),
                                      gsk_gpu_cache_get_atlas_image (gsk_gpu_device_get_cache (GSK_GPU_DEVICE (device))),
                                      GSK_GPU_SAMPLER_DEFAULT,
                                      &ignored))
    {
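With the atlas image now owned by the cache, descriptor setup reaches it through one extra hop, as the hunk above shows. Spelled out (a sketch; device is whatever the descriptors object already holds):

  GskGpuCache *cache = gsk_gpu_device_get_cache (GSK_GPU_DEVICE (device));
  GskGpuImage *atlas = gsk_gpu_cache_get_atlas_image (cache);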
@@ -81,6 +81,7 @@ gsk_private_sources = files([
  'gpu/gskgpuborderop.c',
  'gpu/gskgpuboxshadowop.c',
  'gpu/gskgpubuffer.c',
  'gpu/gskgpucache.c',
  'gpu/gskgpuclearop.c',
  'gpu/gskgpuclip.c',
  'gpu/gskgpucolorizeop.c',