gpu: Handle storage buffers via descriptors

This makes the (currently single) storage buffer be handled by
GskGpuDescriptors.
A side effect is that support for multiple buffers is now in place.
We just have to use it.

Mixed into this commit is a complete rework of the pattern writer.
Instead of writing straight into the storage buffer (complete with
repeatedly backtracking when we have to do offscreens), it now writes
into a temporary buffer and copies that into the storage buffer on
commit (a minimal sketch of this flow follows below).
Benjamin Otte 2023-11-19 15:27:10 +01:00
parent c2ec97e922
commit 1b38cbd410
17 changed files with 463 additions and 425 deletions
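
As a rough illustration of the reworked flow described above — not the GTK code itself — here is a minimal standalone C sketch. All names (TempBuffer, temp_buffer_append, storage_buffer_commit) are hypothetical and error handling is omitted; the point is only that a pattern is built up in a growable temporary buffer and copied into the shared storage buffer in one step on commit, so aborting a half-built pattern just means discarding the temporary buffer instead of rewinding the storage buffer.

/* Minimal standalone sketch (hypothetical names, not the GTK API) of the
 * reworked pattern-writer flow: append data into a growable temporary
 * buffer, then copy it into the storage buffer on commit and remember
 * the offset it landed at. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  unsigned char *data;
  size_t size;
  size_t allocated;
} TempBuffer;

static void
temp_buffer_append (TempBuffer *tmp, const void *data, size_t size)
{
  if (tmp->size + size > tmp->allocated)
    {
      size_t new_size = tmp->allocated ? 2 * tmp->allocated : 64;
      if (new_size < tmp->size + size)
        new_size = tmp->size + size;
      tmp->data = realloc (tmp->data, new_size);
      tmp->allocated = new_size;
    }
  memcpy (tmp->data + tmp->size, data, size);
  tmp->size += size;
}

/* Committing copies the temporary data into the storage buffer in one go
 * and returns the offset, which the caller turns into a pattern id. */
static size_t
storage_buffer_commit (unsigned char *storage, size_t *storage_used,
                       const TempBuffer *tmp)
{
  size_t offset = *storage_used;
  memcpy (storage + offset, tmp->data, tmp->size);
  *storage_used += tmp->size;
  return offset;
}

int
main (void)
{
  unsigned char storage[1024];
  size_t storage_used = 0;
  TempBuffer tmp = { NULL, 0, 0 };
  unsigned int op = 1; /* stand-in for a pattern opcode */
  float opacity = 0.5f;

  /* Build the pattern in the temporary buffer; aborting would just mean
   * freeing it, with no need to rewind the storage buffer. */
  temp_buffer_append (&tmp, &op, sizeof op);
  temp_buffer_append (&tmp, &opacity, sizeof opacity);

  printf ("pattern committed at offset %zu\n",
          storage_buffer_commit (storage, &storage_used, &tmp));

  free (tmp.data);
  return 0;
}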

View File

@ -2,6 +2,7 @@
#include "gskgldescriptorsprivate.h"
#include "gskglbufferprivate.h"
#include "gskglimageprivate.h"
struct _GskGLDescriptors
@ -54,6 +55,23 @@ gsk_gl_descriptors_add_image (GskGpuDescriptors *desc,
}
}
static gboolean
gsk_gl_descriptors_add_buffer (GskGpuDescriptors *desc,
GskGpuBuffer *buffer,
guint32 *out_descriptor)
{
gsize used_buffers;
used_buffers = gsk_gpu_descriptors_get_n_buffers (desc);
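/* Binding 0 is the globals UBO and the shader declares floats[11],
* so we can hand out at most 11 buffer descriptors. */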
if (used_buffers >= 11)
return FALSE;
*out_descriptor = used_buffers;
return TRUE;
}
static void
gsk_gl_descriptors_class_init (GskGLDescriptorsClass *klass)
{
@ -63,6 +81,7 @@ gsk_gl_descriptors_class_init (GskGLDescriptorsClass *klass)
object_class->finalize = gsk_gl_descriptors_finalize;
descriptors_class->add_image = gsk_gl_descriptors_add_image;
descriptors_class->add_buffer = gsk_gl_descriptors_add_buffer;
}
static void
@ -114,4 +133,12 @@ gsk_gl_descriptors_use (GskGLDescriptors *self)
glBindSampler (i - ext, gsk_gl_device_get_sampler_id (self->device, gsk_gpu_descriptors_get_sampler (desc, i)));
}
}
for (i = 0; i < gsk_gpu_descriptors_get_n_buffers (desc); i++)
{
GskGLBuffer *buffer = GSK_GL_BUFFER (gsk_gpu_descriptors_get_buffer (desc, i));
/* index 0 is the globals buffer, so we start at 1 */
gsk_gl_buffer_bind_base (buffer, i + 1);
}
}

View File

@ -137,7 +137,6 @@ gsk_gl_frame_create_storage_buffer (GskGpuFrame *frame,
static void
gsk_gl_frame_submit (GskGpuFrame *frame,
GskGpuBuffer *vertex_buffer,
GskGpuBuffer *storage_buffer,
GskGpuOp *op)
{
GskGLFrame *self = GSK_GL_FRAME (frame);
@ -155,11 +154,8 @@ gsk_gl_frame_submit (GskGpuFrame *frame,
if (vertex_buffer)
gsk_gl_buffer_bind (GSK_GL_BUFFER (vertex_buffer));
if (storage_buffer)
gsk_gl_buffer_bind_base (GSK_GL_BUFFER (storage_buffer), 1);
/* The globals buffer must be the last bound buffer,
* the globals op relies on that. */
glBindBufferBase (GL_UNIFORM_BUFFER, 0, self->globals_buffer_id);
gsk_gl_frame_bind_globals (self);
glBufferData (GL_UNIFORM_BUFFER,
sizeof (GskGpuGlobalsInstance),
NULL,
@ -238,3 +234,9 @@ gsk_gl_frame_use_program (GskGLFrame *self,
g_hash_table_insert (self->vaos, (gpointer) op_class, GUINT_TO_POINTER (vao));
}
void
gsk_gl_frame_bind_globals (GskGLFrame *self)
{
glBindBufferBase (GL_UNIFORM_BUFFER, 0, self->globals_buffer_id);
}

View File

@ -13,4 +13,6 @@ void gsk_gl_frame_use_program (GskGLFr
GskGpuShaderClip clip,
guint n_external_textures);
void gsk_gl_frame_bind_globals (GskGLFrame *self);
G_END_DECLS

View File

@ -1,158 +0,0 @@
#include "config.h"
#include "gskgpubufferwriterprivate.h"
#include "gskrectprivate.h"
#include <string.h>
gsize
gsk_gpu_buffer_writer_commit (GskGpuBufferWriter *self)
{
return self->finish (self, TRUE);
}
void
gsk_gpu_buffer_writer_abort (GskGpuBufferWriter *self)
{
self->finish (self, FALSE);
}
gsize
gsk_gpu_buffer_writer_get_size (GskGpuBufferWriter *self)
{
return self->size;
}
void
gsk_gpu_buffer_writer_rewind (GskGpuBufferWriter *self,
gsize size)
{
g_assert (size <= self->size);
self->size = size;
}
guchar *
gsk_gpu_buffer_writer_backup (GskGpuBufferWriter *self,
gsize *out_size)
{
*out_size = self->size - self->initial_size;
return g_memdup (self->data + self->initial_size, *out_size);
}
void
gsk_gpu_buffer_writer_ensure_size (GskGpuBufferWriter *self,
gsize size)
{
if (size <= self->allocated)
return;
self->ensure_size (self, size);
}
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
void
gsk_gpu_buffer_writer_append (GskGpuBufferWriter *self,
gsize align,
const guchar *data,
gsize size)
{
gsize aligned_size = round_up (self->size, align);
gsk_gpu_buffer_writer_ensure_size (self, aligned_size + size);
memcpy (self->data + aligned_size, data, size);
self->size = aligned_size + size;
}
void
gsk_gpu_buffer_writer_append_float (GskGpuBufferWriter *self,
float f)
{
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) &f, sizeof (float));
}
void
gsk_gpu_buffer_writer_append_int (GskGpuBufferWriter *self,
gint32 i)
{
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (gint32), (guchar *) &i, sizeof (gint32));
}
void
gsk_gpu_buffer_writer_append_uint (GskGpuBufferWriter *self,
guint32 u)
{
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (guint32), (guchar *) &u, sizeof (guint32));
}
void
gsk_gpu_buffer_writer_append_matrix (GskGpuBufferWriter *self,
const graphene_matrix_t *matrix)
{
float f[16];
graphene_matrix_to_float (matrix, f);
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
void
gsk_gpu_buffer_writer_append_vec4 (GskGpuBufferWriter *self,
const graphene_vec4_t *vec4)
{
float f[4];
graphene_vec4_to_float (vec4, f);
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
void
gsk_gpu_buffer_writer_append_point (GskGpuBufferWriter *self,
const graphene_point_t *point,
const graphene_point_t *offset)
{
float f[2];
f[0] = point->x + offset->x;
f[1] = point->y + offset->y;
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
void
gsk_gpu_buffer_writer_append_rect (GskGpuBufferWriter *self,
const graphene_rect_t *rect,
const graphene_point_t *offset)
{
float f[4];
gsk_gpu_rect_to_float (rect, offset, f);
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
void
gsk_gpu_buffer_writer_append_rgba (GskGpuBufferWriter *self,
const GdkRGBA *rgba)
{
float f[4] = { rgba->red, rgba->green, rgba->blue, rgba->alpha };
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
void
gsk_gpu_buffer_writer_append_color_stops (GskGpuBufferWriter *self,
const GskColorStop *stops,
gsize n_stops)
{
gsk_gpu_buffer_writer_append_uint (self, n_stops);
gsk_gpu_buffer_writer_append (self, G_ALIGNOF (float), (guchar *) stops, sizeof (GskColorStop) * n_stops);
}

View File

@ -1,63 +0,0 @@
#pragma once
#include "gskgputypesprivate.h"
#include "gskrendernode.h"
#include <graphene.h>
G_BEGIN_DECLS
typedef struct _GskGpuBufferWriter GskGpuBufferWriter;
struct _GskGpuBufferWriter
{
gpointer user_data;
void (* ensure_size) (GskGpuBufferWriter *self,
gsize size);
gsize (* finish) (GskGpuBufferWriter *self,
gboolean commit);
guchar *data;
gsize initial_size;
gsize size;
gsize allocated;
};
gsize gsk_gpu_buffer_writer_commit (GskGpuBufferWriter *self);
void gsk_gpu_buffer_writer_abort (GskGpuBufferWriter *self);
gsize gsk_gpu_buffer_writer_get_size (GskGpuBufferWriter *self);
void gsk_gpu_buffer_writer_rewind (GskGpuBufferWriter *self,
gsize size);
guchar * gsk_gpu_buffer_writer_backup (GskGpuBufferWriter *self,
gsize *out_size);
void gsk_gpu_buffer_writer_ensure_size (GskGpuBufferWriter *self,
gsize size);
void gsk_gpu_buffer_writer_append (GskGpuBufferWriter *self,
gsize align,
const guchar *data,
gsize size);
void gsk_gpu_buffer_writer_append_float (GskGpuBufferWriter *self,
float f);
void gsk_gpu_buffer_writer_append_int (GskGpuBufferWriter *self,
gint32 i);
void gsk_gpu_buffer_writer_append_uint (GskGpuBufferWriter *self,
guint32 u);
void gsk_gpu_buffer_writer_append_matrix (GskGpuBufferWriter *self,
const graphene_matrix_t *matrix);
void gsk_gpu_buffer_writer_append_vec4 (GskGpuBufferWriter *self,
const graphene_vec4_t *vec4);
void gsk_gpu_buffer_writer_append_point (GskGpuBufferWriter *self,
const graphene_point_t *point,
const graphene_point_t *offset);
void gsk_gpu_buffer_writer_append_rect (GskGpuBufferWriter *self,
const graphene_rect_t *rect,
const graphene_point_t *offset);
void gsk_gpu_buffer_writer_append_rgba (GskGpuBufferWriter *self,
const GdkRGBA *rgba);
void gsk_gpu_buffer_writer_append_color_stops (GskGpuBufferWriter *self,
const GskColorStop *stops,
gsize n_stops);
G_END_DECLS

View File

@ -3,6 +3,7 @@
#include "gskgpudescriptorsprivate.h"
typedef struct _GskGpuImageEntry GskGpuImageEntry;
typedef struct _GskGpuBufferEntry GskGpuBufferEntry;
struct _GskGpuImageEntry
{
@ -11,6 +12,12 @@ struct _GskGpuImageEntry
guint32 descriptor;
};
struct _GskGpuBufferEntry
{
GskGpuBuffer *buffer;
guint32 descriptor;
};
static void
gsk_gpu_image_entry_clear (gpointer data)
{
@ -19,6 +26,14 @@ gsk_gpu_image_entry_clear (gpointer data)
g_object_unref (entry->image);
}
static void
gsk_gpu_buffer_entry_clear (gpointer data)
{
GskGpuBufferEntry *entry = data;
g_object_unref (entry->buffer);
}
#define GDK_ARRAY_NAME gsk_gpu_image_entries
#define GDK_ARRAY_TYPE_NAME GskGpuImageEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuImageEntry
@ -28,11 +43,21 @@ gsk_gpu_image_entry_clear (gpointer data)
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
#define GDK_ARRAY_NAME gsk_gpu_buffer_entries
#define GDK_ARRAY_TYPE_NAME GskGpuBufferEntries
#define GDK_ARRAY_ELEMENT_TYPE GskGpuBufferEntry
#define GDK_ARRAY_FREE_FUNC gsk_gpu_buffer_entry_clear
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 4
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
typedef struct _GskGpuDescriptorsPrivate GskGpuDescriptorsPrivate;
struct _GskGpuDescriptorsPrivate
{
GskGpuImageEntries images;
GskGpuBufferEntries buffers;
};
G_DEFINE_TYPE_WITH_PRIVATE (GskGpuDescriptors, gsk_gpu_descriptors, G_TYPE_OBJECT)
@ -44,6 +69,7 @@ gsk_gpu_descriptors_finalize (GObject *object)
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_clear (&priv->images);
gsk_gpu_buffer_entries_clear (&priv->buffers);
G_OBJECT_CLASS (gsk_gpu_descriptors_parent_class)->finalize (object);
}
@ -62,6 +88,7 @@ gsk_gpu_descriptors_init (GskGpuDescriptors *self)
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsk_gpu_image_entries_init (&priv->images);
gsk_gpu_buffer_entries_init (&priv->buffers);
}
gsize
@ -72,14 +99,25 @@ gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self)
return gsk_gpu_image_entries_get_size (&priv->images);
}
gsize
gsk_gpu_descriptors_get_n_buffers (GskGpuDescriptors *self)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
return gsk_gpu_buffer_entries_get_size (&priv->buffers);
}
void
gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
gsize n_images)
gsize n_images,
gsize n_buffers)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
g_assert (n_images <= gsk_gpu_image_entries_get_size (&priv->images));
gsk_gpu_image_entries_set_size (&priv->images, n_images);
g_assert (n_buffers <= gsk_gpu_buffer_entries_get_size (&priv->buffers));
gsk_gpu_buffer_entries_set_size (&priv->buffers, n_buffers);
}
GskGpuImage *
@ -120,6 +158,16 @@ gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
g_return_val_if_reached ((gsize) -1);
}
GskGpuBuffer *
gsk_gpu_descriptors_get_buffer (GskGpuDescriptors *self,
gsize id)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&priv->buffers, id);
return entry->buffer;
}
gboolean
gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
GskGpuImage *image,
@ -156,3 +204,37 @@ gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
return TRUE;
}
gboolean
gsk_gpu_descriptors_add_buffer (GskGpuDescriptors *self,
GskGpuBuffer *buffer,
guint32 *out_descriptor)
{
GskGpuDescriptorsPrivate *priv = gsk_gpu_descriptors_get_instance_private (self);
gsize i;
guint32 descriptor;
for (i = 0; i < gsk_gpu_buffer_entries_get_size (&priv->buffers); i++)
{
const GskGpuBufferEntry *entry = gsk_gpu_buffer_entries_get (&priv->buffers, i);
if (entry->buffer == buffer)
{
*out_descriptor = entry->descriptor;
return TRUE;
}
}
if (!GSK_GPU_DESCRIPTORS_GET_CLASS (self)->add_buffer (self, buffer, &descriptor))
return FALSE;
gsk_gpu_buffer_entries_append (&priv->buffers,
&(GskGpuBufferEntry) {
.buffer = g_object_ref (buffer),
.descriptor = descriptor
});
*out_descriptor = descriptor;
return TRUE;
}

View File

@ -26,24 +26,34 @@ struct _GskGpuDescriptorsClass
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_id);
gboolean (* add_buffer) (GskGpuDescriptors *self,
GskGpuBuffer *buffer,
guint32 *out_id);
};
GType gsk_gpu_descriptors_get_type (void) G_GNUC_CONST;
gsize gsk_gpu_descriptors_get_n_images (GskGpuDescriptors *self);
gsize gsk_gpu_descriptors_get_n_buffers (GskGpuDescriptors *self);
void gsk_gpu_descriptors_set_size (GskGpuDescriptors *self,
gsize n_images);
gsize n_images,
gsize n_buffers);
GskGpuImage * gsk_gpu_descriptors_get_image (GskGpuDescriptors *self,
gsize id);
GskGpuSampler gsk_gpu_descriptors_get_sampler (GskGpuDescriptors *self,
gsize id);
gsize gsk_gpu_descriptors_find_image (GskGpuDescriptors *self,
guint32 descriptor);
GskGpuBuffer * gsk_gpu_descriptors_get_buffer (GskGpuDescriptors *self,
gsize id);
gboolean gsk_gpu_descriptors_add_image (GskGpuDescriptors *self,
GskGpuImage *image,
GskGpuSampler sampler,
guint32 *out_descriptor);
gboolean gsk_gpu_descriptors_add_buffer (GskGpuDescriptors *self,
GskGpuBuffer *buffer,
guint32 *out_descriptor);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GskGpuDescriptors, g_object_unref)

View File

@ -461,47 +461,28 @@ gsk_gpu_frame_ensure_storage_buffer (GskGpuFrame *self)
priv->storage_buffer_data = gsk_gpu_buffer_map (priv->storage_buffer);
}
static void
gsk_gpu_frame_buffer_memory_ensure_size (GskGpuBufferWriter *writer,
gsize size)
GskGpuBuffer *
gsk_gpu_frame_write_storage_buffer (GskGpuFrame *self,
const guchar *data,
gsize size,
gsize *out_offset)
{
/* FIXME: implement */
g_assert_not_reached ();
}
static gsize
gsk_gpu_frame_buffer_memory_finish (GskGpuBufferWriter *writer,
gboolean commit)
{
GskGpuFrame *self = GSK_GPU_FRAME (writer->user_data);
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
gsize offset;
if (!commit)
return 0;
offset = priv->storage_buffer_used;
priv->storage_buffer_used = writer->size;
return offset;
}
void
gsk_gpu_frame_write_buffer_memory (GskGpuFrame *self,
GskGpuBufferWriter *writer)
{
GskGpuFramePrivate *priv = gsk_gpu_frame_get_instance_private (self);
gsk_gpu_frame_ensure_storage_buffer (self);
writer->user_data = self;
writer->ensure_size = gsk_gpu_frame_buffer_memory_ensure_size;
writer->finish = gsk_gpu_frame_buffer_memory_finish;
offset = priv->storage_buffer_used;
g_assert (offset + size < gsk_gpu_buffer_get_size (priv->storage_buffer));
writer->data = priv->storage_buffer_data;
writer->initial_size = priv->storage_buffer_used;
writer->size = priv->storage_buffer_used;
writer->allocated = gsk_gpu_buffer_get_size (priv->storage_buffer);
if (size)
{
memcpy (priv->storage_buffer_data + offset, data, size);
priv->storage_buffer_used += size;
}
*out_offset = offset;
return priv->storage_buffer;
}
gboolean
@ -591,7 +572,6 @@ gsk_gpu_frame_submit (GskGpuFrame *self)
GSK_GPU_FRAME_GET_CLASS (self)->submit (self,
priv->vertex_buffer,
priv->storage_buffer,
priv->first_op);
}

View File

@ -1,6 +1,5 @@
#pragma once
#include "gskgpubufferwriterprivate.h"
#include "gskgpurenderer.h"
#include "gskgputypesprivate.h"
@ -37,7 +36,6 @@ struct _GskGpuFrameClass
gsize size);
void (* submit) (GskGpuFrame *self,
GskGpuBuffer *vertex_buffer,
GskGpuBuffer *storage_buffer,
GskGpuOp *op);
};
@ -65,8 +63,10 @@ gsize gsk_gpu_frame_reserve_vertex_data (GskGpuF
gsize size);
guchar * gsk_gpu_frame_get_vertex_data (GskGpuFrame *self,
gsize offset);
void gsk_gpu_frame_write_buffer_memory (GskGpuFrame *self,
GskGpuBufferWriter *writer);
GskGpuBuffer * gsk_gpu_frame_write_storage_buffer (GskGpuFrame *self,
const guchar *data,
gsize size,
gsize *out_offset);
gboolean gsk_gpu_frame_is_busy (GskGpuFrame *self);

View File

@ -2,6 +2,7 @@
#include "gskgpuglobalsopprivate.h"
#include "gskglframeprivate.h"
#include "gskgpuframeprivate.h"
#include "gskgpuprintprivate.h"
#include "gskroundedrectprivate.h"
@ -62,7 +63,8 @@ gsk_gpu_globals_op_gl_command (GskGpuOp *op,
{
GskGpuGlobalsOp *self = (GskGpuGlobalsOp *) op;
/* the GskGLFrame makes sure the uniform buffer points to the globals */
gsk_gl_frame_bind_globals (GSK_GL_FRAME (frame));
/* FIXME: Does it matter if we glBufferData() or glBufferSubData() here? */
glBufferSubData (GL_UNIFORM_BUFFER,
0,

View File

@ -102,6 +102,14 @@ struct _GskGpuNodeProcessor
GskGpuGlobals pending_globals;
};
#define GDK_ARRAY_NAME pattern_buffer
#define GDK_ARRAY_TYPE_NAME PatternBuffer
#define GDK_ARRAY_ELEMENT_TYPE guchar
#define GDK_ARRAY_BY_VALUE 1
#define GDK_ARRAY_PREALLOC 2048
#define GDK_ARRAY_NO_MEMSET 1
#include "gdk/gdkarrayimpl.c"
struct _GskGpuPatternWriter
{
GskGpuFrame *frame;
@ -112,7 +120,7 @@ struct _GskGpuPatternWriter
graphene_vec2_t scale;
guint stack;
GskGpuBufferWriter writer;
PatternBuffer buffer;
};
static void gsk_gpu_node_processor_add_node (GskGpuNodeProcessor *self,
@ -282,7 +290,108 @@ gsk_gpu_pattern_writer_init (GskGpuPatternWriter *self,
self->scale = *scale;
self->stack = 0;
gsk_gpu_frame_write_buffer_memory (frame, &self->writer);
pattern_buffer_init (&self->buffer);
}
static inline gsize
round_up (gsize number, gsize divisor)
{
return (number + divisor - 1) / divisor * divisor;
}
static void
gsk_gpu_pattern_writer_append (GskGpuPatternWriter *self,
gsize align,
const guchar *data,
gsize size)
{
pattern_buffer_set_size (&self->buffer, round_up (pattern_buffer_get_size (&self->buffer), align));
pattern_buffer_splice (&self->buffer,
pattern_buffer_get_size (&self->buffer),
0,
FALSE,
data,
size);
}
static void
gsk_gpu_pattern_writer_append_float (GskGpuPatternWriter *self,
float f)
{
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) &f, sizeof (float));
}
static void
gsk_gpu_pattern_writer_append_uint (GskGpuPatternWriter *self,
guint32 u)
{
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (guint32), (guchar *) &u, sizeof (guint32));
}
static void
gsk_gpu_pattern_writer_append_matrix (GskGpuPatternWriter *self,
const graphene_matrix_t *matrix)
{
float f[16];
graphene_matrix_to_float (matrix, f);
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
static void
gsk_gpu_pattern_writer_append_vec4 (GskGpuPatternWriter *self,
const graphene_vec4_t *vec4)
{
float f[4];
graphene_vec4_to_float (vec4, f);
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
static void
gsk_gpu_pattern_writer_append_point (GskGpuPatternWriter *self,
const graphene_point_t *point,
const graphene_point_t *offset)
{
float f[2];
f[0] = point->x + offset->x;
f[1] = point->y + offset->y;
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
static void
gsk_gpu_pattern_writer_append_rect (GskGpuPatternWriter *self,
const graphene_rect_t *rect,
const graphene_point_t *offset)
{
float f[4];
gsk_gpu_rect_to_float (rect, offset, f);
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
static void
gsk_gpu_pattern_writer_append_rgba (GskGpuPatternWriter *self,
const GdkRGBA *rgba)
{
float f[4] = { rgba->red, rgba->green, rgba->blue, rgba->alpha };
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) f, sizeof (f));
}
static void
gsk_gpu_pattern_writer_append_color_stops (GskGpuPatternWriter *self,
const GskColorStop *stops,
gsize n_stops)
{
gsk_gpu_pattern_writer_append_uint (self, n_stops);
gsk_gpu_pattern_writer_append (self, G_ALIGNOF (float), (guchar *) stops, sizeof (GskColorStop) * n_stops);
}
static gboolean
@ -305,36 +414,11 @@ gsk_gpu_pattern_writer_pop_stack (GskGpuPatternWriter *self)
static void
gsk_gpu_pattern_writer_finish (GskGpuPatternWriter *self)
{
pattern_buffer_clear (&self->buffer);
g_assert (self->stack == 0);
g_clear_object (&self->desc);
}
static void
gsk_gpu_pattern_writer_abort (GskGpuPatternWriter *self)
{
gsk_gpu_buffer_writer_abort (&self->writer);
gsk_gpu_pattern_writer_finish (self);
}
static void
gsk_gpu_pattern_writer_commit_op (GskGpuPatternWriter *self,
GskGpuShaderClip clip)
{
guint32 pattern_id;
pattern_id = gsk_gpu_buffer_writer_commit (&self->writer) / sizeof (float);
gsk_gpu_uber_op (self->frame,
clip,
&self->bounds,
&self->offset,
self->desc,
pattern_id);
gsk_gpu_pattern_writer_finish (self);
}
static gboolean
gsk_gpu_pattern_writer_add_image (GskGpuPatternWriter *self,
GskGpuImage *image,
@ -935,6 +1019,9 @@ gsk_gpu_node_processor_try_node_as_pattern (GskGpuNodeProcessor *self,
{
GskGpuPatternWriter writer;
graphene_rect_t clipped;
GskGpuBuffer *buffer;
gsize offset;
guint32 pattern_id;
g_assert (self->pending_globals == 0);
@ -949,20 +1036,46 @@ gsk_gpu_node_processor_try_node_as_pattern (GskGpuNodeProcessor *self,
if (!gsk_gpu_node_processor_create_node_pattern (&writer, node))
{
gsk_gpu_pattern_writer_abort (&writer);
gsk_gpu_pattern_writer_finish (&writer);
return FALSE;
}
if (self->opacity < 1.0)
{
gsk_gpu_buffer_writer_append_uint (&writer.writer, GSK_GPU_PATTERN_OPACITY);
gsk_gpu_buffer_writer_append_float (&writer.writer, self->opacity);
gsk_gpu_pattern_writer_append_uint (&writer, GSK_GPU_PATTERN_OPACITY);
gsk_gpu_pattern_writer_append_float (&writer, self->opacity);
}
gsk_gpu_buffer_writer_append_uint (&writer.writer, GSK_GPU_PATTERN_DONE);
gsk_gpu_pattern_writer_append_uint (&writer, GSK_GPU_PATTERN_DONE);
buffer = gsk_gpu_frame_write_storage_buffer (self->frame,
pattern_buffer_get_data (&writer.buffer),
pattern_buffer_get_size (&writer.buffer),
&offset);
if (writer.desc == NULL)
{
if (self->desc == NULL)
self->desc = gsk_gpu_frame_create_descriptors (self->frame);
if (!gsk_gpu_descriptors_add_buffer (self->desc, buffer, &pattern_id))
writer.desc = gsk_gpu_frame_create_descriptors (self->frame);
}
if (writer.desc &&
!gsk_gpu_descriptors_add_buffer (writer.desc, buffer, &pattern_id))
{
g_assert_not_reached ();
}
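/* Pack the buffer descriptor into the high bits and the float offset
* into the low 22 bits; gsk_get_float() in the shader splits them apart. */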
pattern_id = (pattern_id << 22) | (offset / sizeof (float));
gsk_gpu_uber_op (self->frame,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds),
&node->bounds,
&self->offset,
writer.desc ? writer.desc : self->desc,
pattern_id);
gsk_gpu_pattern_writer_finish (&writer);
gsk_gpu_pattern_writer_commit_op (&writer,
gsk_gpu_clip_get_shader_clip (&self->clip, &self->offset, &node->bounds));
return TRUE;
}
@ -1133,8 +1246,8 @@ gsk_gpu_node_processor_create_clip_pattern (GskGpuPatternWriter *self,
if (!gsk_gpu_node_processor_create_node_pattern (self, gsk_opacity_node_get_child (node)))
return FALSE;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer,
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self,
gsk_clip_node_get_clip (node),
&self->offset);
@ -1380,9 +1493,9 @@ gsk_gpu_node_processor_create_transform_pattern (GskGpuPatternWriter *self,
gsk_transform_to_affine (transform, &sx, &sy, &dx, &dy);
inv_sx = 1.f / sx;
inv_sy = 1.f / sy;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_AFFINE);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_AFFINE);
graphene_vec4_init (&vec4, self->offset.x + dx, self->offset.y + dy, inv_sx, inv_sy);
gsk_gpu_buffer_writer_append_vec4 (&self->writer, &vec4);
gsk_gpu_pattern_writer_append_vec4 (self, &vec4);
self->bounds.origin.x = (self->bounds.origin.x - self->offset.x - dx) * inv_sx;
self->bounds.origin.y = (self->bounds.origin.y - self->offset.y - dy) * inv_sy;
self->bounds.size.width *= inv_sx;
@ -1408,7 +1521,7 @@ gsk_gpu_node_processor_create_transform_pattern (GskGpuPatternWriter *self,
result = gsk_gpu_node_processor_create_node_pattern (self, child);
if (result)
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POSITION_POP);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POSITION_POP);
gsk_gpu_pattern_writer_pop_stack (self);
self->scale = old_scale;
@ -1546,8 +1659,8 @@ static gboolean
gsk_gpu_node_processor_create_color_pattern (GskGpuPatternWriter *self,
GskRenderNode *node)
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_COLOR);
gsk_gpu_buffer_writer_append_rgba (&self->writer, gsk_color_node_get_color (node));
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_COLOR);
gsk_gpu_pattern_writer_append_rgba (self, gsk_color_node_get_color (node));
return TRUE;
}
@ -1637,11 +1750,11 @@ gsk_gpu_node_processor_create_texture_pattern (GskGpuPatternWriter *self,
}
if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA)
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_STRAIGHT_ALPHA);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_STRAIGHT_ALPHA);
else
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_buffer_writer_append_uint (&self->writer, descriptor);
gsk_gpu_buffer_writer_append_rect (&self->writer, &node->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_pattern_writer_append_uint (self, descriptor);
gsk_gpu_pattern_writer_append_rect (self, &node->bounds, &self->offset);
g_object_unref (image);
@ -1831,17 +1944,17 @@ gsk_gpu_node_processor_create_linear_gradient_pattern (GskGpuPatternWriter *self
GskRenderNode *node)
{
if (gsk_render_node_get_node_type (node) == GSK_REPEATING_LINEAR_GRADIENT_NODE)
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_REPEATING_LINEAR_GRADIENT);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_REPEATING_LINEAR_GRADIENT);
else
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_LINEAR_GRADIENT);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_LINEAR_GRADIENT);
gsk_gpu_buffer_writer_append_point (&self->writer,
gsk_gpu_pattern_writer_append_point (self,
gsk_linear_gradient_node_get_start (node),
&self->offset);
gsk_gpu_buffer_writer_append_point (&self->writer,
gsk_gpu_pattern_writer_append_point (self,
gsk_linear_gradient_node_get_end (node),
&self->offset);
gsk_gpu_buffer_writer_append_color_stops (&self->writer,
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_linear_gradient_node_get_color_stops (node, NULL),
gsk_linear_gradient_node_get_n_color_stops (node));
@ -1853,18 +1966,18 @@ gsk_gpu_node_processor_create_radial_gradient_pattern (GskGpuPatternWriter *self
GskRenderNode *node)
{
if (gsk_render_node_get_node_type (node) == GSK_REPEATING_RADIAL_GRADIENT_NODE)
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_REPEATING_RADIAL_GRADIENT);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_REPEATING_RADIAL_GRADIENT);
else
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_RADIAL_GRADIENT);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_RADIAL_GRADIENT);
gsk_gpu_buffer_writer_append_point (&self->writer,
gsk_gpu_pattern_writer_append_point (self,
gsk_radial_gradient_node_get_center (node),
&self->offset);
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_radial_gradient_node_get_hradius (node));
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_radial_gradient_node_get_vradius (node));
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_radial_gradient_node_get_start (node));
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_radial_gradient_node_get_end (node));
gsk_gpu_buffer_writer_append_color_stops (&self->writer,
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_hradius (node));
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_vradius (node));
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_start (node));
gsk_gpu_pattern_writer_append_float (self, gsk_radial_gradient_node_get_end (node));
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_radial_gradient_node_get_color_stops (node, NULL),
gsk_radial_gradient_node_get_n_color_stops (node));
@ -1875,12 +1988,12 @@ static gboolean
gsk_gpu_node_processor_create_conic_gradient_pattern (GskGpuPatternWriter *self,
GskRenderNode *node)
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CONIC_GRADIENT);
gsk_gpu_buffer_writer_append_point (&self->writer,
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CONIC_GRADIENT);
gsk_gpu_pattern_writer_append_point (self,
gsk_conic_gradient_node_get_center (node),
&self->offset);
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_conic_gradient_node_get_angle (node));
gsk_gpu_buffer_writer_append_color_stops (&self->writer,
gsk_gpu_pattern_writer_append_float (self, gsk_conic_gradient_node_get_angle (node));
gsk_gpu_pattern_writer_append_color_stops (self,
gsk_conic_gradient_node_get_color_stops (node, NULL),
gsk_conic_gradient_node_get_n_color_stops (node));
@ -2022,11 +2135,11 @@ gsk_gpu_node_processor_create_cross_fade_pattern (GskGpuPatternWriter *self,
return FALSE;
if (!gsk_rect_contains_rect (&start_child->bounds, &node->bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer, &start_child->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self, &start_child->bounds, &self->offset);
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_PUSH_COLOR);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_PUSH_COLOR);
if (!gsk_gpu_pattern_writer_push_stack (self))
return FALSE;
@ -2038,12 +2151,12 @@ gsk_gpu_node_processor_create_cross_fade_pattern (GskGpuPatternWriter *self,
}
if (!gsk_rect_contains_rect (&end_child->bounds, &node->bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer, &end_child->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self, &end_child->bounds, &self->offset);
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POP_CROSS_FADE);
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_cross_fade_node_get_progress (node));
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POP_CROSS_FADE);
gsk_gpu_pattern_writer_append_float (self, gsk_cross_fade_node_get_progress (node));
gsk_gpu_pattern_writer_pop_stack (self);
@ -2063,11 +2176,11 @@ gsk_gpu_node_processor_create_mask_pattern (GskGpuPatternWriter *self,
return FALSE;
if (!gsk_rect_contains_rect (&source_child->bounds, &node->bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer, &source_child->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self, &source_child->bounds, &self->offset);
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_PUSH_COLOR);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_PUSH_COLOR);
if (!gsk_gpu_pattern_writer_push_stack (self))
return FALSE;
@ -2079,26 +2192,26 @@ gsk_gpu_node_processor_create_mask_pattern (GskGpuPatternWriter *self,
}
if (!gsk_rect_contains_rect (&mask_child->bounds, &node->bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer, &mask_child->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self, &mask_child->bounds, &self->offset);
}
switch (gsk_mask_node_get_mask_mode (node))
{
case GSK_MASK_MODE_ALPHA:
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POP_MASK_ALPHA);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POP_MASK_ALPHA);
break;
case GSK_MASK_MODE_INVERTED_ALPHA:
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POP_MASK_INVERTED_ALPHA);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POP_MASK_INVERTED_ALPHA);
break;
case GSK_MASK_MODE_LUMINANCE:
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POP_MASK_LUMINANCE);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POP_MASK_LUMINANCE);
break;
case GSK_MASK_MODE_INVERTED_LUMINANCE:
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POP_MASK_INVERTED_LUMINANCE);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POP_MASK_INVERTED_LUMINANCE);
break;
default:
@ -2214,9 +2327,9 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
scale = MAX (graphene_vec2_get_x (&self->scale), graphene_vec2_get_y (&self->scale));
inv_scale = 1.f / scale;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_GLYPHS);
gsk_gpu_buffer_writer_append_rgba (&self->writer, gsk_text_node_get_color (node));
gsk_gpu_buffer_writer_append_uint (&self->writer, num_glyphs);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_GLYPHS);
gsk_gpu_pattern_writer_append_rgba (self, gsk_text_node_get_color (node));
gsk_gpu_pattern_writer_append_uint (self, num_glyphs);
last_image = NULL;
for (i = 0; i < num_glyphs; i++)
@ -2246,11 +2359,11 @@ gsk_gpu_node_processor_create_glyph_pattern (GskGpuPatternWriter *self,
glyph_offset = GRAPHENE_POINT_INIT (offset.x - glyph_offset.x * inv_scale + (float) glyphs[i].geometry.x_offset / PANGO_SCALE,
offset.y - glyph_offset.y * inv_scale + (float) glyphs[i].geometry.y_offset / PANGO_SCALE);
gsk_gpu_buffer_writer_append_uint (&self->writer, tex_id);
gsk_gpu_buffer_writer_append_rect (&self->writer,
gsk_gpu_pattern_writer_append_uint (self, tex_id);
gsk_gpu_pattern_writer_append_rect (self,
&glyph_bounds,
&glyph_offset);
gsk_gpu_buffer_writer_append_rect (&self->writer,
gsk_gpu_pattern_writer_append_rect (self,
&GRAPHENE_RECT_INIT (
0, 0,
gsk_gpu_image_get_width (image) * inv_scale,
@ -2271,8 +2384,8 @@ gsk_gpu_node_processor_create_opacity_pattern (GskGpuPatternWriter *self,
if (!gsk_gpu_node_processor_create_node_pattern (self, gsk_opacity_node_get_child (node)))
return FALSE;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_OPACITY);
gsk_gpu_buffer_writer_append_float (&self->writer, gsk_opacity_node_get_opacity (node));
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_OPACITY);
gsk_gpu_pattern_writer_append_float (self, gsk_opacity_node_get_opacity (node));
return TRUE;
}
@ -2339,9 +2452,9 @@ gsk_gpu_node_processor_create_color_matrix_pattern (GskGpuPatternWriter *self,
if (!gsk_gpu_node_processor_create_node_pattern (self, gsk_color_matrix_node_get_child (node)))
return FALSE;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_COLOR_MATRIX);
gsk_gpu_buffer_writer_append_matrix (&self->writer, gsk_color_matrix_node_get_color_matrix (node));
gsk_gpu_buffer_writer_append_vec4 (&self->writer, gsk_color_matrix_node_get_color_offset (node));
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_COLOR_MATRIX);
gsk_gpu_pattern_writer_append_matrix (self, gsk_color_matrix_node_get_color_matrix (node));
gsk_gpu_pattern_writer_append_vec4 (self, gsk_color_matrix_node_get_color_offset (node));
return TRUE;
}
@ -2359,16 +2472,16 @@ gsk_gpu_node_processor_create_repeat_pattern (GskGpuPatternWriter *self,
if (gsk_rect_is_empty (child_bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_COLOR);
gsk_gpu_buffer_writer_append_rgba (&self->writer, &(GdkRGBA) { 0, 0, 0, 0 });
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_COLOR);
gsk_gpu_pattern_writer_append_rgba (self, &(GdkRGBA) { 0, 0, 0, 0 });
return TRUE;
}
if (!gsk_gpu_pattern_writer_push_stack (self))
return FALSE;
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_REPEAT_PUSH);
gsk_gpu_buffer_writer_append_rect (&self->writer, child_bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_REPEAT_PUSH);
gsk_gpu_pattern_writer_append_rect (self, child_bounds, &self->offset);
old_bounds = self->bounds;
self->bounds = GRAPHENE_RECT_INIT (child_bounds->origin.x + self->offset.x,
@ -2385,11 +2498,11 @@ gsk_gpu_node_processor_create_repeat_pattern (GskGpuPatternWriter *self,
if (!gsk_rect_contains_rect (&child->bounds, child_bounds))
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_CLIP);
gsk_gpu_buffer_writer_append_rect (&self->writer, &child->bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_CLIP);
gsk_gpu_pattern_writer_append_rect (self, &child->bounds, &self->offset);
}
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_POSITION_POP);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_POSITION_POP);
gsk_gpu_pattern_writer_pop_stack (self);
return TRUE;
@ -2713,9 +2826,7 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
{
GskRenderNodeType node_type;
graphene_rect_t bounds;
guchar *tmp_data;
GskGpuImage *image;
gsize tmp_size;
guint32 tex_id;
if (!gsk_gpu_frame_should_optimize (self->frame, GSK_GPU_OPTIMIZE_UBER))
@ -2730,17 +2841,16 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
if (nodes_vtable[node_type].create_pattern != NULL)
{
gsize size_before = gsk_gpu_buffer_writer_get_size (&self->writer);
gsize size_before = pattern_buffer_get_size (&self->buffer);
gsize images_before = self->desc ? gsk_gpu_descriptors_get_n_images (self->desc) : 0;
gsize buffers_before = self->desc ? gsk_gpu_descriptors_get_n_buffers (self->desc) : 0;
if (nodes_vtable[node_type].create_pattern (self, node))
return TRUE;
gsk_gpu_buffer_writer_rewind (&self->writer, size_before);
pattern_buffer_set_size (&self->buffer, size_before);
if (self->desc)
gsk_gpu_descriptors_set_size (self->desc, images_before);
gsk_gpu_descriptors_set_size (self->desc, images_before, buffers_before);
}
tmp_data = gsk_gpu_buffer_writer_backup (&self->writer, &tmp_size);
gsk_gpu_buffer_writer_abort (&self->writer);
image = gsk_gpu_get_node_as_image (self->frame,
&GRAPHENE_RECT_INIT (
self->bounds.origin.x - self->offset.x,
@ -2753,18 +2863,11 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
&bounds);
if (image == NULL)
{
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_COLOR);
gsk_gpu_buffer_writer_append_rgba (&self->writer, &(GdkRGBA) { 0, 0, 0, 0 });
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_COLOR);
gsk_gpu_pattern_writer_append_rgba (self, &(GdkRGBA) { 0, 0, 0, 0 });
return TRUE;
}
gsk_gpu_frame_write_buffer_memory (self->frame, &self->writer);
if (tmp_size)
{
gsk_gpu_buffer_writer_append (&self->writer, sizeof (float), tmp_data, tmp_size);
g_free (tmp_data);
}
if (!gsk_gpu_pattern_writer_add_image (self, image, GSK_GPU_SAMPLER_DEFAULT, &tex_id))
{
g_object_unref (image);
@ -2772,11 +2875,11 @@ gsk_gpu_node_processor_create_node_pattern (GskGpuPatternWriter *self,
}
if (gsk_gpu_image_get_flags (image) & GSK_GPU_IMAGE_STRAIGHT_ALPHA)
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_STRAIGHT_ALPHA);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_STRAIGHT_ALPHA);
else
gsk_gpu_buffer_writer_append_uint (&self->writer, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_buffer_writer_append_uint (&self->writer, tex_id);
gsk_gpu_buffer_writer_append_rect (&self->writer, &bounds, &self->offset);
gsk_gpu_pattern_writer_append_uint (self, GSK_GPU_PATTERN_TEXTURE);
gsk_gpu_pattern_writer_append_uint (self, tex_id);
gsk_gpu_pattern_writer_append_rect (self, &bounds, &self->offset);
g_object_unref (image);

View File

@ -137,8 +137,7 @@ gsk_vulkan_frame_upload_texture (GskGpuFrame *frame,
}
static void
gsk_vulkan_frame_prepare_descriptors (GskVulkanFrame *self,
GskGpuBuffer *storage_buffer)
gsk_vulkan_frame_prepare_descriptors (GskVulkanFrame *self)
{
GskVulkanDevice *device;
VkDevice vk_device;
@ -154,13 +153,7 @@ gsk_vulkan_frame_prepare_descriptors (GskVulkanFrame *self,
{
gsize n_desc_images, n_desc_buffers;
GskVulkanRealDescriptors *desc = gsk_descriptors_get (&self->descriptors, i);
if (storage_buffer)
{
G_GNUC_UNUSED guint32 descriptor;
descriptor = gsk_vulkan_real_descriptors_get_buffer_descriptor (desc, storage_buffer);
g_assert (descriptor == 0);
}
gsk_vulkan_real_descriptors_prepare (desc, &n_desc_images, &n_desc_buffers);
gsk_vulkan_real_descriptors_prepare (desc, GSK_GPU_FRAME (self), &n_desc_images, &n_desc_buffers);
n_images += n_desc_images;
n_buffers += n_desc_buffers;
}
@ -269,7 +262,6 @@ gsk_vulkan_frame_create_storage_buffer (GskGpuFrame *frame,
static void
gsk_vulkan_frame_submit (GskGpuFrame *frame,
GskGpuBuffer *vertex_buffer,
GskGpuBuffer *storage_buffer,
GskGpuOp *op)
{
GskVulkanFrame *self = GSK_VULKAN_FRAME (frame);
@ -278,7 +270,7 @@ gsk_vulkan_frame_submit (GskGpuFrame *frame,
if (gsk_descriptors_get_size (&self->descriptors) == 0)
gsk_descriptors_append (&self->descriptors, gsk_vulkan_real_descriptors_new (GSK_VULKAN_DEVICE (gsk_gpu_frame_get_device (frame))));
gsk_vulkan_frame_prepare_descriptors (self, storage_buffer);
gsk_vulkan_frame_prepare_descriptors (self);
GSK_VK_CHECK (vkBeginCommandBuffer, self->vk_command_buffer,
&(VkCommandBufferBeginInfo) {

View File

@ -128,6 +128,28 @@ gsk_vulkan_real_descriptors_add_image (GskGpuDescriptors *desc,
return TRUE;
}
static gboolean
gsk_vulkan_real_descriptors_add_buffer (GskGpuDescriptors *desc,
GskGpuBuffer *buffer,
guint32 *out_descriptor)
{
GskVulkanRealDescriptors *self = GSK_VULKAN_REAL_DESCRIPTORS (desc);
if (gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers) >=
gsk_vulkan_device_get_max_buffers (self->device))
return FALSE;
*out_descriptor = gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers);
gsk_descriptor_buffer_infos_append (&self->descriptor_buffers,
&(VkDescriptorBufferInfo) {
.buffer = gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (buffer)),
.offset = 0,
.range = VK_WHOLE_SIZE
});
return TRUE;
}
static void
gsk_vulkan_real_descriptors_finalize (GObject *object)
{
@ -155,6 +177,7 @@ gsk_vulkan_real_descriptors_class_init (GskVulkanRealDescriptorsClass *klass)
object_class->finalize = gsk_vulkan_real_descriptors_finalize;
descriptors_class->add_image = gsk_vulkan_real_descriptors_add_image;
descriptors_class->add_buffer = gsk_vulkan_real_descriptors_add_buffer;
vulkan_descriptors_class->get_pipeline_layout = gsk_vulkan_real_descriptors_get_pipeline_layout;
vulkan_descriptors_class->bind = gsk_vulkan_real_descriptors_bind;
@ -192,7 +215,8 @@ gsk_vulkan_real_descriptors_is_full (GskVulkanRealDescriptors *self)
}
static void
gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self,
GskGpuFrame *frame)
{
gsize n_immutable_samplers, n_samplers, n_buffers;
@ -211,8 +235,9 @@ gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
if (gsk_descriptor_image_infos_get_size (&self->descriptor_images) == 0)
{
guint32 ignored;
/* We have no image, find any random image and attach it */
guint32 ignored;
if (!gsk_gpu_descriptors_add_image (GSK_GPU_DESCRIPTORS (self),
gsk_gpu_device_get_atlas_image (GSK_GPU_DEVICE (self->device)),
GSK_GPU_SAMPLER_DEFAULT,
@ -229,8 +254,21 @@ gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
{
gsk_descriptor_image_infos_append (&self->descriptor_images, gsk_descriptor_image_infos_get (&self->descriptor_images, 0));
}
/* That should be the storage buffer */
g_assert (gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers) > 0);
if (gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers) == 0)
{
/* If there's no storage buffer yet, just make one */
GskGpuBuffer *buffer;
gsize ignored_offset;
guint32 ignored;
buffer = gsk_gpu_frame_write_storage_buffer (frame, NULL, 0, &ignored_offset);
if (!gsk_gpu_descriptors_add_buffer (GSK_GPU_DESCRIPTORS (self),
buffer,
&ignored))
{
g_assert_not_reached ();
}
}
while (n_buffers > gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers))
{
gsk_descriptor_buffer_infos_append (&self->descriptor_buffers, gsk_descriptor_buffer_infos_get (&self->descriptor_buffers, 0));
@ -239,6 +277,7 @@ gsk_vulkan_real_descriptors_fill_sets (GskVulkanRealDescriptors *self)
void
gsk_vulkan_real_descriptors_prepare (GskVulkanRealDescriptors *self,
GskGpuFrame *frame,
gsize *n_images,
gsize *n_buffers)
{
@ -247,7 +286,7 @@ gsk_vulkan_real_descriptors_prepare (GskVulkanRealDescriptors *self,
gsk_samplers_get_size (&self->immutable_samplers),
gsk_descriptor_image_infos_get_size (&self->descriptor_images),
gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers));
gsk_vulkan_real_descriptors_fill_sets (self);
gsk_vulkan_real_descriptors_fill_sets (self, frame);
*n_images = MAX (1, gsk_descriptor_image_infos_get_size (&self->descriptor_immutable_images)) +
gsk_descriptor_image_infos_get_size (&self->descriptor_images);
@ -329,21 +368,3 @@ gsk_vulkan_real_descriptors_update_sets (GskVulkanRealDescriptors *self,
write_descriptor_sets,
0, NULL);
}
guint32
gsk_vulkan_real_descriptors_get_buffer_descriptor (GskVulkanRealDescriptors *self,
GskGpuBuffer *buffer)
{
guint32 result;
result = gsk_descriptor_buffer_infos_get_size (&self->descriptor_buffers);
gsk_descriptor_buffer_infos_append (&self->descriptor_buffers,
&(VkDescriptorBufferInfo) {
.buffer = gsk_vulkan_buffer_get_vk_buffer (GSK_VULKAN_BUFFER (buffer)),
.offset = 0,
.range = VK_WHOLE_SIZE
});
return result;
}

View File

@ -11,11 +11,9 @@ G_DECLARE_FINAL_TYPE (GskVulkanRealDescriptors, gsk_vulkan_real_descriptors, GSK
GskVulkanRealDescriptors * gsk_vulkan_real_descriptors_new (GskVulkanDevice *device);
gboolean gsk_vulkan_real_descriptors_is_full (GskVulkanRealDescriptors *self);
guint32 gsk_vulkan_real_descriptors_get_buffer_descriptor
(GskVulkanRealDescriptors *self,
GskGpuBuffer *buffer);
void gsk_vulkan_real_descriptors_prepare (GskVulkanRealDescriptors *self,
GskGpuFrame *frame,
gsize *n_images,
gsize *n_buffers);
void gsk_vulkan_real_descriptors_update_sets (GskVulkanRealDescriptors *self,

View File

@ -49,6 +49,18 @@ gsk_vulkan_sub_descriptors_add_image (GskGpuDescriptors *desc,
out_descriptor);
}
static gboolean
gsk_vulkan_sub_descriptors_add_buffer (GskGpuDescriptors *desc,
GskGpuBuffer *buffer,
guint32 *out_descriptor)
{
GskVulkanSubDescriptors *self = GSK_VULKAN_SUB_DESCRIPTORS (desc);
return gsk_gpu_descriptors_add_buffer (GSK_GPU_DESCRIPTORS (self->parent),
buffer,
out_descriptor);
}
static void
gsk_vulkan_sub_descriptors_finalize (GObject *object)
{
@ -69,6 +81,7 @@ gsk_vulkan_sub_descriptors_class_init (GskVulkanSubDescriptorsClass *klass)
object_class->finalize = gsk_vulkan_sub_descriptors_finalize;
descriptors_class->add_image = gsk_vulkan_sub_descriptors_add_image;
descriptors_class->add_buffer = gsk_vulkan_sub_descriptors_add_buffer;
vulkan_descriptors_class->get_pipeline_layout = gsk_vulkan_sub_descriptors_get_pipeline_layout;
vulkan_descriptors_class->bind = gsk_vulkan_sub_descriptors_bind;

View File

@ -25,7 +25,7 @@ layout(std140, binding = 1)
uniform Floats
{
vec4 really_just_floats[1024];
} floats;
} floats[11];
#if N_EXTERNAL_TEXTURES > 0
uniform samplerExternalOES external_textures[N_EXTERNAL_TEXTURES];
@ -51,7 +51,35 @@ uniform sampler2D textures[N_TEXTURES];
float
gsk_get_float (int id)
{
return floats.really_just_floats[id >> 2][id & 3];
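/* The low 22 bits select a float inside the buffer, the bits above
* select which of the bound buffers to read from. */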
int float_id = id & 0x3FFFFF;
int array_id = (id >> 22) & 0xFF;
switch (array_id)
{
case 0:
return floats[0].really_just_floats[float_id >> 2][float_id & 3];
case 1:
return floats[1].really_just_floats[float_id >> 2][float_id & 3];
case 2:
return floats[2].really_just_floats[float_id >> 2][float_id & 3];
case 3:
return floats[3].really_just_floats[float_id >> 2][float_id & 3];
case 4:
return floats[4].really_just_floats[float_id >> 2][float_id & 3];
case 5:
return floats[5].really_just_floats[float_id >> 2][float_id & 3];
case 6:
return floats[6].really_just_floats[float_id >> 2][float_id & 3];
case 7:
return floats[7].really_just_floats[float_id >> 2][float_id & 3];
case 8:
return floats[8].really_just_floats[float_id >> 2][float_id & 3];
case 9:
return floats[9].really_just_floats[float_id >> 2][float_id & 3];
case 10:
return floats[10].really_just_floats[float_id >> 2][float_id & 3];
default:
return 0.0;
}
}
float

View File

@ -77,7 +77,6 @@ gsk_private_sources = files([
'gpu/gskgpuborderop.c',
'gpu/gskgpuboxshadowop.c',
'gpu/gskgpubuffer.c',
'gpu/gskgpubufferwriter.c',
'gpu/gskgpuclearop.c',
'gpu/gskgpuclip.c',
'gpu/gskgpucolorizeop.c',